
Commit: lint

icemelon committed Mar 22, 2021
1 parent 858b620 commit feb72fc
Showing 25 changed files with 64 additions and 73 deletions.
7 changes: 3 additions & 4 deletions include/tvm/runtime/device_api.h
@@ -156,8 +156,7 @@ class TVM_DLL DeviceAPI {
* \param event_src The source stream to synchronize.
* \param event_dst The destination stream to synchronize.
*/
- virtual void SyncStreamFromTo(Device dev, TVMStreamHandle event_src,
- TVMStreamHandle event_dst);
+ virtual void SyncStreamFromTo(Device dev, TVMStreamHandle event_src, TVMStreamHandle event_dst);
/*!
* \brief Allocate temporal workspace for backend execution.
*
@@ -299,8 +298,8 @@ inline std::ostream& operator<<(std::ostream& os, DLDevice dev);
* \return A Device with RPC session mask added, valid on the RPC client.
*/
inline Device AddRPCSessionMask(Device dev, int session_table_index) {
- CHECK(!IsRPCSessionDevice(dev))
- << "AddRPCSessionMask: dev already non-zero RPCSessionIndex: " << dev;
+ CHECK(!IsRPCSessionDevice(dev)) << "AddRPCSessionMask: dev already non-zero RPCSessionIndex: "
+ << dev;
dev.device_type =
static_cast<DLDeviceType>(dev.device_type | (kRPCSessMask * (session_table_index + 1)));
return dev;
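
Side note on the hunk above: the session mask encodes (session_table_index + 1) * kRPCSessMask into the high bits of device_type. A minimal round-trip sketch, assuming the companion helpers that live in the same header (IsRPCSessionDevice, GetRPCSessionIndex, RemoveRPCSessionMask); not part of this commit:

#include <tvm/runtime/device_api.h>

#include <cassert>

int main() {
  using namespace tvm::runtime;
  Device dev{kDLGPU, 0};  // plain local device, no session bits set
  // Tag the device with entry 0 of the RPC session table; per the code
  // above this ORs kRPCSessMask * (0 + 1) into device_type.
  Device remote = AddRPCSessionMask(dev, /*session_table_index=*/0);
  assert(IsRPCSessionDevice(remote));
  assert(GetRPCSessionIndex(remote) == 0);
  // Strip the mask to recover the device as the remote side sees it.
  Device local = RemoveRPCSessionMask(remote);
  assert(local.device_type == kDLGPU && local.device_id == 0);
  return 0;
}
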
4 changes: 2 additions & 2 deletions include/tvm/runtime/ndarray.h
@@ -421,8 +421,8 @@ inline bool SaveDLTensor(dmlc::Stream* strm, const DLTensor* tensor) {
int64_t data_byte_size = type_bytes * num_elems;
strm->Write(data_byte_size);

- if (DMLC_IO_NO_ENDIAN_SWAP && tensor->device.device_type == kDLCPU && tensor->strides == nullptr &&
- tensor->byte_offset == 0) {
+ if (DMLC_IO_NO_ENDIAN_SWAP && tensor->device.device_type == kDLCPU &&
+ tensor->strides == nullptr && tensor->byte_offset == 0) {
// quick path
strm->Write(tensor->data, data_byte_size);
} else {
6 changes: 3 additions & 3 deletions src/relay/analysis/context_analysis.cc
@@ -104,7 +104,8 @@ class DeviceDomain {

// Check if the current domain equals the other one.
bool operator==(const DeviceDomain& other) const {
- return device_.device_type == other.device_.device_type && device_.device_id == other.device_.device_id;
+ return device_.device_type == other.device_.device_type &&
+ device_.device_id == other.device_.device_id;
}

bool operator!=(const DeviceDomain& other) const { return !(*this == other); }
@@ -701,8 +702,7 @@ AnalysisResultMap ContextAnalysis(const IRModule& mod, const Device& default_dev

// Unpack the device type and device id fields in Device for PackedFunc calls
// as Device is not in the object system.
- PackedAnalysisResultMap ContextAnalysisPacked(const IRModule& mod,
- const Device& default_device) {
+ PackedAnalysisResultMap ContextAnalysisPacked(const IRModule& mod, const Device& default_device) {
PackedAnalysisResultMap ret;
auto res = ContextAnalysis(mod, default_device);
for (const auto& it : res) {
3 changes: 1 addition & 2 deletions src/relay/transforms/memory_alloc.cc
@@ -262,8 +262,7 @@ class DialectRewriter : public ExprMutator {
}

// Allocate a tensor with a statically known shape.
- Var MakeStaticAllocation(LetList* scope, const TensorType& type, Device dev,
- String name_hint) {
+ Var MakeStaticAllocation(LetList* scope, const TensorType& type, Device dev, String name_hint) {
std::vector<int64_t> int_shape;
for (auto it : type->shape) {
const auto* imm = it.as<IntImmNode>();
7 changes: 3 additions & 4 deletions src/runtime/c_runtime_api.cc
@@ -199,8 +199,7 @@ void DeviceAPI::FreeStream(Device dev, TVMStreamHandle stream) {
LOG(FATAL) << "Device does not support stream api.";
}

- void DeviceAPI::SyncStreamFromTo(Device dev, TVMStreamHandle event_src,
- TVMStreamHandle event_dst) {
+ void DeviceAPI::SyncStreamFromTo(Device dev, TVMStreamHandle event_src, TVMStreamHandle event_dst) {
LOG(FATAL) << "Device does not support stream api.";
}

@@ -597,8 +596,8 @@ int TVMDeviceAllocDataSpace(DLDevice dev, size_t nbytes, size_t alignment, DLDat
API_END();
}

- int TVMDeviceAllocDataSpaceWithScope(DLDevice dev, int ndim, const int64_t* shape,
- DLDataType dtype, const char* mem_scope, void** out_data) {
+ int TVMDeviceAllocDataSpaceWithScope(DLDevice dev, int ndim, const int64_t* shape, DLDataType dtype,
+ const char* mem_scope, void** out_data) {
API_BEGIN();
Optional<String> scope;
if (mem_scope != nullptr) {
3 changes: 2 additions & 1 deletion src/runtime/contrib/edgetpu/edgetpu_runtime.h
@@ -25,9 +25,10 @@
#ifndef TVM_RUNTIME_CONTRIB_EDGETPU_EDGETPU_RUNTIME_H_
#define TVM_RUNTIME_CONTRIB_EDGETPU_EDGETPU_RUNTIME_H_

+ #include <edgetpu.h>
+
#include <memory>
#include <string>
- #include <edgetpu.h>

#include "../tflite/tflite_runtime.h"

3 changes: 2 additions & 1 deletion src/runtime/contrib/ethosn/ethosn_device.cc
@@ -190,7 +190,8 @@ TVM_REGISTER_GLOBAL("relay.ethos-n.test.infra.inference_result")
for (int argc = 0; argc < args.size(); argc++) {
const DLTensor* tensor = args[argc];
auto shape = std::vector<int64_t>(tensor->shape, tensor->shape + tensor->ndim);
- test_outputs.emplace_back(tvm::runtime::NDArray::Empty(shape, tensor->dtype, tensor->device));
+ test_outputs.emplace_back(
+ tvm::runtime::NDArray::Empty(shape, tensor->dtype, tensor->device));
test_outputs[test_outputs.size() - 1].CopyFrom(tensor);
}
});
3 changes: 1 addition & 2 deletions src/runtime/cpu_device_api.cc
@@ -44,8 +44,7 @@ class CPUDeviceAPI final : public DeviceAPI {
*rv = 1;
}
}
- void* AllocDataSpace(Device dev, size_t nbytes, size_t alignment,
- DLDataType type_hint) final {
+ void* AllocDataSpace(Device dev, size_t nbytes, size_t alignment, DLDataType type_hint) final {
void* ptr;
#if _MSC_VER
ptr = _aligned_malloc(nbytes, alignment);
4 changes: 2 additions & 2 deletions src/runtime/crt/common/crt_runtime_api.c
@@ -91,8 +91,8 @@ int TVMDeviceAllocDataSpace(DLDevice dev, size_t nbytes, size_t alignment, DLDat
return TVMPlatformMemoryAllocate(nbytes, dev, out_data);
}

- int TVMDeviceAllocDataSpaceWithScope(DLDevice dev, int ndim, const int64_t* shape,
- DLDataType dtype, const char* mem_scope, void** out_data) {
+ int TVMDeviceAllocDataSpaceWithScope(DLDevice dev, int ndim, const int64_t* shape, DLDataType dtype,
+ const char* mem_scope, void** out_data) {
size_t nbytes = 1;
for (int i = 0; i < ndim; ++i) {
nbytes *= shape[i];
3 changes: 1 addition & 2 deletions src/runtime/crt/graph_runtime/graph_runtime.c
@@ -266,8 +266,7 @@ int TVMGraphRuntimeGraphAttr_Load(TVMGraphRuntimeGraphAttr* attr, JSONReader* re
}
DLDevice dev = {kDLCPU, 0};
tvm_crt_error_t err =
- TVMPlatformMemoryAllocate(TVM_CRT_STRLEN_DLTYPE * num_items, dev
- , (void**)&attr->dltype);
+ TVMPlatformMemoryAllocate(TVM_CRT_STRLEN_DLTYPE * num_items, dev, (void**)&attr->dltype);
if (err != kTvmErrorNoError) {
fprintf(stderr, "memory allocate error: %08x", err);
return -1;
3 changes: 1 addition & 2 deletions src/runtime/cuda/cuda_device_api.cc
@@ -106,8 +106,7 @@ class CUDADeviceAPI final : public DeviceAPI {
}
*rv = value;
}
- void* AllocDataSpace(Device dev, size_t nbytes, size_t alignment,
- DLDataType type_hint) final {
+ void* AllocDataSpace(Device dev, size_t nbytes, size_t alignment, DLDataType type_hint) final {
ICHECK_EQ(256 % alignment, 0U) << "CUDA space is aligned at 256 bytes";
void* ret;
if (dev.device_type == kDLCPUPinned) {
2 changes: 1 addition & 1 deletion src/runtime/graph/graph_runtime.cc
@@ -306,7 +306,7 @@ void GraphRuntime::SetupStorage() {
{
std::vector<int64_t> shape_vec{attrs_.shape[i].begin(), attrs_.shape[i].end()};
DLTensor template_tensor{nullptr, Device{kDLCPU, 0}, static_cast<int>(shape_vec.size()),
- vtype[i], shape_vec.data(), nullptr,
+ vtype[i], shape_vec.data(), nullptr,
0};
lookup_rv = lookup_linked_param_(module_, sid, &template_tensor, devices_[0]);
}
3 changes: 1 addition & 2 deletions src/runtime/graph/graph_runtime.h
@@ -97,8 +97,7 @@ class TVM_DLL GraphRuntime : public ModuleNode {
*/

void Init(const std::string& graph_json, tvm::runtime::Module module,
- const std::vector<Device>& devs,
- const PackedFunc lookup_linked_param_func = nullptr);
+ const std::vector<Device>& devs, const PackedFunc lookup_linked_param_func = nullptr);

/*!
* \brief Get the input index given the name of input.
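
For orientation, the Init overload re-wrapped above is normally reached through the graph runtime factory. A minimal sketch mirroring what GraphRuntimeCreate does internally (the include path for this internal header is an assumption of the sketch):

#include <tvm/runtime/memory.h>

#include <string>

#include "graph_runtime.h"  // internal header declaring GraphRuntime

namespace tvm {
namespace runtime {

// Construct the node, then call the re-wrapped Init overload with a single
// CPU device and the default (nullptr) linked-param lookup function.
Module CreateCpuGraphRuntime(const std::string& graph_json, Module mod) {
  auto exec = make_object<GraphRuntime>();
  exec->Init(graph_json, mod, {Device{kDLCPU, 0}});
  return Module(exec);
}

}  // namespace runtime
}  // namespace tvm
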
10 changes: 5 additions & 5 deletions src/runtime/hexagon/hexagon_device_api.cc
@@ -48,8 +48,8 @@ class HexagonDeviceAPI : public DeviceAPI {

protected:
void CopyDataFromTo(const void* from, size_t from_offset, void* to, size_t to_offset,
- size_t num_bytes, Device dev_from, Device dev_to,
- DLDataType type_hint, TVMStreamHandle stream) final;
+ size_t num_bytes, Device dev_from, Device dev_to, DLDataType type_hint,
+ TVMStreamHandle stream) final;
};

// HexagonDeviceAPI.
@@ -72,9 +72,9 @@ inline void HexagonDeviceAPI::FreeDataSpace(Device dev, void* ptr) {
}

inline void HexagonDeviceAPI::CopyDataFromTo(const void* from, size_t from_offset, void* to,
- size_t to_offset, size_t num_bytes,
- Device dev_from, Device dev_to,
- DLDataType type_hint, TVMStreamHandle stream) {
+ size_t to_offset, size_t num_bytes, Device dev_from,
+ Device dev_to, DLDataType type_hint,
+ TVMStreamHandle stream) {
const char* src = static_cast<const char*>(from) + from_offset;
char* dst = static_cast<char*>(to) + to_offset;

5 changes: 2 additions & 3 deletions src/runtime/metal/metal_device_api.mm
@@ -172,9 +172,8 @@ int GetWarpSize(id<MTLDevice> dev) {
}

void MetalWorkspace::CopyDataFromTo(const void* from, size_t from_offset, void* to,
- size_t to_offset, size_t size, Device dev_from,
- Device dev_to, DLDataType type_hint,
- TVMStreamHandle stream) {
+ size_t to_offset, size_t size, Device dev_from, Device dev_to,
+ DLDataType type_hint, TVMStreamHandle stream) {
this->Init();
ICHECK(stream == nullptr);
Device dev = dev_from;
4 changes: 2 additions & 2 deletions src/runtime/minrpc/minrpc_server.h
@@ -418,8 +418,8 @@ class MinRPCServer {
DLTensor* arr = reinterpret_cast<DLTensor*>(values[0].v_handle);
const char* mem_scope = (tcodes[1] == kTVMNullptr ? nullptr : values[1].v_str);
void* handle;
- int call_ecode = TVMDeviceAllocDataSpaceWithScope(arr->device, arr->ndim, arr->shape, arr->dtype,
- mem_scope, &handle);
+ int call_ecode = TVMDeviceAllocDataSpaceWithScope(arr->device, arr->ndim, arr->shape,
+ arr->dtype, mem_scope, &handle);
if (call_ecode == 0) {
this->ReturnHandle(handle);
} else {
5 changes: 3 additions & 2 deletions src/runtime/ndarray.cc
@@ -193,8 +193,9 @@ DLManagedTensor* NDArray::ToDLPack() const { return Internal::ToDLPack(get_mutab
NDArray NDArray::Empty(std::vector<int64_t> shape, DLDataType dtype, Device dev,
Optional<String> mem_scope) {
NDArray ret = Internal::Create(shape, dtype, dev);
- ret.get_mutable()->dl_tensor.data = DeviceAPI::Get(ret->device)->AllocDataSpace(
- ret->device, shape.size(), shape.data(), ret->dtype, mem_scope);
+ ret.get_mutable()->dl_tensor.data =
+ DeviceAPI::Get(ret->device)
+ ->AllocDataSpace(ret->device, shape.size(), shape.data(), ret->dtype, mem_scope);
return ret;
}

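
The call re-wrapped above is the allocation path behind every NDArray::Empty; a small usage sketch, not part of this commit:

#include <tvm/runtime/ndarray.h>

int main() {
  using namespace tvm::runtime;
  // Allocate an uninitialized 2x3 float32 tensor on CPU; its data pointer
  // comes from the DeviceAPI::AllocDataSpace call shown above.
  NDArray a = NDArray::Empty({2, 3}, DLDataType{kDLFloat, 32, 1}, Device{kDLCPU, 0});
  float src[6] = {0, 1, 2, 3, 4, 5};
  a.CopyFromBytes(src, sizeof(src));  // fill from host memory
  return 0;
}
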
11 changes: 4 additions & 7 deletions src/runtime/opencl/opencl_device_api.cc
@@ -36,9 +36,7 @@ OpenCLWorkspace* OpenCLWorkspace::Global() {
return inst;
}

- void OpenCLWorkspace::SetDevice(Device dev) {
- GetThreadEntry()->device.device_id = dev.device_id;
- }
+ void OpenCLWorkspace::SetDevice(Device dev) { GetThreadEntry()->device.device_id = dev.device_id; }

void OpenCLWorkspace::GetAttr(Device dev, DeviceAttrKind kind, TVMRetValue* rv) {
this->Init();
@@ -136,9 +134,8 @@ void OpenCLWorkspace::FreeDataSpace(Device dev, void* ptr) {
}

void OpenCLWorkspace::CopyDataFromTo(const void* from, size_t from_offset, void* to,
- size_t to_offset, size_t size, Device dev_from,
- Device dev_to, DLDataType type_hint,
- TVMStreamHandle stream) {
+ size_t to_offset, size_t size, Device dev_from, Device dev_to,
+ DLDataType type_hint, TVMStreamHandle stream) {
this->Init();
ICHECK(stream == nullptr);
if (IsOpenCLDevice(dev_from) && IsOpenCLDevice(dev_to)) {
@@ -264,7 +261,7 @@ void OpenCLWorkspace::Init(const std::string& type_key, const std::string& devic
}
cl_int err_code;
this->device = clCreateContext(nullptr, this->devices.size(), &(this->devices[0]), nullptr,
- nullptr, &err_code);
+ nullptr, &err_code);
OPENCL_CHECK_ERROR(err_code);
ICHECK_EQ(this->queues.size(), 0U);
for (size_t i = 0; i < this->devices.size(); ++i) {
24 changes: 13 additions & 11 deletions src/runtime/rocm/rocm_device_api.cc
@@ -67,11 +67,11 @@ class ROCMDeviceAPI final : public DeviceAPI {
}
case kComputeVersion: {
std::ostringstream os;
- ROCM_CALL(
- hipDeviceGetAttribute(&value, hipDeviceAttributeComputeCapabilityMajor, device.device_id));
+ ROCM_CALL(hipDeviceGetAttribute(&value, hipDeviceAttributeComputeCapabilityMajor,
+ device.device_id));
os << value << ".";
- ROCM_CALL(
- hipDeviceGetAttribute(&value, hipDeviceAttributeComputeCapabilityMinor, device.device_id));
+ ROCM_CALL(hipDeviceGetAttribute(&value, hipDeviceAttributeComputeCapabilityMinor,
+ device.device_id));
os << value;
*rv = os.str();
return;
@@ -94,18 +94,21 @@ }
}
case kMaxThreadDimensions: {
int dims[3];
- ROCM_CALL(hipDeviceGetAttribute(&dims[0], hipDeviceAttributeMaxBlockDimX, device.device_id));
- ROCM_CALL(hipDeviceGetAttribute(&dims[1], hipDeviceAttributeMaxBlockDimY, device.device_id));
- ROCM_CALL(hipDeviceGetAttribute(&dims[2], hipDeviceAttributeMaxBlockDimZ, device.device_id));
+ ROCM_CALL(
+ hipDeviceGetAttribute(&dims[0], hipDeviceAttributeMaxBlockDimX, device.device_id));
+ ROCM_CALL(
+ hipDeviceGetAttribute(&dims[1], hipDeviceAttributeMaxBlockDimY, device.device_id));
+ ROCM_CALL(
+ hipDeviceGetAttribute(&dims[2], hipDeviceAttributeMaxBlockDimZ, device.device_id));

std::stringstream ss;
ss << "[" << dims[0] << ", " << dims[1] << ", " << dims[2] << "]";
*rv = ss.str();
return;
}
case kMaxRegistersPerBlock:
- ROCM_CALL(
- hipDeviceGetAttribute(&value, hipDeviceAttributeMaxRegistersPerBlock, device.device_id));
+ ROCM_CALL(hipDeviceGetAttribute(&value, hipDeviceAttributeMaxRegistersPerBlock,
+ device.device_id));
break;
case kGcnArch: {
hipDeviceProp_t prop;
@@ -120,8 +123,7 @@ }
}
*rv = value;
}
- void* AllocDataSpace(Device dev, size_t nbytes, size_t alignment,
- DLDataType type_hint) final {
+ void* AllocDataSpace(Device dev, size_t nbytes, size_t alignment, DLDataType type_hint) final {
ROCM_CALL(hipSetDevice(dev.device_id));
ICHECK_EQ(256 % alignment, 0U) << "ROCM space is aligned at 256 bytes";
void* ret;
7 changes: 3 additions & 4 deletions src/runtime/rpc/rpc_device_api.cc
@@ -55,8 +55,7 @@ class RPCDeviceAPI final : public DeviceAPI {
return space;
}

- void* AllocDataSpace(Device dev, size_t nbytes, size_t alignment,
- DLDataType type_hint) final {
+ void* AllocDataSpace(Device dev, size_t nbytes, size_t alignment, DLDataType type_hint) final {
auto sess = GetSess(dev);
auto remote_dev = RemoveRPCSessionMask(dev);
void* data =
@@ -119,8 +118,8 @@ class RPCDeviceAPI final : public DeviceAPI {

protected:
void CopyDataFromTo(const void* from, size_t from_offset, void* to, size_t to_offset,
- size_t num_bytes, Device dev_from, Device dev_to,
- DLDataType type_hint, TVMStreamHandle stream) final {
+ size_t num_bytes, Device dev_from, Device dev_to, DLDataType type_hint,
+ TVMStreamHandle stream) final {
LOG(FATAL) << "Not implemented.";
}

6 changes: 2 additions & 4 deletions src/runtime/rpc/rpc_endpoint.cc
@@ -178,8 +178,7 @@ class RPCEndpoint::EventHandler : public dmlc::Stream {
<< args[i].AsObjectRef<ObjectRef>()->GetTypeKey() << " is not supported by RPC";
} else if (tcode == kDLDevice) {
DLDevice dev = args[i];
- ICHECK(!IsRPCSessionDevice(dev))
- << "InternalError: cannot pass RPC context in the channel";
+ ICHECK(!IsRPCSessionDevice(dev)) << "InternalError: cannot pass RPC context in the channel";
}
}
}
@@ -1004,8 +1003,7 @@ class RPCClientSession : public RPCSession, public DeviceAPI {
}
}

- void* AllocDataSpace(Device dev, size_t nbytes, size_t alignment,
- DLDataType type_hint) final {
+ void* AllocDataSpace(Device dev, size_t nbytes, size_t alignment, DLDataType type_hint) final {
return endpoint_->SysCallRemote(RPCCode::kDevAllocData, dev, nbytes, alignment, type_hint);
}

4 changes: 2 additions & 2 deletions src/runtime/rpc/rpc_module.cc
@@ -342,8 +342,8 @@ inline void CPUCacheFlush(int begin_index, const TVMArgs& args) {
}
}

- PackedFunc WrapTimeEvaluator(PackedFunc pf, Device dev, int number, int repeat,
- int min_repeat_ms, PackedFunc f_preproc) {
+ PackedFunc WrapTimeEvaluator(PackedFunc pf, Device dev, int number, int repeat, int min_repeat_ms,
+ PackedFunc f_preproc) {
ICHECK(pf != nullptr);

if (static_cast<int>(dev.device_type) == static_cast<int>(kDLMicroDev)) {
4 changes: 2 additions & 2 deletions src/runtime/rpc/rpc_session.h
@@ -302,8 +302,8 @@ struct RemoteSpace {
* \param f_preproc The function to be executed before we execute the time evaluator.
* \return f_timer A timer function.
*/
- PackedFunc WrapTimeEvaluator(PackedFunc f, Device dev, int number, int repeat,
- int min_repeat_ms, PackedFunc f_preproc = nullptr);
+ PackedFunc WrapTimeEvaluator(PackedFunc f, Device dev, int number, int repeat, int min_repeat_ms,
+ PackedFunc f_preproc = nullptr);

/*!
* \brief Create a Global RPC module that refers to the session.
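
For reference, a hedged sketch of how the re-wrapped declaration above is consumed. The canonical entry point is the runtime.RPCTimeEvaluator packed function, and the return convention assumed here (repeat doubles of mean seconds packed into a byte blob) comes from how its callers decode the result:

#include <tvm/runtime/packed_func.h>

#include <string>
#include <vector>

#include "rpc_session.h"  // declares WrapTimeEvaluator

// Time a zero-argument PackedFunc on `dev`: each of 3 repeats averages
// 10 invocations.
std::vector<double> TimeIt(tvm::runtime::PackedFunc f, DLDevice dev) {
  using namespace tvm::runtime;
  PackedFunc timer = WrapTimeEvaluator(f, dev, /*number=*/10, /*repeat=*/3,
                                       /*min_repeat_ms=*/0);
  std::string blob = timer();  // forward the args f expects (none here)
  const double* costs = reinterpret_cast<const double*>(blob.data());
  return {costs, costs + 3};
}
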
3 changes: 2 additions & 1 deletion src/runtime/vm/memory_manager.cc
@@ -80,7 +80,8 @@ NDArray StorageObj::AllocNDArray(size_t offset, std::vector<int64_t> shape, DLDa
VerifyDataType(dtype);

// critical zone: allocate header, cannot throw
- NDArray::Container* container = new NDArray::Container(nullptr, shape, dtype, this->buffer.device);
+ NDArray::Container* container =
+ new NDArray::Container(nullptr, shape, dtype, this->buffer.device);

container->SetDeleter(StorageObj::Deleter);
size_t needed_size = GetDataSize(container->dl_tensor);
3 changes: 1 addition & 2 deletions src/runtime/vulkan/vulkan.cc
@@ -118,8 +118,7 @@ class VulkanDeviceAPI final : public DeviceAPI {
void SetDevice(Device dev) final { VulkanThreadEntry::ThreadLocal()->device = dev; }
void GetAttr(Device dev, DeviceAttrKind kind, TVMRetValue* rv) final;
std::vector<uint32_t> GetComputeQueueFamilies(VkPhysicalDevice phy_dev);
- void* AllocDataSpace(Device dev, size_t nbytes, size_t alignment,
- DLDataType type_hint) final {
+ void* AllocDataSpace(Device dev, size_t nbytes, size_t alignment, DLDataType type_hint) final {
if (nbytes == 0) {
// Vulkan seems to have issues if we return nullptr on zero size alloc
nbytes = 1;
