[LITE][XPU] Supporting llvm and xpu device target #2711

Merged
7 changes: 5 additions & 2 deletions lite/backends/xpu/device.cc
@@ -36,8 +36,11 @@ std::unique_ptr<xtcl::network::xRuntimeInstance> Device::Build(
   }
   xtcl::xNetwork network =
       builder->FinalizeNetwork(xtcl::relay::TupleNode::make(all_outs));
-  auto target = xtcl::Target::Create(device_name_);
-  auto compiler = xtcl::network::xTensorCompiler(network, target);
+  auto target = xtcl::NullValue<xtcl::Target>();
+  if (!target_.empty()) {
+    target = xtcl::Target::Create(target_);
+  }
+  xtcl::network::xTensorCompiler compiler(network, target);
   compiler.SetParams(*params);  // Set the data of constant tensors
   compiler.Build();
   VLOG(3) << "[XPU] Build done";
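In words: an explicit `xtcl::Target` is only created when a target string has been configured; otherwise a null target is passed and XTCL falls back to its own default. A minimal sketch of that fallback, using only the XTCL calls already visible in this diff (the `SelectTarget` helper is illustrative, not part of the PR):

```cpp
#include <xtcl/xtcl.h>

#include <string>

// Sketch only: mirrors the target-selection fallback added to Device::Build().
xtcl::Target SelectTarget(const std::string& target_str) {
  // Empty string: pass a null target and let XTCL choose its default.
  auto target = xtcl::NullValue<xtcl::Target>();
  if (!target_str.empty()) {
    // Explicit target, e.g. "llvm" or "xpu -libs=xdnn" (see device.h below).
    target = xtcl::Target::Create(target_str);
  }
  return target;
}
```

`Device::Build()` inlines this logic directly, sourcing the string from the `target_` member introduced in device.h below.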
22 changes: 18 additions & 4 deletions lite/backends/xpu/device.h
@@ -15,6 +15,7 @@
 #pragma once

 #include <xtcl/xtcl.h>
+#include <cstdlib>
 #include <memory>
 #include <string>
 #include <utility>
@@ -30,7 +31,18 @@ class Device {
     static Device x;
     return x;
   }
-  Device() {}
+  Device() {
+    char* name = std::getenv("XPU_DEVICE_NAME");
+    if (name) {
+      name_ = std::string(name);
+    }
+    // XPU_DEVICE_TARGET for XPU model building, which supports 'llvm' and 'xpu
+    // -libs=xdnn'
+    char* target = std::getenv("XPU_DEVICE_TARGET");
+    if (target) {
+      target_ = std::string(target);
+    }
+  }

   // Build the XPU graph to the XPU runtime, return the XPU runtime which can be
   // used to run inference.
@@ -39,10 +51,12 @@
       xtcl::network::xTensorCompiler::ParamNDArrayMap* params,
       std::vector<xtcl::xExpr*>* outputs);

+  const std::string name() const { return name_; }
+  const std::string target() const { return target_; }
+
  private:
-  // Keep reserved fields
-  int device_id_{0};
-  std::string device_name_{"llvm"};
+  std::string name_{""};
+  std::string target_{""};
 };

 } // namespace xpu
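The constructor now reads `XPU_DEVICE_NAME` and `XPU_DEVICE_TARGET` once, when the singleton is first constructed, and exposes them through `name()` and `target()`. A hypothetical usage sketch, assuming the usual `paddle::lite::xpu` namespace nesting and POSIX `setenv`; the chosen values are only examples:

```cpp
#include <cstdlib>   // std::getenv / POSIX setenv
#include <iostream>

#include "lite/backends/xpu/device.h"

int main() {
  // These must be set before Device::Global() is first called, because the
  // constructor caches the environment values in name_ and target_.
  setenv("XPU_DEVICE_NAME", "xpu0", /*overwrite=*/1);              // example value
  setenv("XPU_DEVICE_TARGET", "xpu -libs=xdnn", /*overwrite=*/1);  // or "llvm"

  auto& device = paddle::lite::xpu::Device::Global();
  std::cout << "name: " << device.name() << ", target: " << device.target()
            << std::endl;
  return 0;
}
```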
2 changes: 1 addition & 1 deletion lite/kernels/xpu/bridges/utility.cc
@@ -103,7 +103,7 @@ DLDeviceType CvtDLDeviceType(TargetType in_type) {
       out_type = kDLGPU;
       break;
     case TARGET(kXPU):
-      out_type = kDLCPU;
+      out_type = static_cast<DLDeviceType>(kDLXPU);
       break;
     default:
       LOG(FATAL) << "[XPU] Can not convert target type(" << TargetToStr(in_type)
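The explicit cast suggests `kDLXPU` is not an enumerator of the upstream `DLDeviceType` enum from dlpack.h but an extension constant supplied by the XPU headers; C++ forbids implicitly converting such an integer back into the enum. A stand-alone sketch of that pattern with stub names (nothing below is the real dlpack or XTCL definition):

```cpp
// Stub enum standing in for dlpack's DLDeviceType.
enum DLDeviceTypeStub { kDLCPUStub = 1, kDLGPUStub = 2 };

// Hypothetical vendor extension code that is not an enumerator of the stub enum.
constexpr int kDLXPUStub = 16;

DLDeviceTypeStub CvtToXPUDeviceStub() {
  // int -> enum is not an implicit conversion in C++, hence the static_cast,
  // mirroring the change in CvtDLDeviceType above.
  return static_cast<DLDeviceTypeStub>(kDLXPUStub);
}
```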
2 changes: 1 addition & 1 deletion lite/kernels/xpu/subgraph_compute.cc
@@ -175,7 +175,7 @@ int SubgraphEngine::LaunchDeviceProgram() {
     // Update the data pointer of DLTensor to track the origin input tensors
     device_itensors_[i].data =
         const_cast<void*>(origin_itensors_[i]->raw_data());
-    device_program_->SetInputZeroCopy(device_inames_[i], &device_itensors_[i]);
+    device_program_->SetInput(device_inames_[i], &device_itensors_[i]);
   }
   // Run the XPU model
   auto GetCurrentUS = []() -> double {
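For context, this loop re-points the pre-built `DLTensor` descriptors at the current input buffers on every launch and then binds them by name; only the binding call changes here, from `SetInputZeroCopy` to `SetInput`. A tiny self-contained sketch of the re-pointing step, using the public dlpack `DLTensor` struct (the helper name is made up):

```cpp
#include <dlpack/dlpack.h>  // DLTensor, as already used by the XPU bridges

// Re-point an existing DLTensor descriptor at a caller-owned host buffer.
// DLTensor does not own the memory, so only the pointer and offset change.
void RebindHostBuffer(DLTensor* tensor, void* buffer) {
  tensor->data = buffer;
  tensor->byte_offset = 0;  // assume data starts at the beginning of the buffer
}
```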
5 changes: 5 additions & 0 deletions lite/tools/build_xpu.sh
@@ -104,6 +104,11 @@ function main {
                 build_xpu
                 shift
                 ;;
+            full_publish)
+                TARGET_NAME=publish_inference
+                build_xpu
+                shift
+                ;;
             *)
                 # unknown option
                 print_usage
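Usage note: after this change, running `./lite/tools/build_xpu.sh full_publish` sets `TARGET_NAME=publish_inference` before invoking `build_xpu`, so the full inference library is built rather than only the default target; the exact artifacts produced depend on how `build_xpu` consumes `TARGET_NAME`, which is outside this diff.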