fix cmake cxx flags #9467

Merged (3 commits) on Sep 22, 2022
13 changes: 9 additions & 4 deletions cmake/flags.cmake
@@ -119,6 +119,7 @@ if (NOT WIN32)
set(COMMON_FLAGS
-fPIC
-fno-omit-frame-pointer
-Werror
-Wall
-Wextra
-Wnon-virtual-dtor
@@ -139,10 +140,14 @@ set(COMMON_FLAGS
-Wno-error=maybe-uninitialized # Warning in boost gcc 7.2
)

if (NOT EMSCRIPTEN)
# disable -Werror for Emscripten
set(COMMON_FLAGS "${COMMON_FLAGS} -Werror")
endif(NOT EMSCRIPTEN)
if((CMAKE_CXX_COMPILER_ID STREQUAL "Clang") OR EMSCRIPTEN)
# disable -Werror
list(REMOVE_ITEM COMMON_FLAGS "-Werror")
endif()

if((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") AND (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9.3))
list(APPEND COMMON_FLAGS "-Wno-error=deprecated-copy") # Warning in Eigen
endif()

set(GPU_COMMON_FLAGS
-fPIC
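A note on the flags.cmake change, which gives this PR its title: COMMON_FLAGS is a CMake list, and the removed set(COMMON_FLAGS "${COMMON_FLAGS} -Werror") glued " -Werror" onto the last list element as one space-joined string instead of appending a new element, while only ever exempting Emscripten. The replacement keeps -Werror as a proper list entry, strips it again with list(REMOVE_ITEM ...) for Clang and Emscripten builds, and for GCC 9.3 and newer downgrades the deprecated-copy diagnostic (triggered inside Eigen) from error to warning. Presumably that malformed flag string is the "cmake cxx flags" bug being fixed here.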
@@ -221,7 +221,6 @@ bool NMSFixer::HandleMatchedResults(core::Model* model,
auto concat0_operand0 = concat0_operation->input_operands[0];
auto concat0_operand1 = concat0_operation->input_operands[1];
auto concat0_operand2 = concat0_operation->input_operands[2];
auto concat0_operand3 = concat0_operation->input_operands[3];
auto concat0_operand4 = concat0_operation->input_operands[4];
concat0_operation->input_operands = {
concat0_operand0, concat0_operand1, concat0_operand2, concat0_operand4};
@@ -230,14 +229,12 @@ bool NMSFixer::HandleMatchedResults(
auto concat1_operand0 = concat1_operation->input_operands[0];
auto concat1_operand1 = concat1_operation->input_operands[1];
auto concat1_operand2 = concat1_operation->input_operands[2];
auto concat1_operand3 = concat1_operation->input_operands[3];
auto concat1_operand4 = concat1_operation->input_operands[4];
concat1_operation->input_operands = {
concat1_operand0, concat1_operand1, concat1_operand2, concat1_operand4};

auto concat2_operation = nodes.at("concat2")->operation;
auto concat2_operand0 = concat2_operation->input_operands[0];
auto concat2_operand1 = concat2_operation->input_operands[1];
auto concat2_operand2 = concat2_operation->input_operands[2];
auto nms_operation = nodes.at("nms")->operation;
auto nms_index_operand = nms_operation->output_operands[2];
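Most of the C++ deletions in this PR, here and in the fuser, kernel, and test files below, remove locals that were assigned but never read. With the COMMON_FLAGS change above, -Wall and -Wextra are now effectively enforced by -Werror on GCC builds, so an unused local is no longer just noise but a build failure. A minimal illustration of the class of error involved, not taken from the repository:

// unused_variable.cc  (illustrative only; g++ -Wall -Wextra -Werror -c unused_variable.cc)
int SumValues(const int* values, int count) {
  int scratch = count;  // error: unused variable 'scratch' [-Werror=unused-variable]
  int total = 0;
  for (int i = 0; i < count; ++i) total += values[i];
  return total;
}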
@@ -50,7 +50,7 @@ class ConstantFoldOperationsFuser : public PatternMatcher {

void ConstantFoldOperationsFuser::BuildPattern() {
// Create patterns
auto operation = CreatePattern("operation")->IsOperation();
CreatePattern("operation")->IsOperation();
}

bool ConstantFoldOperationsFuser::HandleMatchedResults(
@@ -47,7 +47,7 @@ void Conv2DActivationFuser::BuildPattern() {
// Operand patterns
auto conv2d_input_pattern =
CreatePattern("conv2d_input")->IsOperationInputOperand(conv2d_type_, 0);
int conv2d_fuse_code_index;
int conv2d_fuse_code_index = -1;
if (conv2d_type_ == NNADAPTER_CONV_2D) {
conv2d_fuse_code_index = 8;
} else if (conv2d_type_ == NNADAPTER_CONV_2D_TRANSPOSE) {
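Giving conv2d_fuse_code_index a -1 initializer here, and in the identical Conv2DAddFuser hunk below, looks like defensive hardening: the if / else if chain that follows has no terminating else, so for an unexpected conv2d_type_ the index would be read uninitialized, which compilers flag when they can see it (-Wmaybe-uninitialized on GCC, -Wsometimes-uninitialized on Clang). A sketch of the sentinel pattern, using stand-in values rather than the real NNAdapter constants:

// sentinel_index.cc  (illustrative; values are made up)
enum ConvKind { kConv2D, kConv2DTranspose, kSomethingElse };

int FuseCodeIndex(ConvKind kind) {
  int index = -1;                // sentinel: defined on every path
  if (kind == kConv2D) {
    index = 8;                   // matches the real CONV_2D case above
  } else if (kind == kConv2DTranspose) {
    index = 9;                   // stand-in; the real value is folded out of this diff
  }
  return index;                  // callers can reject -1 instead of reading garbage
}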
@@ -44,7 +44,7 @@ void Conv2DAddFuser::BuildPattern() {
// Operand patterns
auto conv2d_input_pattern =
CreatePattern("conv2d_input")->IsOperationInputOperand(conv2d_type_, 0);
int conv2d_fuse_code_index;
int conv2d_fuse_code_index = -1;
if (conv2d_type_ == NNADAPTER_CONV_2D) {
conv2d_fuse_code_index = 8;
} else if (conv2d_type_ == NNADAPTER_CONV_2D_TRANSPOSE) {
@@ -91,8 +91,6 @@ bool Conv2DAddFuser::HandleMatchedResults(
// Get the operands and operations from the matched subgraph nodes.
auto conv2d_operation = nodes.at("conv2d")->operation;
auto conv2d_fuse_code_operand = nodes.at("conv2d_fuse_code")->operand;
auto conv2d_input_operand = conv2d_operation->input_operands[0];
auto conv2d_output_operand = conv2d_operation->output_operands[0];
auto conv2d_filter_operand = conv2d_operation->input_operands[1];
auto conv2d_bias_operand = conv2d_operation->input_operands[2];
auto conv2d_group =
@@ -103,7 +101,6 @@
conv2d_output_channel_size =
conv2d_filter_operand->type.dimensions.data[1] * conv2d_group;
}
auto add_operation = nodes.at("add")->operation;
auto add_input_operand = nodes.at("add_input")->operand;
auto add_fuse_code_operand = nodes.at("add_fuse_code")->operand;
auto add_output_operand = nodes.at("add_output")->operand;
@@ -119,7 +119,6 @@ bool MatMulDequantAddFuser::HandleMatchedResults(
auto add_output_operand = add_operation->output_operands[0];
auto dequant_operation = nodes.at("dequant")->operation;
auto dequant_input_operand = dequant_operation->input_operands[0];
auto dequant_output_operand = dequant_operation->output_operands[0];
auto matmul_transpose_y =
*reinterpret_cast<bool*>(matmul_transpose_y_operand->buffer);
auto matmul_num_units =
@@ -36,7 +36,7 @@ class RemoveUselessCastFuser : public PatternMatcher {

void RemoveUselessCastFuser::BuildPattern() {
// Create patterns
auto cast = CreatePattern("cast", NNADAPTER_CAST);
CreatePattern("cast", NNADAPTER_CAST);
}

bool RemoveUselessCastFuser::HandleMatchedResults(
@@ -149,7 +149,6 @@ void RemoveUselessMulFuser::BuildPattern() {
bool RemoveUselessMulFuser::HandleMatchedResults(
core::Model* model, const std::map<std::string, Node*>& nodes) {
auto mul_x = nodes.at("mul_x")->operand;
auto mul = nodes.at("mul")->operation;
auto mul_out = nodes.at("mul_out")->operand;
if (IsModelInputOperand(mul_x) && IsModelOutputOperand(mul_out)) return false;

5 changes: 4 additions & 1 deletion lite/backends/nnadapter/nnadapter/src/runtime/device.cc
@@ -74,6 +74,7 @@ class Context {
};

Context::Context(void* device, const char* properties) : device_(device) {
NNADAPTER_CHECK(device_);
// Extract the runtime parameters from the context properties
NNADAPTER_LOG(INFO) << "properties: " << std::string(properties);
std::string key_value;
@@ -89,7 +90,9 @@ Context::Context(void* device, const char* properties) : device_(device) {

class Program {
public:
explicit Program(Context* context) : context_(context) {}
explicit Program(Context* context) : context_(context) {
NNADAPTER_CHECK(context_);
}
~Program() { Clear(); }

int Validate(const core::Model* model, bool* supported_operations);
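The two runtime changes above add precondition checks rather than fix warnings: Context and Program now assert their pointer arguments with NNADAPTER_CHECK as soon as they are constructed, so a null device or context presumably fails fast with a diagnostic instead of surfacing later as a crash deep inside the runtime.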
6 changes: 3 additions & 3 deletions lite/backends/x86/math/conv_direct_fp32.cc
@@ -180,10 +180,10 @@ void conv_direct::generate_code(int ic,

// compute output whole line
// three parts: [left, middle, right]
auto cal_out_whole_line = [=, &temp](int oc_group, int ic_group) {
auto cal_out_whole_line = [=](int oc_group, int ic_group) {
int ow_bulk_i = ow / ow_bulk;

auto cal_bulk = [=, &temp](
auto cal_bulk = [=](
int oc_group, int ic_group, int l_pad, int r_pad, int bulk) {
load(oc_group, bulk);

@@ -255,7 +255,7 @@ void conv_direct::generate_code(int ic,
};

//
auto cal_with_ic_fixed = [=, &temp](int ic_group) {
auto cal_with_ic_fixed = [=](int ic_group) {
// ic_group is fixed
// according to oc !
Xbyak::Label label_oc_remain;
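The capture-list change in conv_direct_fp32.cc drops &temp from three lambdas. The likely reason is that temp is never referenced inside those lambda bodies; an explicitly captured but unused entity is something Clang diagnoses with -Wunused-lambda-capture (fatal once warnings are errors), and a dead by-reference capture only obscures the code. A small sketch, not from the repository:

// lambda_capture.cc  (illustrative; clang++ -Wunused-lambda-capture -Werror -c lambda_capture.cc)
#include <vector>

int SumWithBias(const std::vector<int>& values, int bias) {
  int temp = 0;  // used later in this function, but not inside the lambda
  // Writing [=, &temp] here would trip -Wunused-lambda-capture under Clang,
  // because the explicit &temp capture is never used in the body.
  auto add_bias = [=](int x) { return x + bias; };
  int total = 0;
  for (int x : values) total += add_bias(x);
  temp = total;  // keep temp genuinely used in the outer scope
  return temp;
}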
9 changes: 3 additions & 6 deletions lite/core/optimizer/mir/fusion/quant_dequant_op_fuser.cc
@@ -322,13 +322,12 @@ void DequantOpFuser::InsertNewNode(SSAGraph* graph,
#endif
op_desc.SetInputScale(weight_name, weight_scale);

// change the weight from the float type to int8 type.
// change the weight from the float type to int8 type.
#ifdef LITE_WITH_FPGA
Tensor temp_tensor;
temp_tensor.CopyDataFrom(*quantized_weight_t);
float* temp_data = temp_tensor.mutable_data<float>();
size_t weight_num = quantized_weight_t->data_size();

#ifdef LITE_WITH_FPGA
float* quantized_weight_data = quantized_weight_t->mutable_data<float>();
for (size_t i = 0; i < weight_num; i++) {
quantized_weight_data[i] = temp_data[i] * whole_weight_scale;
@@ -442,11 +441,10 @@ void ChannelWiseDequantOpFuser::InsertNewNode(SSAGraph* graph,
auto quantized_weight_var_name = quantized_op_weight->arg()->name;
auto quantized_weight_t =
scope->FindVar(quantized_weight_var_name)->GetMutable<lite::Tensor>();
#ifdef LITE_WITH_FPGA
Tensor temp_tensor;
temp_tensor.CopyDataFrom(*quantized_weight_t);
float* temp_data = temp_tensor.mutable_data<float>();

#ifdef LITE_WITH_FPGA
float* quantized_weight_data = quantized_weight_t->mutable_data<float>();
int channel = channel_scale_tensor->data_size();
int weight_chw = quantized_weight_t->data_size() / channel;
Expand All @@ -456,7 +454,6 @@ void ChannelWiseDequantOpFuser::InsertNewNode(SSAGraph* graph,
}
quantized_weight_t->set_persistable(true);
quantized_weight_t->set_precision(PRECISION(kFloat));

#else
CastPersistableTensorInPlace(quantized_weight_t, weight_bit_length);
#endif
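Both hunks in quant_dequant_op_fuser.cc appear to tighten the LITE_WITH_FPGA boundaries: the temp_tensor copy and the temp_data / weight_num locals are only consumed on the FPGA path, so the #ifdef now encloses them and non-FPGA builds no longer define temporaries they never use, which the stricter warnings would reject.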
2 changes: 2 additions & 0 deletions lite/kernels/opencl/conv_image_compute.cc
@@ -1841,6 +1841,8 @@ void ConvImageCompute::Conv2d1x1opt() {
}
}

void ConvImageCompute::Conv2d3x3() { LOG(FATAL) << "Not implement."; }

void ConvImageCompute::Conv2dnxnopt() {
int arg_idx = 0;
status_ = kernel_.setArg(arg_idx++, c_blk_);
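The one-line Conv2d3x3 body is a fail-fast stub: presumably the member is declared (or selected through the kernel-dispatch table) without having had a definition, and giving it a LOG(FATAL) body keeps the target linking while making any accidental dispatch to it immediately visible.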
3 changes: 0 additions & 3 deletions lite/kernels/xpu/topk_v2_compute.cc
@@ -25,9 +25,6 @@ namespace xpu {
void TopkV2Compute::Run() {
auto& param = this->Param<operators::TopkParam>();
auto& ctx = this->ctx_->As<XPUContext>();
const float* x_data = param.X->data<float>();
float* out_val = param.Out->mutable_data<float>();
auto out_ind = param.Indices->mutable_data<int64_t>();

DDim x_dims = param.X->dims();
int axis = param.axis;
7 changes: 4 additions & 3 deletions lite/operators/__xpu__multi_encoder_op.cc
@@ -143,9 +143,10 @@ bool XPUMultiEncoderOp::AttachImpl(const cpp::OpDesc& op_desc,
param_.norm_before = op_desc.GetAttr<bool>("norm_before");
param_.adaptive_seqlen = op_desc.GetAttr<bool>("adaptive_seqlen");
param_.per_channel = op_desc.GetAttr<bool>("per_channel");
if (op_desc.HasAttr("enable_int8") && op_desc.GetAttr<bool>("enable_int8") ||
op_desc.HasAttr("enable_int16") &&
op_desc.GetAttr<bool>("enable_int16")) {
if ((op_desc.HasAttr("enable_int8") &&
op_desc.GetAttr<bool>("enable_int8")) ||
(op_desc.HasAttr("enable_int16") &&
op_desc.GetAttr<bool>("enable_int16"))) {
param_.input_max = op_desc.GetAttr<std::vector<float>>("FCInputMax");
}
param_.weight_max.clear();
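The extra parentheses in __xpu__multi_encoder_op.cc do not change the result: && already binds tighter than ||, so the old expression grouped as (int8 test) || (int16 test). They do, however, silence -Wparentheses, which -Wall enables and which -Werror would otherwise turn into a build break when && appears inside || without explicit grouping. The same shape in isolation:

// parentheses.cc  (illustrative; g++ -Wall -Werror -c parentheses.cc)
bool UseQuantizedInputMax(bool has_int8, bool int8_on, bool has_int16, bool int16_on) {
  // return has_int8 && int8_on || has_int16 && int16_on;   // -Werror=parentheses
  return (has_int8 && int8_on) || (has_int16 && int16_on);  // identical value, no warning
}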
11 changes: 0 additions & 11 deletions lite/tests/kernels/generate_proposals_v2_compute_test.cc
@@ -258,10 +258,6 @@ class GenerateProposalsV2ComputeTester : public arena::TestCase {
const T* im_info_data = im_info.data<T>();
T offset = pixel_offset ? static_cast<T>(1) : 0;
T zero(0);
T im_w =
is_scale ? round(im_info_data[1] / im_info_data[2]) : im_info_data[1];
T im_h =
is_scale ? round(im_info_data[0] / im_info_data[2]) : im_info_data[0];
for (int64_t i = 0; i < boxes->numel(); ++i) {
if (i % 4 == 0) {
boxes_data[i] =
@@ -301,13 +297,6 @@ class GenerateProposalsV2ComputeTester : public arena::TestCase {
T x_ctr = boxes_data[4 * i] + ws / 2;
T y_ctr = boxes_data[4 * i + 1] + hs / 2;

if (is_scale) {
T ws =
(boxes_data[4 * i + 2] - boxes_data[4 * i]) / im_info_data[2] + 1;
T hs = (boxes_data[4 * i + 3] - boxes_data[4 * i + 1]) /
im_info_data[2] +
1;
}
if (ws >= min_size && hs >= min_size && x_ctr <= im_info_data[1] &&
y_ctr <= im_info_data[0]) {
keep_data[keep_len++] = i;
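The lines deleted from this test are dead code: im_w and im_h were computed but never read, and the removed if (is_scale) block only declared fresh ws / hs locals that shadowed, and never fed back into, the outer ones used just below. Dropping the block therefore leaves the reference results unchanged while removing unused-variable errors.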
2 changes: 0 additions & 2 deletions lite/tests/kernels/roll_compute_test.cc
@@ -97,7 +97,6 @@ class RollComputeTester : public arena::TestCase {

void RunBaseline(Scope* scope) override {
auto* x = scope->FindTensor(x_);
const auto* x_data = x->data<float>();
std::vector<int64_t> shifts;
if (!shifts_tensor_.empty()) {
auto* shift = scope->FindTensor(shifts_tensor_);
Expand All @@ -121,7 +120,6 @@ class RollComputeTester : public arena::TestCase {
out->CopyDataFrom(*x);
auto* out_data = out->mutable_data<float>();
for (size_t i = 0; i < nums; i++) {
int64_t input_size = input_dim.size();
ShiftAlongDim(out_data, input_dim, axis_[i], shifts_[i]);
}
}
2 changes: 1 addition & 1 deletion lite/tests/math/sgemm_compute_test.cc
@@ -122,7 +122,6 @@ bool test_sgemm(bool tra,
auto da = ta.mutable_data<float>();
auto db = tb.mutable_data<float>();
auto dc = tc.mutable_data<float>();
auto dc_sve = tc_sve.mutable_data<float>();
auto dc_basic = tc_basic.mutable_data<float>();
auto dc_backup = tc_backup.mutable_data<float>();
auto dbias = tbias.mutable_data<float>();
@@ -212,6 +211,7 @@
}
#ifdef LITE_WITH_ARM8_SVE2
// sve
auto dc_sve = tc_sve.mutable_data<float>();
Timer t1;
for (int i = 0; i < FLAGS_repeats; ++i) {
if (i == FLAGS_repeats - 1) {
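The sgemm test change follows the same pattern as the quant/dequant fuser above: dc_sve is only consumed inside the LITE_WITH_ARM8_SVE2 block, so it is now obtained there as well; otherwise non-SVE builds would see an unused pointer and, under the new flags, an -Werror=unused-variable failure. A generic sketch of the pattern, assuming a made-up FEATURE_X macro:

// guarded_local.cc  (illustrative; g++ -Wall -Wextra -Werror -c guarded_local.cc)
float g_scratch[64];

void RunKernels(float* out, int n) {
  // float* feature_out = g_scratch;   // declared here it is unused, and therefore a
  //                                   // build error, whenever FEATURE_X is not defined
#ifdef FEATURE_X
  float* feature_out = g_scratch;      // declared only where it is actually used
  for (int i = 0; i < n && i < 64; ++i) feature_out[i] = out[i] * 2.0f;
#endif
  for (int i = 0; i < n; ++i) out[i] += 1.0f;  // common path
}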