decouple cpu vulkan
nihui committed Jan 4, 2024
1 parent 6c046ba commit 8a29df6
Showing 50 changed files with 95 additions and 95 deletions.
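
Every hunk below is the same one-line substitution: wherever a CPU layer implementation builds an internal helper layer, ncnn::create_layer is replaced with ncnn::create_layer_cpu, so a CPU code path can no longer be handed a Vulkan-capable helper. That is the decoupling of CPU and Vulkan named in the commit title.

For orientation, here is a minimal sketch of the call-site pattern being rewritten. It assumes the usual ncnn layer lifecycle (load_param, create_pipeline, forward, destroy_pipeline); the helper function run_helper_gemm and its Gemm parameters are hypothetical and not part of this commit.

    // Minimal sketch (hypothetical helper). Only create_layer_cpu is new in
    // this commit; the rest is the existing ncnn layer lifecycle.
    #include <vector>

    #include "layer.h"      // ncnn::Layer, ncnn::create_layer_cpu
    #include "layer_type.h" // ncnn::LayerType

    static int run_helper_gemm(const ncnn::Mat& A, const ncnn::Mat& B, ncnn::Mat& C, const ncnn::Option& opt)
    {
        // was: ncnn::create_layer(ncnn::LayerType::Gemm), which may resolve to a
        // Vulkan-capable layer depending on how ncnn was built and configured
        ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Gemm);

        ncnn::ParamDict pd;
        pd.set(2, 0); // transA
        pd.set(3, 0); // transB
        op->load_param(pd);

        op->create_pipeline(opt);

        std::vector<ncnn::Mat> bottom_blobs(2);
        bottom_blobs[0] = A;
        bottom_blobs[1] = B;
        std::vector<ncnn::Mat> top_blobs(1);
        int ret = op->forward(bottom_blobs, top_blobs, opt);
        C = top_blobs[0];

        op->destroy_pipeline(opt);
        delete op;
        return ret;
    }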
2 changes: 1 addition & 1 deletion src/layer/arm/convolution1d_arm.cpp
@@ -196,7 +196,7 @@ int Convolution1D_arm::forward(const std::vector<Mat>& bottom_blobs, std::vector
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution1D);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Convolution1D);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
4 changes: 2 additions & 2 deletions src/layer/arm/convolution_arm.cpp
@@ -157,7 +157,7 @@ int Convolution_arm::create_pipeline(const Option& opt)
 
     if ((!support_packing || !opt.use_packing_layout) && !opt.use_bf16_storage && kernel_w == kernel_h && dilation_w != 1 && dilation_h == dilation_w && stride_w == 1 && stride_h == 1)
     {
-        convolution_dilation1 = ncnn::create_layer(ncnn::LayerType::Convolution);
+        convolution_dilation1 = ncnn::create_layer_cpu(ncnn::LayerType::Convolution);
 
         // set param
         ncnn::ParamDict pd;
@@ -807,7 +807,7 @@ int Convolution_arm::forward(const std::vector<Mat>& bottom_blobs, std::vector<M
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Convolution);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
4 changes: 2 additions & 2 deletions src/layer/arm/convolutiondepthwise_arm.cpp
@@ -206,7 +206,7 @@ int ConvolutionDepthWise_arm::create_group_ops(const Option& opt)
         if (bias_term)
             bias_data_g = bias_data.range(num_output_g * g, num_output_g);
 
-        ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution);
+        ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Convolution);
 
         // set param
         ncnn::ParamDict pd;
@@ -650,7 +650,7 @@ int ConvolutionDepthWise_arm::forward(const std::vector<Mat>& bottom_blobs, std:
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::ConvolutionDepthWise);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::ConvolutionDepthWise);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
4 changes: 2 additions & 2 deletions src/layer/arm/deconvolution_arm.cpp
@@ -85,7 +85,7 @@ int Deconvolution_arm::create_pipeline(const Option& opt)
     {
         const int maxk = kernel_w * kernel_h;
 
-        gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
+        gemm = ncnn::create_layer_cpu(ncnn::LayerType::Gemm);
 
         ncnn::ParamDict pd;
         pd.set(2, 1); // transA
@@ -851,7 +851,7 @@ int Deconvolution_arm::forward(const std::vector<Mat>& bottom_blobs, std::vector
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Deconvolution);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Deconvolution);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
2 changes: 1 addition & 1 deletion src/layer/arm/deconvolution_arm_asimdhp.cpp
@@ -45,7 +45,7 @@ int Deconvolution_arm::create_pipeline_fp16s(const Option& opt)
     {
         const int maxk = kernel_w * kernel_h;
 
-        gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
+        gemm = ncnn::create_layer_cpu(ncnn::LayerType::Gemm);
 
         ncnn::ParamDict pd;
         pd.set(2, 1); // transA
4 changes: 2 additions & 2 deletions src/layer/arm/deconvolutiondepthwise_arm.cpp
@@ -148,7 +148,7 @@ int DeconvolutionDepthWise_arm::create_pipeline(const Option& opt)
         if (bias_term)
             bias_data_g = bias_data.range(num_output_g * g, num_output_g);
 
-        ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Deconvolution);
+        ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Deconvolution);
 
         // set param
         ncnn::ParamDict pd;
@@ -562,7 +562,7 @@ int DeconvolutionDepthWise_arm::forward(const std::vector<Mat>& bottom_blobs, st
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::DeconvolutionDepthWise);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::DeconvolutionDepthWise);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
2 changes: 1 addition & 1 deletion src/layer/arm/deconvolutiondepthwise_arm_asimdhp.cpp
@@ -100,7 +100,7 @@ int DeconvolutionDepthWise_arm::create_pipeline_fp16s(const Option& opt)
         if (bias_term)
             bias_data_g = bias_data.range(num_output_g * g, num_output_g);
 
-        ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Deconvolution);
+        ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Deconvolution);
 
         // set param
         ncnn::ParamDict pd;
2 changes: 1 addition & 1 deletion src/layer/arm/innerproduct_arm.cpp
@@ -46,7 +46,7 @@ InnerProduct_arm::InnerProduct_arm()
 int InnerProduct_arm::create_pipeline(const Option& opt)
 {
     {
-        flatten = ncnn::create_layer(ncnn::LayerType::Flatten);
+        flatten = ncnn::create_layer_cpu(ncnn::LayerType::Flatten);
 
         ncnn::ParamDict pd;
 
2 changes: 1 addition & 1 deletion src/layer/arm/matmul_arm.cpp
@@ -37,7 +37,7 @@ MatMul_arm::MatMul_arm()
 
 int MatMul_arm::create_pipeline(const Option& opt)
 {
-    gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
+    gemm = ncnn::create_layer_cpu(ncnn::LayerType::Gemm);
 
     ncnn::ParamDict pd;
     pd.set(2, 0); // transA
14 changes: 7 additions & 7 deletions src/layer/arm/multiheadattention_arm.cpp
@@ -48,7 +48,7 @@ int MultiHeadAttention_arm::create_pipeline(const Option& _opt)
     opt.use_bf16_storage &= support_bf16_storage;
 
     {
-        qk_softmax = ncnn::create_layer(ncnn::LayerType::Softmax);
+        qk_softmax = ncnn::create_layer_cpu(ncnn::LayerType::Softmax);
         ncnn::ParamDict pd;
         pd.set(0, -1);
         pd.set(1, 1);
@@ -61,7 +61,7 @@ int MultiHeadAttention_arm::create_pipeline(const Option& _opt)
         const int embed_dim_per_head = embed_dim / num_heads;
         const float inv_sqrt_embed_dim_per_head = 1.f / sqrtf(embed_dim_per_head);
 
-        q_gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
+        q_gemm = ncnn::create_layer_cpu(ncnn::LayerType::Gemm);
         ncnn::ParamDict pd;
         pd.set(0, inv_sqrt_embed_dim_per_head);
         pd.set(1, 1.f);
@@ -92,7 +92,7 @@ int MultiHeadAttention_arm::create_pipeline(const Option& _opt)
     }
 
     {
-        k_gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
+        k_gemm = ncnn::create_layer_cpu(ncnn::LayerType::Gemm);
         ncnn::ParamDict pd;
         pd.set(2, 0); // transA
         pd.set(3, 1); // transB
@@ -121,7 +121,7 @@ int MultiHeadAttention_arm::create_pipeline(const Option& _opt)
     }
 
     {
-        v_gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
+        v_gemm = ncnn::create_layer_cpu(ncnn::LayerType::Gemm);
         ncnn::ParamDict pd;
         pd.set(2, 0); // transA
         pd.set(3, 1); // transB
@@ -150,7 +150,7 @@ int MultiHeadAttention_arm::create_pipeline(const Option& _opt)
     }
 
     {
-        o_gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
+        o_gemm = ncnn::create_layer_cpu(ncnn::LayerType::Gemm);
         ncnn::ParamDict pd;
         pd.set(2, 1); // transA
         pd.set(3, 1); // transB
@@ -177,7 +177,7 @@ int MultiHeadAttention_arm::create_pipeline(const Option& _opt)
     }
 
     {
-        qk_gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
+        qk_gemm = ncnn::create_layer_cpu(ncnn::LayerType::Gemm);
         ncnn::ParamDict pd;
         pd.set(2, 1); // transA
         pd.set(3, 0); // transB
@@ -198,7 +198,7 @@ int MultiHeadAttention_arm::create_pipeline(const Option& _opt)
     }
 
     {
-        qkv_gemm = ncnn::create_layer(ncnn::LayerType::Gemm);
+        qkv_gemm = ncnn::create_layer_cpu(ncnn::LayerType::Gemm);
         ncnn::ParamDict pd;
         pd.set(2, 0); // transA
         pd.set(3, 1); // transB
2 changes: 1 addition & 1 deletion src/layer/convolution.cpp
@@ -210,7 +210,7 @@ int Convolution::forward(const Mat& bottom_blob, Mat& top_blob, const Option& op
         if (bottom_blob.w * bottom_blob.elempack == num_input)
         {
             // call InnerProduct
-            ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::InnerProduct);
+            ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::InnerProduct);
 
             // set param
             ncnn::ParamDict pd;
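
A side note on the convolution.cpp hunk above: when the input blob is a flattened vector whose width times elempack equals num_input, the convolution window covers the entire input, so each output channel reduces to a bias plus a weighted sum over all inputs, which is exactly an inner product (hence the delegation to InnerProduct). In LaTeX, with C_in = num_input:

    y_o = b_o + \sum_{i=1}^{C_{\mathrm{in}}} w_{o,i} \, x_i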
12 changes: 6 additions & 6 deletions src/layer/fused_activation.h
@@ -80,22 +80,22 @@ static ncnn::Layer* create_activation_layer(int activation_type, const ncnn::Mat
 
     if (activation_type == 1)
     {
-        activation = ncnn::create_layer(ncnn::LayerType::ReLU);
+        activation = ncnn::create_layer_cpu(ncnn::LayerType::ReLU);
 
         ncnn::ParamDict pd;
         activation->load_param(pd);
     }
     else if (activation_type == 2)
     {
-        activation = ncnn::create_layer(ncnn::LayerType::ReLU);
+        activation = ncnn::create_layer_cpu(ncnn::LayerType::ReLU);
 
         ncnn::ParamDict pd;
         pd.set(0, activation_params[0]); // slope
         activation->load_param(pd);
     }
     else if (activation_type == 3)
     {
-        activation = ncnn::create_layer(ncnn::LayerType::Clip);
+        activation = ncnn::create_layer_cpu(ncnn::LayerType::Clip);
 
         ncnn::ParamDict pd;
         pd.set(0, activation_params[0]); // min
@@ -105,21 +105,21 @@ static ncnn::Layer* create_activation_layer(int activation_type, const ncnn::Mat
     }
     else if (activation_type == 4)
     {
-        activation = ncnn::create_layer(ncnn::LayerType::Sigmoid);
+        activation = ncnn::create_layer_cpu(ncnn::LayerType::Sigmoid);
 
         ncnn::ParamDict pd;
         activation->load_param(pd);
     }
     else if (activation_type == 5)
     {
-        activation = ncnn::create_layer(ncnn::LayerType::Mish);
+        activation = ncnn::create_layer_cpu(ncnn::LayerType::Mish);
 
         ncnn::ParamDict pd;
         activation->load_param(pd);
     }
     else if (activation_type == 6)
     {
-        activation = ncnn::create_layer(ncnn::LayerType::HardSwish);
+        activation = ncnn::create_layer_cpu(ncnn::LayerType::HardSwish);
 
         ncnn::ParamDict pd;
         pd.set(0, activation_params[0]); // alpha
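
For context, create_activation_layer is the shared helper in fused_activation.h that CPU layers call to build their fused activation; after loading the params it also creates the layer's pipeline before returning. A hypothetical caller, with the Clip bounds 0 and 6 (ReLU6) made up for the example:

    // Hypothetical usage of create_activation_layer (fused_activation.h).
    // Assumes the helper has already run create_pipeline(opt) on the layer.
    ncnn::Mat activation_params(2);
    activation_params[0] = 0.f; // min
    activation_params[1] = 6.f; // max
    ncnn::Layer* activation = create_activation_layer(3 /* Clip */, activation_params, opt);

    // ... after the layer has produced top_blob ...
    activation->forward_inplace(top_blob, opt);

    activation->destroy_pipeline(opt);
    delete activation;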
2 changes: 1 addition & 1 deletion src/layer/loongarch/convolution1d_loongarch.cpp
@@ -342,7 +342,7 @@ int Convolution1D_loongarch::forward(const std::vector<Mat>& bottom_blobs, std::
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution1D);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Convolution1D);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
2 changes: 1 addition & 1 deletion src/layer/loongarch/convolution_loongarch.cpp
@@ -593,7 +593,7 @@ int Convolution_loongarch::forward(const std::vector<Mat>& bottom_blobs, std::ve
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Convolution);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
4 changes: 2 additions & 2 deletions src/layer/loongarch/convolutiondepthwise_loongarch.cpp
@@ -125,7 +125,7 @@ int ConvolutionDepthWise_loongarch::create_group_ops(const Option& opt)
         if (bias_term)
             bias_data_g = bias_data.range(num_output_g * g, num_output_g);
 
-        ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution);
+        ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Convolution);
 
         // set param
         ncnn::ParamDict pd;
@@ -537,7 +537,7 @@ int ConvolutionDepthWise_loongarch::forward(const std::vector<Mat>& bottom_blobs
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::ConvolutionDepthWise);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::ConvolutionDepthWise);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
2 changes: 1 addition & 1 deletion src/layer/loongarch/deconvolution_loongarch.cpp
@@ -348,7 +348,7 @@ int Deconvolution_loongarch::forward(const std::vector<Mat>& bottom_blobs, std::
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Deconvolution);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Deconvolution);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
4 changes: 2 additions & 2 deletions src/layer/loongarch/deconvolutiondepthwise_loongarch.cpp
@@ -119,7 +119,7 @@ int DeconvolutionDepthWise_loongarch::create_group_ops(const Option& opt)
         if (bias_term)
             bias_data_g = bias_data.range(num_output_g * g, num_output_g);
 
-        ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Deconvolution);
+        ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Deconvolution);
 
         // set param
         ncnn::ParamDict pd;
@@ -476,7 +476,7 @@ int DeconvolutionDepthWise_loongarch::forward(const std::vector<Mat>& bottom_blo
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::DeconvolutionDepthWise);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::DeconvolutionDepthWise);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
2 changes: 1 addition & 1 deletion src/layer/loongarch/innerproduct_loongarch.cpp
@@ -37,7 +37,7 @@ InnerProduct_loongarch::InnerProduct_loongarch()
 int InnerProduct_loongarch::create_pipeline(const Option& opt)
 {
     {
-        flatten = ncnn::create_layer(ncnn::LayerType::Flatten);
+        flatten = ncnn::create_layer_cpu(ncnn::LayerType::Flatten);
 
         ncnn::ParamDict pd;
 
2 changes: 1 addition & 1 deletion src/layer/mips/convolution1d_mips.cpp
@@ -342,7 +342,7 @@ int Convolution1D_mips::forward(const std::vector<Mat>& bottom_blobs, std::vecto
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution1D);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Convolution1D);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
2 changes: 1 addition & 1 deletion src/layer/mips/convolution_mips.cpp
@@ -593,7 +593,7 @@ int Convolution_mips::forward(const std::vector<Mat>& bottom_blobs, std::vector<
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Convolution);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
4 changes: 2 additions & 2 deletions src/layer/mips/convolutiondepthwise_mips.cpp
@@ -125,7 +125,7 @@ int ConvolutionDepthWise_mips::create_group_ops(const Option& opt)
         if (bias_term)
             bias_data_g = bias_data.range(num_output_g * g, num_output_g);
 
-        ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution);
+        ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Convolution);
 
         // set param
         ncnn::ParamDict pd;
@@ -537,7 +537,7 @@ int ConvolutionDepthWise_mips::forward(const std::vector<Mat>& bottom_blobs, std
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::ConvolutionDepthWise);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::ConvolutionDepthWise);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
2 changes: 1 addition & 1 deletion src/layer/mips/deconvolution_mips.cpp
@@ -348,7 +348,7 @@ int Deconvolution_mips::forward(const std::vector<Mat>& bottom_blobs, std::vecto
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Deconvolution);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Deconvolution);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
4 changes: 2 additions & 2 deletions src/layer/mips/deconvolutiondepthwise_mips.cpp
@@ -119,7 +119,7 @@ int DeconvolutionDepthWise_mips::create_group_ops(const Option& opt)
         if (bias_term)
             bias_data_g = bias_data.range(num_output_g * g, num_output_g);
 
-        ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Deconvolution);
+        ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Deconvolution);
 
         // set param
         ncnn::ParamDict pd;
@@ -476,7 +476,7 @@ int DeconvolutionDepthWise_mips::forward(const std::vector<Mat>& bottom_blobs, s
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::DeconvolutionDepthWise);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::DeconvolutionDepthWise);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
2 changes: 1 addition & 1 deletion src/layer/mips/innerproduct_mips.cpp
@@ -37,7 +37,7 @@ InnerProduct_mips::InnerProduct_mips()
 int InnerProduct_mips::create_pipeline(const Option& opt)
 {
     {
-        flatten = ncnn::create_layer(ncnn::LayerType::Flatten);
+        flatten = ncnn::create_layer_cpu(ncnn::LayerType::Flatten);
 
         ncnn::ParamDict pd;
 
2 changes: 1 addition & 1 deletion src/layer/riscv/convolution1d_riscv.cpp
@@ -387,7 +387,7 @@ int Convolution1D_riscv::forward(const std::vector<Mat>& bottom_blobs, std::vect
         bias_data_flattened.elempack = 1;
     }
 
-    ncnn::Layer* op = ncnn::create_layer(ncnn::LayerType::Convolution1D);
+    ncnn::Layer* op = ncnn::create_layer_cpu(ncnn::LayerType::Convolution1D);
 
     ncnn::ParamDict pd;
     pd.set(0, _num_output);
(diff truncated: the remaining 25 of the 50 changed files are not shown)
