diff --git a/src/layer/arm/gru_int8.h b/src/layer/arm/gru_int8.h
index 388d754677b..78696de401e 100644
--- a/src/layer/arm/gru_int8.h
+++ b/src/layer/arm/gru_int8.h
@@ -503,7 +503,7 @@ static void gru_int8_gate_output(const Mat& gates, Mat& hidden_state, Mat& top_b
                 "st1 {v0.4h}, [%0] \n"
                 : "=r"(outptr) // %0
                 : "0"(outptr),
-                "w"(_gru_H0)
+                "w"(_gru_H0)
                 : "memory", "v0");
 #else // __aarch64__
             asm volatile(
@@ -511,7 +511,7 @@ static void gru_int8_gate_output(const Mat& gates, Mat& hidden_state, Mat& top_b
                 "vst1.u16 {d0}, [%0] \n"
                 : "=r"(outptr) // %0
                 : "0"(outptr),
-                "w"(_gru_H0)
+                "w"(_gru_H0)
                 : "memory", "q0");
 #endif // __aarch64__
 #else // NCNN_GNU_INLINE_ASM
diff --git a/src/layer/arm/lstm_int8.h b/src/layer/arm/lstm_int8.h
index e4cebf0ab22..b58ebfde398 100644
--- a/src/layer/arm/lstm_int8.h
+++ b/src/layer/arm/lstm_int8.h
@@ -256,7 +256,7 @@ static void lstm_int8_gate_output(const Mat& gates, const Mat& weight_hr, Mat& h
                 "st1 {v0.4h}, [%0] \n"
                 : "=r"(outptr) // %0
                 : "0"(outptr),
-                "w"(_lstm_H)
+                "w"(_lstm_H)
                 : "memory", "v0");
 #else // __aarch64__
             asm volatile(
@@ -264,7 +264,7 @@ static void lstm_int8_gate_output(const Mat& gates, const Mat& weight_hr, Mat& h
                 "vst1.u16 {d0}, [%0] \n"
                 : "=r"(outptr) // %0
                 : "0"(outptr),
-                "w"(_lstm_H)
+                "w"(_lstm_H)
                 : "memory", "q0");
 #endif // __aarch64__
 #else // NCNN_GNU_INLINE_ASM
diff --git a/src/layer/arm/rnn_int8.h b/src/layer/arm/rnn_int8.h
index dc96be3e64a..0dbf849fb85 100644
--- a/src/layer/arm/rnn_int8.h
+++ b/src/layer/arm/rnn_int8.h
@@ -259,7 +259,7 @@ static void rnn_int8_gate_output(const Mat& gates, Mat& hidden_state, Mat& top_b
                 "st1 {v0.4h}, [%0] \n"
                 : "=r"(outptr) // %0
                 : "0"(outptr),
-                "w"(_rnn_H)
+                "w"(_rnn_H)
                 : "memory", "v0");
 #else // __aarch64__
             asm volatile(
@@ -267,7 +267,7 @@ static void rnn_int8_gate_output(const Mat& gates, Mat& hidden_state, Mat& top_b
                 "vst1.u16 {d0}, [%0] \n"
                 : "=r"(outptr) // %0
                 : "0"(outptr),
-                "w"(_rnn_H)
+                "w"(_rnn_H)
                 : "memory", "q0");
 #endif // __aarch64__
 #else // NCNN_GNU_INLINE_ASM
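All six hunks touch the same inline-asm operand list: an output pointer `"=r"(outptr)`, the same pointer tied back as input `"0"(outptr)`, a NEON vector passed through the `"w"` constraint, and `"memory"` plus the touched register listed as clobbers. The snippet below is a minimal, self-contained sketch of that pattern on AArch64, not ncnn's code: the helper name `store_f32x4_as_f16x4` and the `fcvtn` conversion step are assumptions for illustration; only the store line and the constraint layout come from the hunks.

```cpp
// Minimal sketch (hypothetical helper, not ncnn's actual code) of the
// inline-asm operand pattern the hunks above adjust.
#if __aarch64__
#include <arm_neon.h>

static void store_f32x4_as_f16x4(unsigned short* outptr, float32x4_t _v)
{
    asm volatile(
        "fcvtn  v0.4h, %2.4s        \n" // illustrative: narrow 4x fp32 to 4x fp16 in v0
        "st1    {v0.4h}, [%0]       \n" // store the 4 halfwords to outptr
        : "=r"(outptr) // %0: output pointer
        : "0"(outptr), // %1: tie the input pointer to operand %0
        "w"(_v)        // %2: vector operand placed in a SIMD/FP register
        : "memory", "v0");
}
#endif // __aarch64__
```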