diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index 46346c4cd7ffc..cebb08e49afa3 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -10195,22 +10195,22 @@ void CodeGen::genArm64EmitterUnitTests() // IF_SVE_CN_3A theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, - INS_OPTS_SCALABLE_H_TO_SIMD_SCALAR); /* CLASTA , , , . */ + INS_OPTS_SCALABLE_H_WITH_SIMD_SCALAR); /* CLASTA , , , . */ theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, - INS_OPTS_SCALABLE_S_TO_SIMD_SCALAR); /* CLASTB , , , . */ + INS_OPTS_SCALABLE_S_WITH_SIMD_SCALAR); /* CLASTB , , , . */ theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, - INS_OPTS_SCALABLE_D_TO_SIMD_SCALAR); /* CLASTB , , , . */ + INS_OPTS_SCALABLE_D_WITH_SIMD_SCALAR); /* CLASTB , , , . */ // IF_SVE_CO_3A // Note: EA_4BYTE used for B and H (destination register is W) theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, - INS_OPTS_SCALABLE_B_TO_SCALAR); /* CLASTA , , , . */ + INS_OPTS_SCALABLE_B_WITH_SCALAR); /* CLASTA , , , . */ theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, - INS_OPTS_SCALABLE_H_TO_SCALAR); /* CLASTA , , , . */ + INS_OPTS_SCALABLE_H_WITH_SCALAR); /* CLASTA , , , . */ theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, - INS_OPTS_SCALABLE_S_TO_SCALAR); /* CLASTB , , , . */ + INS_OPTS_SCALABLE_S_WITH_SCALAR); /* CLASTB , , , . */ theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, - INS_OPTS_SCALABLE_D_TO_SCALAR); /* CLASTB , , , . */ + INS_OPTS_SCALABLE_D_WITH_SCALAR); /* CLASTB , , , . */ // IF_SVE_EP_3A theEmitter->emitIns_R_R_R(INS_sve_shadd, EA_SCALABLE, REG_V15, REG_P0, REG_V10, @@ -10300,11 +10300,11 @@ void CodeGen::genArm64EmitterUnitTests() // IF_SVE_HJ_3A theEmitter->emitIns_R_R_R(INS_sve_fadda, EA_2BYTE, REG_V21, REG_P6, REG_V14, - INS_OPTS_SCALABLE_H_TO_SIMD_SCALAR); /* FADDA , , , . */ + INS_OPTS_SCALABLE_H_WITH_SIMD_SCALAR); /* FADDA , , , . */ theEmitter->emitIns_R_R_R(INS_sve_fadda, EA_4BYTE, REG_V22, REG_P5, REG_V13, - INS_OPTS_SCALABLE_S_TO_SIMD_SCALAR); /* FADDA , , , . */ + INS_OPTS_SCALABLE_S_WITH_SIMD_SCALAR); /* FADDA , , , . */ theEmitter->emitIns_R_R_R(INS_sve_fadda, EA_8BYTE, REG_V23, REG_P4, REG_V12, - INS_OPTS_SCALABLE_D_TO_SIMD_SCALAR); /* FADDA , , , . */ + INS_OPTS_SCALABLE_D_WITH_SIMD_SCALAR); /* FADDA , , , . */ // IF_SVE_HL_3A theEmitter->emitIns_R_R_R(INS_sve_fabd, EA_SCALABLE, REG_V24, REG_P3, REG_V11, @@ -10342,59 +10342,59 @@ void CodeGen::genArm64EmitterUnitTests() // IF_SVE_AF_3A theEmitter->emitIns_R_R_R(INS_sve_andv, EA_1BYTE, REG_V0, REG_P0, REG_V0, - INS_OPTS_SCALABLE_B_TO_SIMD_SCALAR); /* ANDV , , . */ + INS_OPTS_SCALABLE_B_WITH_SIMD_SCALAR); /* ANDV , , . */ theEmitter->emitIns_R_R_R(INS_sve_eorv, EA_2BYTE, REG_V1, REG_P1, REG_V1, - INS_OPTS_SCALABLE_H_TO_SIMD_SCALAR); /* EORV , , . */ + INS_OPTS_SCALABLE_H_WITH_SIMD_SCALAR); /* EORV , , . */ theEmitter->emitIns_R_R_R(INS_sve_orv, EA_4BYTE, REG_V2, REG_P2, REG_V2, - INS_OPTS_SCALABLE_S_TO_SIMD_SCALAR); /* ORV , , . */ + INS_OPTS_SCALABLE_S_WITH_SIMD_SCALAR); /* ORV , , . */ theEmitter->emitIns_R_R_R(INS_sve_orv, EA_8BYTE, REG_V3, REG_P3, REG_V3, - INS_OPTS_SCALABLE_D_TO_SIMD_SCALAR); /* ORV , , . */ + INS_OPTS_SCALABLE_D_WITH_SIMD_SCALAR); /* ORV , , . 
*/

    // IF_SVE_AG_3A
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
-    theEmitter->emitIns_R_R_R(INS_sve_andqv, EA_1BYTE, REG_V4, REG_P4, REG_V4, INS_OPTS_SCALABLE_B_TO_SIMD_VECTOR);
+    theEmitter->emitIns_R_R_R(INS_sve_andqv, EA_8BYTE, REG_V4, REG_P4, REG_V4, INS_OPTS_SCALABLE_B_WITH_SIMD_VECTOR);
     /* ANDQV ., , . */
-    theEmitter->emitIns_R_R_R(INS_sve_eorqv, EA_2BYTE, REG_V5, REG_P5, REG_V5, INS_OPTS_SCALABLE_H_TO_SIMD_VECTOR);
+    theEmitter->emitIns_R_R_R(INS_sve_eorqv, EA_8BYTE, REG_V5, REG_P5, REG_V5, INS_OPTS_SCALABLE_H_WITH_SIMD_VECTOR);
     /* EORQV ., , . */
-    theEmitter->emitIns_R_R_R(INS_sve_orqv, EA_4BYTE, REG_V6, REG_P6, REG_V6, INS_OPTS_SCALABLE_S_TO_SIMD_VECTOR);
+    theEmitter->emitIns_R_R_R(INS_sve_orqv, EA_8BYTE, REG_V6, REG_P6, REG_V6, INS_OPTS_SCALABLE_S_WITH_SIMD_VECTOR);
     /* ORQV ., , . */
-    theEmitter->emitIns_R_R_R(INS_sve_orqv, EA_8BYTE, REG_V7, REG_P7, REG_V7, INS_OPTS_SCALABLE_D_TO_SIMD_VECTOR);
+    theEmitter->emitIns_R_R_R(INS_sve_orqv, EA_8BYTE, REG_V7, REG_P7, REG_V7, INS_OPTS_SCALABLE_D_WITH_SIMD_VECTOR);
     /* ORQV ., , . */
#endif // ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED

    // IF_SVE_AI_3A
    theEmitter->emitIns_R_R_R(INS_sve_saddv, EA_1BYTE, REG_V1, REG_P4, REG_V2,
-                             INS_OPTS_SCALABLE_B_TO_SIMD_SCALAR); /* SADDV , , . */
+                             INS_OPTS_SCALABLE_B_WITH_SIMD_SCALAR); /* SADDV , , . */
    theEmitter->emitIns_R_R_R(INS_sve_saddv, EA_2BYTE, REG_V2, REG_P5, REG_V3,
-                             INS_OPTS_SCALABLE_H_TO_SIMD_SCALAR); /* SADDV , , . */
+                             INS_OPTS_SCALABLE_H_WITH_SIMD_SCALAR); /* SADDV , , . */
    theEmitter->emitIns_R_R_R(INS_sve_uaddv, EA_4BYTE, REG_V3, REG_P6, REG_V4,
-                             INS_OPTS_SCALABLE_S_TO_SIMD_SCALAR); /* UADDV , , . */
+                             INS_OPTS_SCALABLE_S_WITH_SIMD_SCALAR); /* UADDV , , . */

    // IF_SVE_AJ_3A
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
-    theEmitter->emitIns_R_R_R(INS_sve_addqv, EA_8BYTE, REG_V21, REG_V7, REG_P22, INS_OPTS_SCALABLE_B_TO_SIMD_VECTOR);
+    theEmitter->emitIns_R_R_R(INS_sve_addqv, EA_8BYTE, REG_V21, REG_V7, REG_P22, INS_OPTS_SCALABLE_B_WITH_SIMD_VECTOR);
     /* ADDQV ., , . */
#endif // ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED

    // IF_SVE_AK_3A
    theEmitter->emitIns_R_R_R(INS_sve_smaxv, EA_8BYTE, REG_V15, REG_P7, REG_V4,
-                             INS_OPTS_SCALABLE_D_TO_SIMD_SCALAR); /* SMAXV , , . */
+                             INS_OPTS_SCALABLE_D_WITH_SIMD_SCALAR); /* SMAXV , , . */
    theEmitter->emitIns_R_R_R(INS_sve_sminv, EA_4BYTE, REG_V16, REG_P6, REG_V14,
-                             INS_OPTS_SCALABLE_S_TO_SIMD_SCALAR); /* SMINV , , . */
+                             INS_OPTS_SCALABLE_S_WITH_SIMD_SCALAR); /* SMINV , , . */
    theEmitter->emitIns_R_R_R(INS_sve_umaxv, EA_2BYTE, REG_V17, REG_P5, REG_V24,
-                             INS_OPTS_SCALABLE_H_TO_SIMD_SCALAR); /* UMAXV , , . */
+                             INS_OPTS_SCALABLE_H_WITH_SIMD_SCALAR); /* UMAXV , , . */
    theEmitter->emitIns_R_R_R(INS_sve_uminv, EA_1BYTE, REG_V18, REG_P4, REG_V31,
-                             INS_OPTS_SCALABLE_B_TO_SIMD_SCALAR); /* UMINV , , . */
+                             INS_OPTS_SCALABLE_B_WITH_SIMD_SCALAR); /* UMINV , , . */

    // IF_SVE_AL_3A
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED
-    theEmitter->emitIns_R_R_R(INS_sve_smaxqv, EA_1BYTE, REG_V0, REG_P5, REG_V25, INS_OPTS_SCALABLE_B_TO_SIMD_VECTOR);
+    theEmitter->emitIns_R_R_R(INS_sve_smaxqv, EA_8BYTE, REG_V0, REG_P5, REG_V25, INS_OPTS_SCALABLE_B_WITH_SIMD_VECTOR);
     /* SMAXQV ., , . */
-    theEmitter->emitIns_R_R_R(INS_sve_sminqv, EA_2BYTE, REG_V1, REG_P4, REG_V24, INS_OPTS_SCALABLE_H_TO_SIMD_VECTOR);
+    theEmitter->emitIns_R_R_R(INS_sve_sminqv, EA_8BYTE, REG_V1, REG_P4, REG_V24, INS_OPTS_SCALABLE_H_WITH_SIMD_VECTOR);
     /* SMINQV ., , . */
-    theEmitter->emitIns_R_R_R(INS_sve_umaxqv, EA_4BYTE, REG_V2, REG_P3, REG_V23, INS_OPTS_SCALABLE_S_TO_SIMD_VECTOR);
+    theEmitter->emitIns_R_R_R(INS_sve_umaxqv, EA_8BYTE, REG_V2, REG_P3, REG_V23, INS_OPTS_SCALABLE_S_WITH_SIMD_VECTOR);
     /* UMAXQV ., , . */
-    theEmitter->emitIns_R_R_R(INS_sve_uminqv, EA_8BYTE, REG_V3, REG_P2, REG_V22, INS_OPTS_SCALABLE_D_TO_SIMD_VECTOR);
+    theEmitter->emitIns_R_R_R(INS_sve_uminqv, EA_8BYTE, REG_V3, REG_P2, REG_V22, INS_OPTS_SCALABLE_D_WITH_SIMD_VECTOR);
     /* UMINQV ., , . */
#endif // ALL_ARM64_EMITTER_UNIT_TESTS_SVE_UNSUPPORTED

@@ -10444,6 +10444,76 @@ void CodeGen::genArm64EmitterUnitTests()
    theEmitter->emitIns_R_R_R(INS_sve_uxtw, EA_SCALABLE, REG_V17, REG_P6, REG_V14,
                              INS_OPTS_SCALABLE_D); /* UXTW .D, /M, .D */

+    // IF_SVE_CL_3A
+    theEmitter->emitIns_R_R_R(INS_sve_compact, EA_SCALABLE, REG_V16, REG_P7, REG_V13,
+                              INS_OPTS_SCALABLE_S); /* COMPACT ., , . */
+    theEmitter->emitIns_R_R_R(INS_sve_compact, EA_SCALABLE, REG_V15, REG_P0, REG_V12,
+                              INS_OPTS_SCALABLE_D); /* COMPACT ., , . */
+
+    // IF_SVE_CP_3A
+    theEmitter->emitIns_R_R_R(INS_sve_cpy, EA_1BYTE, REG_V14, REG_P1, REG_V11,
+                              INS_OPTS_SCALABLE_B_WITH_SIMD_SCALAR); /* CPY ., /M, */
+    theEmitter->emitIns_R_R_R(INS_sve_cpy, EA_4BYTE, REG_V13, REG_P2, REG_V10,
+                              INS_OPTS_SCALABLE_S_WITH_SIMD_SCALAR); /* CPY ., /M, */
+    theEmitter->emitIns_R_R_R(INS_sve_mov, EA_2BYTE, REG_V12, REG_P3, REG_V9,
+                              INS_OPTS_SCALABLE_H_WITH_SIMD_SCALAR); /* MOV ., /M, */
+    theEmitter->emitIns_R_R_R(INS_sve_mov, EA_8BYTE, REG_V11, REG_P4, REG_V8,
+                              INS_OPTS_SCALABLE_D_WITH_SIMD_SCALAR); /* MOV ., /M, */
+
+    // IF_SVE_CQ_3A
+    // Note: EA_4BYTE used for B and H (source register is W)
+    theEmitter->emitIns_R_R_R(INS_sve_cpy, EA_8BYTE, REG_V10, REG_P5, REG_SP,
+                              INS_OPTS_SCALABLE_D_WITH_SCALAR); /* CPY ., /M, */
+    theEmitter->emitIns_R_R_R(INS_sve_cpy, EA_4BYTE, REG_V9, REG_P6, REG_R30,
+                              INS_OPTS_SCALABLE_H_WITH_SCALAR); /* CPY ., /M, */
+    theEmitter->emitIns_R_R_R(INS_sve_mov, EA_4BYTE, REG_V8, REG_P7, REG_R29,
+                              INS_OPTS_SCALABLE_S_WITH_SCALAR); /* MOV ., /M, */
+    theEmitter->emitIns_R_R_R(INS_sve_mov, EA_4BYTE, REG_V7, REG_P0, REG_R28,
+                              INS_OPTS_SCALABLE_B_WITH_SCALAR); /* MOV ., /M, */
+
+    // IF_SVE_CR_3A
+    theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27,
+                              INS_OPTS_SCALABLE_B_WITH_SIMD_SCALAR); /* LASTA , , . */
+    theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26,
+                              INS_OPTS_SCALABLE_H_WITH_SIMD_SCALAR); /* LASTA , , . */
+    theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25,
+                              INS_OPTS_SCALABLE_S_WITH_SIMD_SCALAR); /* LASTB , , . */
+    theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24,
+                              INS_OPTS_SCALABLE_D_WITH_SIMD_SCALAR); /* LASTB , , . */
+
+    // IF_SVE_CS_3A
+    // Note: EA_4BYTE used for B and H (source register is W)
+    theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23,
+                              INS_OPTS_SCALABLE_B_WITH_SCALAR); /* LASTA , , . */
+    theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22,
+                              INS_OPTS_SCALABLE_S_WITH_SCALAR); /* LASTA , , . */
+    theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21,
+                              INS_OPTS_SCALABLE_H_WITH_SCALAR); /* LASTB , , . */
+    theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20,
+                              INS_OPTS_SCALABLE_D_WITH_SCALAR); /* LASTB , , . */
+
+    // IF_SVE_CU_3A
+    theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19,
+                              INS_OPTS_SCALABLE_H); /* RBIT ., /M, . */
+    theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19,
+                              INS_OPTS_SCALABLE_B); /* RBIT ., /M, . */
+    theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19,
+                              INS_OPTS_SCALABLE_S); /* RBIT ., /M, . */
+    theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19,
+                              INS_OPTS_SCALABLE_D); /* RBIT ., /M, . */
+    theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18,
+                              INS_OPTS_SCALABLE_H); /* REVB ., /M, . */
+    theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18,
+                              INS_OPTS_SCALABLE_S); /* REVB ., /M, . */
+    theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18,
+                              INS_OPTS_SCALABLE_D); /* REVB ., /M, . */
+    theEmitter->emitIns_R_R_R(INS_sve_revh, EA_SCALABLE, REG_V26, REG_P3, REG_V17,
+                              INS_OPTS_SCALABLE_S); /* REVH ., /M, . */
+    theEmitter->emitIns_R_R_R(INS_sve_revh, EA_SCALABLE, REG_V26, REG_P3, REG_V17,
+                              INS_OPTS_SCALABLE_D); /* REVH ., /M, .
*/ + theEmitter->emitIns_R_R_R(INS_sve_revw, EA_SCALABLE, REG_V25, REG_P4, REG_V16, + INS_OPTS_SCALABLE_D); /* REVW .D, /M, .D */ + #endif // ALL_ARM64_EMITTER_UNIT_TESTS_SVE #ifdef ALL_ARM64_EMITTER_UNIT_TESTS diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp index c2ebe6030a6da..8b11d66505adb 100644 --- a/src/coreclr/jit/emitarm64.cpp +++ b/src/coreclr/jit/emitarm64.cpp @@ -965,6 +965,7 @@ void emitter::emitInsSanityCheck(instrDesc* id) // Scalable, .S or .D. case IF_SVE_AC_3A: // ........xx...... ...gggmmmmmddddd -- SVE integer divide vectors (predicated) + case IF_SVE_CL_3A: // ........xx...... ...gggnnnnnddddd -- SVE compress active elements elemsize = id->idOpSize(); assert(insOptsScalableWords(id->idInsOpt())); // xx assert(isVectorRegister(id->idReg1())); // ddddd @@ -983,35 +984,39 @@ void emitter::emitInsSanityCheck(instrDesc* id) assert(isScalableVectorSize(elemsize)); break; - // Scalable to SIMD scalar. + // Scalable to/from SIMD scalar. case IF_SVE_CN_3A: // ........xx...... ...gggmmmmmddddd -- SVE conditionally extract element to SIMD&FP scalar + case IF_SVE_CP_3A: // ........xx...... ...gggnnnnnddddd -- SVE copy SIMD&FP scalar register to vector + // (predicated) + case IF_SVE_CR_3A: // ........xx...... ...gggnnnnnddddd -- SVE extract element to SIMD&FP scalar register case IF_SVE_AF_3A: // ........xx...... ...gggnnnnnddddd -- SVE bitwise logical reduction (predicated) case IF_SVE_AK_3A: // ........xx...... ...gggnnnnnddddd -- SVE integer min/max reduction (predicated) elemsize = id->idOpSize(); - assert(insOptsScalableToSimdScalar(id->idInsOpt())); // xx - assert(isVectorRegister(id->idReg1())); // ddddd - assert(isLowPredicateRegister(id->idReg2())); // ggg - assert(isVectorRegister(id->idReg3())); // mmmmm + assert(insOptsScalableWithSimdScalar(id->idInsOpt())); // xx + assert(isVectorRegister(id->idReg1())); // ddddd + assert(isLowPredicateRegister(id->idReg2())); // ggg + assert(isVectorRegister(id->idReg3())); // mmmmm assert(isValidVectorElemsize(elemsize)); break; // Scalable to FP SIMD scalar. case IF_SVE_HJ_3A: // ........xx...... ...gggmmmmmddddd -- SVE floating-point serial reduction (predicated) elemsize = id->idOpSize(); - assert(insOptsScalableToSimdFPScalar(id->idInsOpt())); // xx - assert(isVectorRegister(id->idReg1())); // ddddd - assert(isLowPredicateRegister(id->idReg2())); // ggg - assert(isVectorRegister(id->idReg3())); // mmmmm + assert(insOptsScalableWithSimdFPScalar(id->idInsOpt())); // xx + assert(isVectorRegister(id->idReg1())); // ddddd + assert(isLowPredicateRegister(id->idReg2())); // ggg + assert(isVectorRegister(id->idReg3())); // mmmmm assert(isValidVectorElemsizeSveFloat(elemsize)); break; // Scalable to general register. case IF_SVE_CO_3A: // ........xx...... ...gggmmmmmddddd -- SVE conditionally extract element to general register + case IF_SVE_CS_3A: // ........xx...... 
...gggnnnnnddddd -- SVE extract element to general register elemsize = id->idOpSize(); - assert(insOptsScalableToScalar(id->idInsOpt())); // xx - assert(isGeneralRegister(id->idReg1())); // ddddd - assert(isLowPredicateRegister(id->idReg2())); // ggg - assert(isVectorRegister(id->idReg3())); // mmmmm + assert(insOptsScalableWithScalar(id->idInsOpt())); // xx + assert(isGeneralRegister(id->idReg1())); // ddddd + assert(isLowPredicateRegister(id->idReg2())); // ggg + assert(isVectorRegister(id->idReg3())); // mmmmm assert(isValidScalarDatasize(elemsize)); break; @@ -1030,12 +1035,12 @@ void emitter::emitInsSanityCheck(instrDesc* id) case IF_SVE_AG_3A: // ........xx...... ...gggnnnnnddddd -- SVE bitwise logical reduction (quadwords) case IF_SVE_AJ_3A: // ........xx...... ...gggnnnnnddddd -- SVE integer add reduction (quadwords) case IF_SVE_AL_3A: // ........xx...... ...gggnnnnnddddd -- SVE integer min/max reduction (quadwords) - elemsize = id->idOpSize(); - assert(insOptsScalableToSimdVector(id->idInsOpt())); // xx - assert(isVectorRegister(id->idReg1())); // ddddd - assert(isLowPredicateRegister(id->idReg2())); // ggg - assert(isVectorRegister(id->idReg3())); // mmmmm - assert(isValidVectorElemsize(elemsize)); + datasize = id->idOpSize(); + assert(insOptsScalableWithSimdVector(id->idInsOpt())); // xx + assert(isVectorRegister(id->idReg1())); // ddddd + assert(isLowPredicateRegister(id->idReg2())); // ggg + assert(isVectorRegister(id->idReg3())); // mmmmm + assert(datasize == EA_8BYTE); break; // Scalable, widening to scalar SIMD. @@ -1068,21 +1073,26 @@ void emitter::emitInsSanityCheck(instrDesc* id) assert(isScalableVectorSize(elemsize)); break; + // Scalable, various sizes. case IF_SVE_AQ_3A: // ........xx...... ...gggnnnnnddddd -- SVE integer unary operations (predicated) + case IF_SVE_CU_3A: // ........xx...... ...gggnnnnnddddd -- SVE reverse within elements switch (id->idIns()) { case INS_sve_abs: case INS_sve_neg: + case INS_sve_rbit: assert(insOptsScalableSimple(id->idInsOpt())); break; case INS_sve_sxtb: case INS_sve_uxtb: + case INS_sve_revb: assert(insOptsScalableAtLeastHalf(id->idInsOpt())); break; case INS_sve_sxth: case INS_sve_uxth: + case INS_sve_revh: assert(insOptsScalableWords(id->idInsOpt())); break; @@ -1097,6 +1107,16 @@ void emitter::emitInsSanityCheck(instrDesc* id) assert(isScalableVectorSize(elemsize)); break; + // Scalable from general scalar (possibly SP) + case IF_SVE_CQ_3A: // ........xx...... 
...gggnnnnnddddd -- SVE copy general register to vector (predicated) + elemsize = id->idOpSize(); + assert(insOptsScalableWithScalar(id->idInsOpt())); // xx + assert(isVectorRegister(id->idReg1())); // ddddd + assert(isLowPredicateRegister(id->idReg2())); // ggg + assert(isGeneralRegisterOrZR(id->idReg3())); // mmmmm + assert(isValidScalarDatasize(elemsize)); + break; + default: printf("unexpected format %s\n", emitIfName(id->idInsFmt())); assert(!"Unexpected format"); @@ -8263,7 +8283,7 @@ void emitter::emitIns_R_R_R( assert(isFloatReg(reg1)); assert(isLowPredicateRegister(reg2)); assert(isVectorRegister(reg3)); - assert(insOptsScalableToSimdScalar(opt)); + assert(insOptsScalableWithSimdScalar(opt)); fmt = IF_SVE_AF_3A; break; @@ -8274,7 +8294,7 @@ void emitter::emitIns_R_R_R( assert(isVectorRegister(reg1)); assert(isLowPredicateRegister(reg2)); assert(isVectorRegister(reg3)); - assert(insOptsScalableToSimdVector(opt)); + assert(insOptsScalableWithSimdVector(opt)); fmt = IF_SVE_AG_3A; break; @@ -8283,7 +8303,7 @@ void emitter::emitIns_R_R_R( assert(isFloatReg(reg1)); assert(isLowPredicateRegister(reg2)); assert(isVectorRegister(reg3)); - assert(insOptsScalableToSimdScalar(opt)); + assert(insOptsScalableWithSimdScalar(opt)); fmt = IF_SVE_AI_3A; break; @@ -8292,7 +8312,7 @@ void emitter::emitIns_R_R_R( assert(isVectorRegister(reg1)); assert(isLowPredicateRegister(reg2)); assert(isVectorRegister(reg3)); - assert(insOptsScalableToSimdVector(opt)); + assert(insOptsScalableWithSimdVector(opt)); fmt = IF_SVE_AJ_3A; break; @@ -8303,7 +8323,7 @@ void emitter::emitIns_R_R_R( assert(isFloatReg(reg1)); assert(isLowPredicateRegister(reg2)); assert(isVectorRegister(reg3)); - assert(insOptsScalableToSimdScalar(opt)); + assert(insOptsScalableWithSimdScalar(opt)); fmt = IF_SVE_AK_3A; break; @@ -8315,7 +8335,7 @@ void emitter::emitIns_R_R_R( assert(isVectorRegister(reg1)); assert(isLowPredicateRegister(reg2)); assert(isVectorRegister(reg3)); - assert(insOptsScalableToSimdVector(opt)); + assert(insOptsScalableWithSimdVector(opt)); fmt = IF_SVE_AL_3A; break; @@ -8403,6 +8423,14 @@ void emitter::emitIns_R_R_R( fmt = IF_SVE_AQ_3A; break; + case INS_sve_compact: + assert(isVectorRegister(reg1)); + assert(isLowPredicateRegister(reg2)); + assert(isVectorRegister(reg3)); + assert(insOptsScalableWords(opt)); + fmt = IF_SVE_CL_3A; + break; + case INS_sve_clasta: case INS_sve_clastb: assert(isLowPredicateRegister(reg2)); @@ -8412,7 +8440,7 @@ void emitter::emitIns_R_R_R( assert(isVectorRegister(reg1)); fmt = IF_SVE_CM_3A; } - else if (insOptsScalableToSimdScalar(opt)) + else if (insOptsScalableWithSimdScalar(opt)) { assert(isFloatReg(reg1)); assert(isValidVectorElemsize(size)); @@ -8420,13 +8448,82 @@ void emitter::emitIns_R_R_R( } else { - assert(insOptsScalableToScalar(opt)); + assert(insOptsScalableWithScalar(opt)); assert(isGeneralRegister(reg1)); assert(isValidScalarDatasize(size)); fmt = IF_SVE_CO_3A; } break; + case INS_sve_cpy: + case INS_sve_mov: + assert(isVectorRegister(reg1)); + assert(isLowPredicateRegister(reg2)); + if (insOptsScalableWithSimdScalar(opt)) + { + assert(isVectorRegister(reg3)); + fmt = IF_SVE_CP_3A; + } + else + { + assert(insOptsScalableWithScalar(opt)); + assert(isGeneralRegisterOrSP(reg3)); + fmt = IF_SVE_CQ_3A; + reg3 = encodingSPtoZR(reg3); + } + // MOV is an alias for CPY, and is always the preferred disassembly. 
+ ins = INS_sve_mov; + break; + + case INS_sve_lasta: + case INS_sve_lastb: + assert(isLowPredicateRegister(reg2)); + assert(isVectorRegister(reg3)); + if (insOptsScalableWithSimdScalar(opt)) + { + assert(isVectorRegister(reg1)); + fmt = IF_SVE_CR_3A; + } + else + { + assert(insOptsScalableWithScalar(opt)); + assert(isGeneralRegister(reg1)); + fmt = IF_SVE_CS_3A; + } + break; + + case INS_sve_rbit: + assert(isVectorRegister(reg1)); + assert(isLowPredicateRegister(reg2)); + assert(isVectorRegister(reg3)); + assert(insOptsScalableSimple(opt)); + fmt = IF_SVE_CU_3A; + break; + + case INS_sve_revb: + assert(isVectorRegister(reg1)); + assert(isLowPredicateRegister(reg2)); + assert(isVectorRegister(reg3)); + assert(insOptsScalableAtLeastHalf(opt)); + fmt = IF_SVE_CU_3A; + break; + + case INS_sve_revh: + assert(isVectorRegister(reg1)); + assert(isLowPredicateRegister(reg2)); + assert(isVectorRegister(reg3)); + assert(insOptsScalableWords(opt)); + fmt = IF_SVE_CU_3A; + break; + + case INS_sve_revw: + assert(isVectorRegister(reg1)); + assert(isLowPredicateRegister(reg2)); + assert(isVectorRegister(reg3)); + assert(opt == INS_OPTS_SCALABLE_D); + fmt = IF_SVE_CU_3A; + break; + case INS_sve_shadd: case INS_sve_shsub: case INS_sve_shsubr: @@ -8504,7 +8601,7 @@ void emitter::emitIns_R_R_R( assert(isFloatReg(reg1)); assert(isLowPredicateRegister(reg2)); assert(isVectorRegister(reg3)); - assert(insOptsScalableToSimdFPScalar(opt)); + assert(insOptsScalableWithSimdFPScalar(opt)); assert(isValidVectorElemsizeSveFloat(size)); fmt = IF_SVE_HJ_3A; break; @@ -12161,29 +12258,29 @@ void emitter::emitIns_Call(EmitCallType callType, { case INS_OPTS_SCALABLE_B: case INS_OPTS_SCALABLE_WIDE_B: - case INS_OPTS_SCALABLE_B_TO_SIMD_SCALAR: - case INS_OPTS_SCALABLE_B_TO_SIMD_VECTOR: - case INS_OPTS_SCALABLE_B_TO_SCALAR: + case INS_OPTS_SCALABLE_B_WITH_SIMD_SCALAR: + case INS_OPTS_SCALABLE_B_WITH_SIMD_VECTOR: + case INS_OPTS_SCALABLE_B_WITH_SCALAR: return 0x00000000; case INS_OPTS_SCALABLE_H: case INS_OPTS_SCALABLE_WIDE_H: - case INS_OPTS_SCALABLE_H_TO_SIMD_SCALAR: - case INS_OPTS_SCALABLE_H_TO_SIMD_VECTOR: - case INS_OPTS_SCALABLE_H_TO_SCALAR: + case INS_OPTS_SCALABLE_H_WITH_SIMD_SCALAR: + case INS_OPTS_SCALABLE_H_WITH_SIMD_VECTOR: + case INS_OPTS_SCALABLE_H_WITH_SCALAR: return 0x00400000; // set the bit at location 22 case INS_OPTS_SCALABLE_S: case INS_OPTS_SCALABLE_WIDE_S: - case INS_OPTS_SCALABLE_S_TO_SIMD_SCALAR: - case INS_OPTS_SCALABLE_S_TO_SIMD_VECTOR: - case INS_OPTS_SCALABLE_S_TO_SCALAR: + case INS_OPTS_SCALABLE_S_WITH_SIMD_SCALAR: + case INS_OPTS_SCALABLE_S_WITH_SIMD_VECTOR: + case INS_OPTS_SCALABLE_S_WITH_SCALAR: return 0x00800000; // set the bit at location 23 case INS_OPTS_SCALABLE_D: - case INS_OPTS_SCALABLE_D_TO_SIMD_SCALAR: - case INS_OPTS_SCALABLE_D_TO_SIMD_VECTOR: - case INS_OPTS_SCALABLE_D_TO_SCALAR: + case INS_OPTS_SCALABLE_D_WITH_SIMD_SCALAR: + case INS_OPTS_SCALABLE_D_WITH_SIMD_VECTOR: + case INS_OPTS_SCALABLE_D_WITH_SCALAR: return 0x00C00000; // set the bit at location 23 and 22 default: @@ -14155,8 +14252,13 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) case IF_SVE_AO_3A: // ........xx...... ...gggmmmmmddddd -- SVE bitwise shift by wide elements (predicated) case IF_SVE_AP_3A: // ........xx...... ...gggnnnnnddddd -- SVE bitwise unary operations (predicated) case IF_SVE_AQ_3A: // ........xx...... ...gggnnnnnddddd -- SVE integer unary operations (predicated) + case IF_SVE_CL_3A: // ........xx...... 
...gggnnnnnddddd -- SVE compress active elements case IF_SVE_CM_3A: // ........xx...... ...gggmmmmmddddd -- SVE conditionally broadcast element to vector case IF_SVE_CN_3A: // ........xx...... ...gggmmmmmddddd -- SVE conditionally extract element to SIMD&FP scalar + case IF_SVE_CP_3A: // ........xx...... ...gggnnnnnddddd -- SVE copy SIMD&FP scalar register to vector + // (predicated) + case IF_SVE_CR_3A: // ........xx...... ...gggnnnnnddddd -- SVE extract element to SIMD&FP scalar register + case IF_SVE_CU_3A: // ........xx...... ...gggnnnnnddddd -- SVE reverse within elements case IF_SVE_EP_3A: // ........xx...... ...gggmmmmmddddd -- SVE2 integer halving add/subtract (predicated) case IF_SVE_ER_3A: // ........xx...... ...gggmmmmmddddd -- SVE2 integer pairwise arithmetic case IF_SVE_ET_3A: // ........xx...... ...gggmmmmmddddd -- SVE2 saturating add/subtract @@ -14175,6 +14277,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) // Scalable to general register. case IF_SVE_CO_3A: // ........xx...... ...gggmmmmmddddd -- SVE conditionally extract element to general register + case IF_SVE_CS_3A: // ........xx...... ...gggnnnnnddddd -- SVE extract element to general register code = emitInsCodeSve(ins, fmt); code |= insEncodeReg_Rd(id->idReg1()); // ddddd code |= insEncodeReg_P_12_to_10(id->idReg2()); // ggg @@ -14183,6 +14286,16 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp) dst += emitOutput_Instr(dst, code); break; + // Scalable from general register. + case IF_SVE_CQ_3A: // ........xx...... ...gggnnnnnddddd -- SVE copy general register to vector (predicated) + code = emitInsCodeSve(ins, fmt); + code |= insEncodeReg_V_4_to_0(id->idReg1()); // ddddd + code |= insEncodeReg_P_12_to_10(id->idReg2()); // ggg + code |= insEncodeReg_Rn(id->idReg3()); // mmmmm + code |= insEncodeSveElemsize(id->idInsOpt()); // xx + dst += emitOutput_Instr(dst, code); + break; + default: assert(!"Unexpected format"); break; @@ -14779,51 +14892,51 @@ void emitter::emitDispArrangement(insOpts opt) str = "8b"; break; case INS_OPTS_16B: - case INS_OPTS_SCALABLE_B_TO_SIMD_VECTOR: + case INS_OPTS_SCALABLE_B_WITH_SIMD_VECTOR: str = "16b"; break; case INS_OPTS_SCALABLE_B: case INS_OPTS_SCALABLE_WIDE_B: - case INS_OPTS_SCALABLE_B_TO_SIMD_SCALAR: - case INS_OPTS_SCALABLE_B_TO_SCALAR: + case INS_OPTS_SCALABLE_B_WITH_SIMD_SCALAR: + case INS_OPTS_SCALABLE_B_WITH_SCALAR: str = "b"; break; case INS_OPTS_4H: str = "4h"; break; case INS_OPTS_8H: - case INS_OPTS_SCALABLE_H_TO_SIMD_VECTOR: + case INS_OPTS_SCALABLE_H_WITH_SIMD_VECTOR: str = "8h"; break; case INS_OPTS_SCALABLE_H: case INS_OPTS_SCALABLE_WIDE_H: - case INS_OPTS_SCALABLE_H_TO_SIMD_SCALAR: - case INS_OPTS_SCALABLE_H_TO_SCALAR: + case INS_OPTS_SCALABLE_H_WITH_SIMD_SCALAR: + case INS_OPTS_SCALABLE_H_WITH_SCALAR: str = "h"; break; case INS_OPTS_2S: str = "2s"; break; case INS_OPTS_4S: - case INS_OPTS_SCALABLE_S_TO_SIMD_VECTOR: + case INS_OPTS_SCALABLE_S_WITH_SIMD_VECTOR: str = "4s"; break; case INS_OPTS_SCALABLE_S: case INS_OPTS_SCALABLE_WIDE_S: - case INS_OPTS_SCALABLE_S_TO_SIMD_SCALAR: - case INS_OPTS_SCALABLE_S_TO_SCALAR: + case INS_OPTS_SCALABLE_S_WITH_SIMD_SCALAR: + case INS_OPTS_SCALABLE_S_WITH_SCALAR: str = "s"; break; case INS_OPTS_1D: str = "1d"; break; case INS_OPTS_2D: - case INS_OPTS_SCALABLE_D_TO_SIMD_VECTOR: + case INS_OPTS_SCALABLE_D_WITH_SIMD_VECTOR: str = "2d"; break; case INS_OPTS_SCALABLE_D: - case INS_OPTS_SCALABLE_D_TO_SIMD_SCALAR: - case INS_OPTS_SCALABLE_D_TO_SCALAR: + case 
INS_OPTS_SCALABLE_D_WITH_SIMD_SCALAR: + case INS_OPTS_SCALABLE_D_WITH_SCALAR: str = "d"; break; @@ -16425,6 +16538,7 @@ void emitter::emitDispInsHelp( break; // , , , . + // , , , . case IF_SVE_CN_3A: // ........xx...... ...gggmmmmmddddd -- SVE conditionally extract element to SIMD&FP scalar case IF_SVE_CO_3A: // ........xx...... ...gggmmmmmddddd -- SVE conditionally extract element to general register case IF_SVE_HJ_3A: // ........xx...... ...gggmmmmmddddd -- SVE floating-point serial reduction (predicated) @@ -16435,8 +16549,11 @@ void emitter::emitDispInsHelp( break; // , , . + // , , . case IF_SVE_AF_3A: // ........xx...... ...gggnnnnnddddd -- SVE bitwise logical reduction (predicated) case IF_SVE_AK_3A: // ........xx...... ...gggnnnnnddddd -- SVE integer min/max reduction (predicated) + case IF_SVE_CR_3A: // ........xx...... ...gggnnnnnddddd -- SVE extract element to SIMD&FP scalar register + case IF_SVE_CS_3A: // ........xx...... ...gggnnnnnddddd -- SVE extract element to general register emitDispReg(id->idReg1(), size, true); // ddddd emitDispPredicateReg(id->idReg2(), PREDICATE_NONE, true); // ggg emitDispSveReg(id->idReg3(), id->idInsOpt(), false); // mmmmm @@ -16461,11 +16578,34 @@ void emitter::emitDispInsHelp( // ., /M, . case IF_SVE_AP_3A: // ........xx...... ...gggnnnnnddddd -- SVE bitwise unary operations (predicated) case IF_SVE_AQ_3A: // ........xx...... ...gggnnnnnddddd -- SVE integer unary operations (predicated) + case IF_SVE_CU_3A: // ........xx...... ...gggnnnnnddddd -- SVE reverse within elements emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd emitDispPredicateReg(id->idReg2(), PREDICATE_MERGE, true); // ggg emitDispSveReg(id->idReg3(), id->idInsOpt(), false); // mmmmm break; + // ., , . + case IF_SVE_CL_3A: // ........xx...... ...gggnnnnnddddd -- SVE compress active elements + emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd + emitDispPredicateReg(id->idReg2(), PREDICATE_NONE, true); // ggg + emitDispSveReg(id->idReg3(), id->idInsOpt(), false); // mmmmm + break; + + // ., /M, + case IF_SVE_CP_3A: // ........xx...... ...gggnnnnnddddd -- SVE copy SIMD&FP scalar register to vector + // (predicated) + emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd + emitDispPredicateReg(id->idReg2(), PREDICATE_MERGE, true); // ggg + emitDispReg(id->idReg3(), size, false); // mmmmm + break; + + // ., /M, + case IF_SVE_CQ_3A: // ........xx...... ...gggnnnnnddddd -- SVE copy general register to vector (predicated) + emitDispSveReg(id->idReg1(), id->idInsOpt(), true); // ddddd + emitDispPredicateReg(id->idReg2(), PREDICATE_MERGE, true); // ggg + emitDispReg(encodingZRtoSP(id->idReg3()), size, false); // mmmmm + break; + default: printf("unexpected format %s", emitIfName(id->idInsFmt())); assert(!"unexpectedFormat"); @@ -18712,7 +18852,7 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins case INS_sve_abs: case INS_sve_neg: result.insLatency = PERFSCORE_LATENCY_2C; - result.insThroughput = PERFSCORE_THROUGHPUT_2C; + result.insThroughput = PERFSCORE_THROUGHPUT_2X; break; // Extend, sign or zero @@ -18734,6 +18874,7 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins break; // Conditional extract operations, SIMD&FP scalar and vector forms + case IF_SVE_CL_3A: // ........xx...... ...gggnnnnnddddd -- SVE compress active elements case IF_SVE_CM_3A: // ........xx...... ...gggmmmmmddddd -- SVE conditionally broadcast element to vector case IF_SVE_CN_3A: // ........xx...... 
...gggmmmmmddddd -- SVE conditionally extract element to SIMD&FP scalar result.insLatency = PERFSCORE_LATENCY_3C; @@ -18746,6 +18887,38 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins result.insThroughput = PERFSCORE_THROUGHPUT_1C; break; + // Copy, scalar SIMD&FP or imm + case IF_SVE_CP_3A: // ........xx...... ...gggnnnnnddddd -- SVE copy SIMD&FP scalar register to vector + // (predicated) + result.insLatency = PERFSCORE_LATENCY_2C; + result.insThroughput = PERFSCORE_THROUGHPUT_2X; + break; + + // Copy, scalar + case IF_SVE_CQ_3A: // ........xx...... ...gggnnnnnddddd -- SVE copy general register to vector (predicated) + result.insLatency = PERFSCORE_LATENCY_5C; + result.insThroughput = PERFSCORE_THROUGHPUT_1C; + break; + + // Extract/insert operation, SIMD and FP scalar form + case IF_SVE_CR_3A: // ........xx...... ...gggnnnnnddddd -- SVE extract element to SIMD&FP scalar register + result.insLatency = PERFSCORE_LATENCY_3C; + result.insThroughput = PERFSCORE_THROUGHPUT_1C; + break; + + // Extract/insert operation, scalar + case IF_SVE_CS_3A: // ........xx...... ...gggnnnnnddddd -- SVE extract element to general register + result.insLatency = PERFSCORE_LATENCY_5C; + result.insThroughput = PERFSCORE_THROUGHPUT_1C; + break; + + // Count/reverse bits + // Reverse, vector + case IF_SVE_CU_3A: // ........xx...... ...gggnnnnnddddd -- SVE reverse within elements + result.insLatency = PERFSCORE_LATENCY_2C; + result.insThroughput = PERFSCORE_THROUGHPUT_2X; + break; + // Arithmetic, pairwise add // Max/min, basic and pairwise case IF_SVE_ER_3A: // ........xx...... ...gggmmmmmddddd -- SVE2 integer pairwise arithmetic @@ -18756,7 +18929,7 @@ emitter::insExecutionCharacteristics emitter::getInsExecutionCharacteristics(ins // Arithmetic, complex case IF_SVE_ET_3A: // ........xx...... ...gggmmmmmddddd -- SVE2 saturating add/subtract result.insLatency = PERFSCORE_LATENCY_2C; - result.insThroughput = PERFSCORE_THROUGHPUT_2C; + result.insThroughput = PERFSCORE_THROUGHPUT_2X; break; // Arithmetic, shift complex diff --git a/src/coreclr/jit/emitarm64.h b/src/coreclr/jit/emitarm64.h index 7dc101bbeeb20..0a3ef6063b3de 100644 --- a/src/coreclr/jit/emitarm64.h +++ b/src/coreclr/jit/emitarm64.h @@ -862,8 +862,8 @@ inline static bool insOptsConvertIntToFloat(insOpts opt) inline static bool insOptsScalable(insOpts opt) { // Opt is any of the scalable types. - return ((insOptsScalableSimple(opt)) || (insOptsScalableWide(opt)) || (insOptsScalableToSimdScalar(opt)) || - (insOptsScalableToScalar(opt)) || insOptsScalableToSimdVector(opt)); + return ((insOptsScalableSimple(opt)) || (insOptsScalableWide(opt)) || (insOptsScalableWithSimdScalar(opt)) || + (insOptsScalableWithScalar(opt)) || (insOptsScalableWithSimdVector(opt))); } inline static bool insOptsScalableSimple(insOpts opt) @@ -898,39 +898,39 @@ inline static bool insOptsScalableWide(insOpts opt) (opt == INS_OPTS_SCALABLE_WIDE_S)); } -inline static bool insOptsScalableToSimdVector(insOpts opt) +inline static bool insOptsScalableWithSimdVector(insOpts opt) { // `opt` is any of the scalable types that are valid for conversion to an Advsimd SIMD Vector. 
- return ((opt == INS_OPTS_SCALABLE_B_TO_SIMD_VECTOR) || (opt == INS_OPTS_SCALABLE_H_TO_SIMD_VECTOR) || - (opt == INS_OPTS_SCALABLE_S_TO_SIMD_VECTOR) || (opt == INS_OPTS_SCALABLE_D_TO_SIMD_VECTOR)); + return ((opt == INS_OPTS_SCALABLE_B_WITH_SIMD_VECTOR) || (opt == INS_OPTS_SCALABLE_H_WITH_SIMD_VECTOR) || + (opt == INS_OPTS_SCALABLE_S_WITH_SIMD_VECTOR) || (opt == INS_OPTS_SCALABLE_D_WITH_SIMD_VECTOR)); } -inline static bool insOptsScalableToSimdScalar(insOpts opt) +inline static bool insOptsScalableWithSimdScalar(insOpts opt) { - // `opt` is any of the scalable types that are valid for conversion to a scalar in a SIMD register. - return ((opt == INS_OPTS_SCALABLE_B_TO_SIMD_SCALAR) || (opt == INS_OPTS_SCALABLE_H_TO_SIMD_SCALAR) || - (opt == INS_OPTS_SCALABLE_S_TO_SIMD_SCALAR) || (opt == INS_OPTS_SCALABLE_D_TO_SIMD_SCALAR)); + // `opt` is any of the scalable types that are valid for conversion to/from a scalar in a SIMD register. + return ((opt == INS_OPTS_SCALABLE_B_WITH_SIMD_SCALAR) || (opt == INS_OPTS_SCALABLE_H_WITH_SIMD_SCALAR) || + (opt == INS_OPTS_SCALABLE_S_WITH_SIMD_SCALAR) || (opt == INS_OPTS_SCALABLE_D_WITH_SIMD_SCALAR)); } -inline static bool insOptsScalableToSimdFPScalar(insOpts opt) +inline static bool insOptsScalableWithSimdFPScalar(insOpts opt) { - // `opt` is any of the scalable types that are valid for conversion to an FP scalar in a SIMD register. - return ((opt == INS_OPTS_SCALABLE_H_TO_SIMD_SCALAR) || (opt == INS_OPTS_SCALABLE_S_TO_SIMD_SCALAR) || - (opt == INS_OPTS_SCALABLE_D_TO_SIMD_SCALAR)); + // `opt` is any of the scalable types that are valid for conversion to/from a FP scalar in a SIMD register. + return ((opt == INS_OPTS_SCALABLE_H_WITH_SIMD_SCALAR) || (opt == INS_OPTS_SCALABLE_S_WITH_SIMD_SCALAR) || + (opt == INS_OPTS_SCALABLE_D_WITH_SIMD_SCALAR)); } inline static bool insOptsScalableWideningToSimdScalar(insOpts opt) { // `opt` is any of the scalable types that are valid for widening then conversion to a scalar in a SIMD register. - return ((opt == INS_OPTS_SCALABLE_B_TO_SIMD_SCALAR) || (opt == INS_OPTS_SCALABLE_H_TO_SIMD_SCALAR) || - (opt == INS_OPTS_SCALABLE_S_TO_SIMD_SCALAR)); + return ((opt == INS_OPTS_SCALABLE_B_WITH_SIMD_SCALAR) || (opt == INS_OPTS_SCALABLE_H_WITH_SIMD_SCALAR) || + (opt == INS_OPTS_SCALABLE_S_WITH_SIMD_SCALAR)); } -inline static bool insOptsScalableToScalar(insOpts opt) +inline static bool insOptsScalableWithScalar(insOpts opt) { - // `opt` is any of the SIMD scalable types that are valid for conversion to scalar. - return ((opt == INS_OPTS_SCALABLE_B_TO_SCALAR) || (opt == INS_OPTS_SCALABLE_H_TO_SCALAR) || - (opt == INS_OPTS_SCALABLE_S_TO_SCALAR) || (opt == INS_OPTS_SCALABLE_D_TO_SCALAR)); + // `opt` is any of the SIMD scalable types that are valid for conversion to/from a scalar. 
+ return ((opt == INS_OPTS_SCALABLE_B_WITH_SCALAR) || (opt == INS_OPTS_SCALABLE_H_WITH_SCALAR) || + (opt == INS_OPTS_SCALABLE_S_WITH_SCALAR) || (opt == INS_OPTS_SCALABLE_D_WITH_SCALAR)); } static bool isValidImmCond(ssize_t imm); diff --git a/src/coreclr/jit/instr.h b/src/coreclr/jit/instr.h index 62f0c25d13cbb..08f28dfe74270 100644 --- a/src/coreclr/jit/instr.h +++ b/src/coreclr/jit/instr.h @@ -279,20 +279,20 @@ enum insOpts : unsigned INS_OPTS_SCALABLE_WIDE_H, INS_OPTS_SCALABLE_WIDE_S, - INS_OPTS_SCALABLE_B_TO_SIMD_VECTOR, - INS_OPTS_SCALABLE_H_TO_SIMD_VECTOR, - INS_OPTS_SCALABLE_S_TO_SIMD_VECTOR, - INS_OPTS_SCALABLE_D_TO_SIMD_VECTOR, - - INS_OPTS_SCALABLE_B_TO_SIMD_SCALAR, - INS_OPTS_SCALABLE_H_TO_SIMD_SCALAR, - INS_OPTS_SCALABLE_S_TO_SIMD_SCALAR, - INS_OPTS_SCALABLE_D_TO_SIMD_SCALAR, - - INS_OPTS_SCALABLE_B_TO_SCALAR, - INS_OPTS_SCALABLE_H_TO_SCALAR, - INS_OPTS_SCALABLE_S_TO_SCALAR, - INS_OPTS_SCALABLE_D_TO_SCALAR, + INS_OPTS_SCALABLE_B_WITH_SIMD_VECTOR, + INS_OPTS_SCALABLE_H_WITH_SIMD_VECTOR, + INS_OPTS_SCALABLE_S_WITH_SIMD_VECTOR, + INS_OPTS_SCALABLE_D_WITH_SIMD_VECTOR, + + INS_OPTS_SCALABLE_B_WITH_SIMD_SCALAR, + INS_OPTS_SCALABLE_H_WITH_SIMD_SCALAR, + INS_OPTS_SCALABLE_S_WITH_SIMD_SCALAR, + INS_OPTS_SCALABLE_D_WITH_SIMD_SCALAR, + + INS_OPTS_SCALABLE_B_WITH_SCALAR, + INS_OPTS_SCALABLE_H_WITH_SCALAR, + INS_OPTS_SCALABLE_S_WITH_SCALAR, + INS_OPTS_SCALABLE_D_WITH_SCALAR, INS_OPTS_MSL, // Vector Immediate (shifting ones variant)