From 9bb4e10697d5f3e613ca9aee5a9c5d960646f116 Mon Sep 17 00:00:00 2001 From: Brian Sullivan Date: Thu, 23 Jul 2020 17:20:46 -0700 Subject: [PATCH] Implementation of CSE for GT_CNS_INT benefits ARM64 (#39096) * Change the type of csdHashKey to size_t * Update gtCostSz and gtCostEx for constant nodes * Implementation of code size optimization, CSE of constant values for ARM64 Implementation of code size optimization, CSE of constant values for ARM64 We will share a single CSE for constants that differ only in their low 12 bits on ARM64 Number of shared constant low bits set in target.h CSE_CONST_SHARED_LOW_BITS we use 12 bits on Arm platforms and 16 bits on XArch platforms Disable the CSE of the REG_R2R_INDIRECT_PARAM on Arm32 as it hits Assertion failed 'candidates != candidateBit' in lsra.cpp Line: 3723 Config variable: COMPlus_JitConstCSE // Default 0: enable the CSE of Constants, including nearby offsets. (only for ARM64) // If 1, disable all the CSE of Constants // If 2, enable the CSE of Constants but don't combine with nearby offsets. (only for ARM64) // If 3, enable the CSE of Constants including nearby offsets. (all platforms) // If 4, enable the CSE of Constants but don't combine with nearby offsets. (all platforms) // * Added additional Priority 0 test coverage for Floating Point optimizations * Fix for COMPLUS_JitConstCSE=4 * Renamed config variable from COMPlus_JitDisableConstCSE to COMPlus_JitConstCSE * Updated with Codereview feedback, removed sort from Const CSE phase * Fix for assertionProp issue in the refTypesdynamic test --- src/coreclr/src/jit/compiler.h | 22 +- src/coreclr/src/jit/gentree.cpp | 192 ++++++-- src/coreclr/src/jit/jitconfigvalues.h | 14 + src/coreclr/src/jit/morph.cpp | 7 + src/coreclr/src/jit/optcse.cpp | 465 +++++++++++++----- src/coreclr/src/jit/target.h | 9 +- .../Old/Conformance_Base/beq_r4.ilproj | 2 +- .../Old/Conformance_Base/beq_r8.ilproj | 2 +- .../Old/Conformance_Base/bge_r4.ilproj | 2 +- .../Old/Conformance_Base/bge_r8.ilproj | 2 +- .../Old/Conformance_Base/bge_un_r4.ilproj | 2 +- .../Old/Conformance_Base/bge_un_r8.ilproj | 2 +- .../Old/Conformance_Base/bne_un_r4.ilproj | 2 +- .../Old/Conformance_Base/bne_un_r8.ilproj | 2 +- .../JIT/Methodical/NaN/arithm64_cs_d.csproj | 2 +- .../JIT/Methodical/NaN/arithm64_cs_do.csproj | 2 +- .../JIT/Methodical/NaN/arithm64_cs_r.csproj | 2 +- .../JIT/Methodical/NaN/arithm64_cs_ro.csproj | 2 +- 18 files changed, 567 insertions(+), 166 deletions(-) diff --git a/src/coreclr/src/jit/compiler.h b/src/coreclr/src/jit/compiler.h index c5f571838d4b2..9002a3dd31633 100644 --- a/src/coreclr/src/jit/compiler.h +++ b/src/coreclr/src/jit/compiler.h @@ -6326,11 +6326,12 @@ class Compiler struct CSEdsc { - CSEdsc* csdNextInBucket; // used by the hash table - - unsigned csdHashKey; // the orginal hashkey - - unsigned csdIndex; // 1..optCSECandidateCount + CSEdsc* csdNextInBucket; // used by the hash table + size_t csdHashKey; // the orginal hashkey + ssize_t csdConstDefValue; // When we CSE similar constants, this is the value that we use as the def + ValueNum csdConstDefVN; // When we CSE similar constants, this is the ValueNumber that we use for the LclVar + // assignment + unsigned csdIndex; // 1..optCSECandidateCount bool csdLiveAcrossCall; unsigned short csdDefCount; // definition count @@ -6359,6 +6360,7 @@ class Compiler ValueNum defConservNormVN; // if all def occurrences share the same conservative normal value // number, this will reflect it; otherwise, NoVN. 
+ // not used for shared const CSE's }; static const size_t s_optCSEhashSize; @@ -6406,6 +6408,16 @@ class Compiler void optEnsureClearCSEInfo(); #endif // DEBUG + static bool Is_Shared_Const_CSE(size_t key) + { + return ((key & TARGET_SIGN_BIT) != 0); + } + + static size_t Decode_Shared_Const_CSE_Value(size_t key) + { + return (key & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS; + } + #endif // FEATURE_ANYCSE #if FEATURE_VALNUM_CSE diff --git a/src/coreclr/src/jit/gentree.cpp b/src/coreclr/src/jit/gentree.cpp index 8ca9a49a2ce7d..db3a490f257ad 100644 --- a/src/coreclr/src/jit/gentree.cpp +++ b/src/coreclr/src/jit/gentree.cpp @@ -3250,78 +3250,138 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) switch (oper) { #ifdef TARGET_ARM - case GT_CNS_LNG: - costSz = 9; - costEx = 4; - goto COMMON_CNS; - case GT_CNS_STR: // Uses movw/movt - costSz = 7; - costEx = 3; + costSz = 8; + costEx = 2; goto COMMON_CNS; + case GT_CNS_LNG: + { + GenTreeIntConCommon* con = tree->AsIntConCommon(); + + INT64 lngVal = con->LngValue(); + INT32 loVal = (INT32)(lngVal & 0xffffffff); + INT32 hiVal = (INT32)(lngVal >> 32); + + if (lngVal == 0) + { + costSz = 1; + costEx = 1; + } + else + { + // Minimum of one instruction to setup hiVal, + // and one instruction to setup loVal + costSz = 4 + 4; + costEx = 1 + 1; + + if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)hiVal) && + !codeGen->validImmForInstr(INS_mvn, (target_ssize_t)hiVal)) + { + // Needs extra instruction: movw/movt + costSz += 4; + costEx += 1; + } + + if (!codeGen->validImmForInstr(INS_mov, (target_ssize_t)loVal) && + !codeGen->validImmForInstr(INS_mvn, (target_ssize_t)loVal)) + { + // Needs extra instruction: movw/movt + costSz += 4; + costEx += 1; + } + } + goto COMMON_CNS; + } + case GT_CNS_INT: { // If the constant is a handle then it will need to have a relocation // applied to it. // Any constant that requires a reloc must use the movw/movt sequence // - GenTreeIntConCommon* con = tree->AsIntConCommon(); + GenTreeIntConCommon* con = tree->AsIntConCommon(); + INT32 conVal = con->IconValue(); - if (con->ImmedValNeedsReloc(this) || - !codeGen->validImmForInstr(INS_mov, (target_ssize_t)tree->AsIntCon()->gtIconVal)) + if (con->ImmedValNeedsReloc(this)) { - // Uses movw/movt - costSz = 7; - costEx = 3; + // Requires movw/movt + costSz = 8; + costEx = 2; } - else if (((unsigned)tree->AsIntCon()->gtIconVal) <= 0x00ff) + else if (codeGen->validImmForInstr(INS_add, (target_ssize_t)conVal)) { - // mov Rd, - costSz = 1; + // Typically included with parent oper + costSz = 2; costEx = 1; } - else + else if (codeGen->validImmForInstr(INS_mov, (target_ssize_t)conVal) && + codeGen->validImmForInstr(INS_mvn, (target_ssize_t)conVal)) { - // Uses movw/mvn - costSz = 3; + // Uses mov or mvn + costSz = 4; costEx = 1; } + else + { + // Needs movw/movt + costSz = 8; + costEx = 2; + } goto COMMON_CNS; } #elif defined TARGET_XARCH - case GT_CNS_LNG: - costSz = 10; - costEx = 3; - goto COMMON_CNS; - case GT_CNS_STR: +#ifdef TARGET_AMD64 + costSz = 10; + costEx = 2; +#else // TARGET_X86 costSz = 4; costEx = 1; +#endif goto COMMON_CNS; + case GT_CNS_LNG: case GT_CNS_INT: { + GenTreeIntConCommon* con = tree->AsIntConCommon(); + ssize_t conVal = (oper == GT_CNS_LNG) ? 
(ssize_t)con->LngValue() : con->IconValue(); + bool fitsInVal = true; + +#ifdef TARGET_X86 + if (oper == GT_CNS_LNG) + { + INT64 lngVal = con->LngValue(); + + conVal = (ssize_t)lngVal; // truncate to 32-bits + + fitsInVal = ((INT64)conVal == lngVal); + } +#endif // TARGET_X86 + // If the constant is a handle then it will need to have a relocation // applied to it. // - GenTreeIntConCommon* con = tree->AsIntConCommon(); - bool iconNeedsReloc = con->ImmedValNeedsReloc(this); - if (!iconNeedsReloc && con->FitsInI8()) + if (iconNeedsReloc) + { + costSz = 4; + costEx = 1; + } + else if (fitsInVal && GenTreeIntConCommon::FitsInI8(conVal)) { costSz = 1; costEx = 1; } -#if defined(TARGET_AMD64) - else if (iconNeedsReloc || !con->FitsInI32()) +#ifdef TARGET_AMD64 + else if (!GenTreeIntConCommon::FitsInI32(conVal)) { costSz = 10; - costEx = 3; + costEx = 2; } #endif // TARGET_AMD64 else @@ -3329,21 +3389,83 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree) costSz = 4; costEx = 1; } +#ifdef TARGET_X86 + if (oper == GT_CNS_LNG) + { + costSz += fitsInVal ? 1 : 4; + costEx += 1; + } +#endif // TARGET_X86 + goto COMMON_CNS; } #elif defined(TARGET_ARM64) - case GT_CNS_LNG: + case GT_CNS_STR: + case GT_CNS_LNG: case GT_CNS_INT: - // TODO-ARM64-NYI: Need cost estimates. - costSz = 1; - costEx = 1; + { + GenTreeIntConCommon* con = tree->AsIntConCommon(); + bool iconNeedsReloc = con->ImmedValNeedsReloc(this); + INT64 imm = con->LngValue(); + emitAttr size = EA_SIZE(emitActualTypeSize(tree)); + + if (iconNeedsReloc) + { + costSz = 8; + costEx = 2; + } + else if (emitter::emitIns_valid_imm_for_add(imm, size)) + { + costSz = 2; + costEx = 1; + } + else if (emitter::emitIns_valid_imm_for_mov(imm, size)) + { + costSz = 4; + costEx = 1; + } + else + { + // Arm64 allows any arbitrary 16-bit constant to be loaded into a register halfword + // There are three forms + // movk which loads into any halfword preserving the remaining halfwords + // movz which loads into any halfword zeroing the remaining halfwords + // movn which loads into any halfword zeroing the remaining halfwords then bitwise inverting + // the register + // In some cases it is preferable to use movn, because it has the side effect of filling the + // other halfwords + // with ones + + // Determine whether movn or movz will require the fewest instructions to populate the immediate + bool preferMovz = false; + bool preferMovn = false; + int instructionCount = 4; + + for (int i = (size == EA_8BYTE) ? 48 : 16; i >= 0; i -= 16) + { + if (!preferMovn && (uint16_t(imm >> i) == 0x0000)) + { + preferMovz = true; // by using a movk to start we can save one instruction + instructionCount--; + } + else if (!preferMovz && (uint16_t(imm >> i) == 0xffff)) + { + preferMovn = true; // by using a movn to start we can save one instruction + instructionCount--; + } + } + + costEx = instructionCount; + costSz = 4 * instructionCount; + } + } goto COMMON_CNS; #else - case GT_CNS_LNG: case GT_CNS_STR: + case GT_CNS_LNG: case GT_CNS_INT: #error "Unknown TARGET" #endif diff --git a/src/coreclr/src/jit/jitconfigvalues.h b/src/coreclr/src/jit/jitconfigvalues.h index e6c1ab307e460..329f526e90046 100644 --- a/src/coreclr/src/jit/jitconfigvalues.h +++ b/src/coreclr/src/jit/jitconfigvalues.h @@ -285,6 +285,20 @@ CONFIG_INTEGER(JitDisableSimdVN, W("JitDisableSimdVN"), 0) // Default 0, ValueNu // If 3, disable both SIMD and HW Intrinsic nodes #endif // FEATURE_SIMD +// Default 0, enable the CSE of Constants, including nearby offsets. 
(only for ARM64) +// If 1, disable all the CSE of Constants +// If 2, enable the CSE of Constants but don't combine with nearby offsets. (only for ARM64) +// If 3, enable the CSE of Constants including nearby offsets. (all platforms) +// If 4, enable the CSE of Constants but don't combine with nearby offsets. (all platforms) +// +CONFIG_INTEGER(JitConstCSE, W("JitConstCSE"), 0) + +#define CONST_CSE_ENABLE_ARM64 0 +#define CONST_CSE_DISABLE_ALL 1 +#define CONST_CSE_ENABLE_ARM64_NO_SHARING 2 +#define CONST_CSE_ENABLE_ALL 3 +#define CONST_CSE_ENABLE_ALL_NO_SHARING 4 + /// /// JIT /// diff --git a/src/coreclr/src/jit/morph.cpp b/src/coreclr/src/jit/morph.cpp index 4d420dabaa809..4bf599b42f349 100644 --- a/src/coreclr/src/jit/morph.cpp +++ b/src/coreclr/src/jit/morph.cpp @@ -2707,6 +2707,13 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) indirectCellAddress->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd; #endif indirectCellAddress->SetRegNum(REG_R2R_INDIRECT_PARAM); +#ifdef TARGET_ARM + // Issue #xxxx : Don't attempt to CSE this constant on ARM32 + // + // This constant has specific register requirements, and LSRA doesn't currently correctly + // handle them when the value is in a CSE'd local. + indirectCellAddress->SetDoNotCSE(); +#endif // TARGET_ARM // Push the stub address onto the list of arguments. call->gtCallArgs = gtPrependNewCallArg(indirectCellAddress, call->gtCallArgs); diff --git a/src/coreclr/src/jit/optcse.cpp b/src/coreclr/src/jit/optcse.cpp index 46e834df0dc4f..1b70f7a027b61 100644 --- a/src/coreclr/src/jit/optcse.cpp +++ b/src/coreclr/src/jit/optcse.cpp @@ -401,10 +401,27 @@ void Compiler::optValnumCSE_Init() // unsigned Compiler::optValnumCSE_Index(GenTree* tree, Statement* stmt) { - unsigned key; + size_t key; unsigned hash; unsigned hval; CSEdsc* hashDsc; + bool isIntConstHash = false; + bool enableSharedConstCSE = false; + int configValue = JitConfig.JitConstCSE(); + +#if defined(TARGET_ARM64) + // ARM64 - allow to combine with nearby offsets, when config is not 2 or 4 + if ((configValue != CONST_CSE_ENABLE_ARM64_NO_SHARING) && (configValue != CONST_CSE_ENABLE_ALL_NO_SHARING)) + { + enableSharedConstCSE = true; + } +#endif // TARGET_ARM64 + + // All Platforms - also allow to combine with nearby offsets, when config is 3 + if (configValue == CONST_CSE_ENABLE_ALL) + { + enableSharedConstCSE = true; + } // We use the liberal Value numbers when building the set of CSE ValueNum vnLib = tree->GetVN(VNK_Liberal); @@ -446,11 +463,11 @@ unsigned Compiler::optValnumCSE_Index(GenTree* tree, Statement* stmt) // if (vnOp2Lib != vnLib) { - key = (unsigned)vnLib; // include the exc set in the hash key + key = vnLib; // include the exc set in the hash key } else { - key = (unsigned)vnLibNorm; + key = vnLibNorm; } // If we didn't do the above we would have op1 as the CSE def @@ -459,14 +476,36 @@ unsigned Compiler::optValnumCSE_Index(GenTree* tree, Statement* stmt) // assert(vnLibNorm == vnStore->VNNormalValue(vnOp2Lib)); } - else // Not a GT_COMMA + else if (enableSharedConstCSE && tree->IsIntegralConst()) { - key = (unsigned)vnLibNorm; + assert(vnStore->IsVNConstant(vnLibNorm)); + key = vnStore->CoercedConstantValue(vnLibNorm); + + // We don't shared small offset constants when we require a reloc + if (!tree->AsIntConCommon()->ImmedValNeedsReloc(this)) + { + // Make constants that have the same upper bits use the same key + + // Shift the key right by CSE_CONST_SHARED_LOW_BITS bits, this sets the upper bits to zero + key >>= CSE_CONST_SHARED_LOW_BITS; + } + assert((key & 
TARGET_SIGN_BIT) == 0); + + // We use the sign bit of 'key' as the flag + // that we are hashing constants (with a shared offset) + key |= TARGET_SIGN_BIT; + } + else // Not a GT_COMMA or a GT_CNS_INT + { + key = vnLibNorm; } // Compute the hash value for the expression - hash = key; + hash = (unsigned)key; +#ifdef TARGET_64BIT + hash ^= (unsigned)(key >> 32); +#endif hash *= (unsigned)(s_optCSEhashSize + 1); hash >>= 7; @@ -480,6 +519,12 @@ unsigned Compiler::optValnumCSE_Index(GenTree* tree, Statement* stmt) { if (hashDsc->csdHashKey == key) { + // Check for mismatched types on GT_CNS_INT nodes + if ((tree->OperGet() == GT_CNS_INT) && (tree->TypeGet() != hashDsc->csdTree->TypeGet())) + { + continue; + } + treeStmtLst* newElem; /* Have we started the list of matching nodes? */ @@ -585,6 +630,8 @@ unsigned Compiler::optValnumCSE_Index(GenTree* tree, Statement* stmt) hashDsc = new (this, CMK_CSE) CSEdsc; hashDsc->csdHashKey = key; + hashDsc->csdConstDefValue = 0; + hashDsc->csdConstDefVN = vnStore->VNForNull(); // uninit value hashDsc->csdIndex = 0; hashDsc->csdLiveAcrossCall = false; hashDsc->csdDefCount = 0; @@ -645,8 +692,17 @@ unsigned Compiler::optValnumCSE_Index(GenTree* tree, Statement* stmt) #ifdef DEBUG if (verbose) { - printf("\nCSE candidate #%02u, vn=", CSEindex); - vnPrint(key, 0); + printf("\nCSE candidate #%02u, key=", CSEindex); + if (!Compiler::Is_Shared_Const_CSE(key)) + { + vnPrint((unsigned)key, 0); + } + else + { + size_t kVal = Compiler::Decode_Shared_Const_CSE_Value(key); + printf("K_%p", dspPtr(kVal)); + } + printf(" in " FMT_BB ", [cost=%2u, size=%2u]: \n", compCurBB->bbNum, tree->GetCostEx(), tree->GetCostSz()); gtDispTree(tree); } @@ -666,6 +722,29 @@ unsigned Compiler::optValnumCSE_Locate() { // Locate CSE candidates and assign them indices + bool enableConstCSE = true; + + int configValue = JitConfig.JitConstCSE(); + + // all platforms - disable CSE of constant values when config is 1 + if (configValue == CONST_CSE_DISABLE_ALL) + { + enableConstCSE = false; + } + +#if !defined(TARGET_ARM64) + // non-ARM64 platforms - disable by default + // + enableConstCSE = false; + + // Check for the two enable cases for all platforms + // + if ((configValue == CONST_CSE_ENABLE_ALL) || (configValue == CONST_CSE_ENABLE_ALL_NO_SHARING)) + { + enableConstCSE = true; + } +#endif + for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext) { /* Make the block publicly available */ @@ -691,6 +770,16 @@ unsigned Compiler::optValnumCSE_Locate() optCseUpdateCheckedBoundMap(tree); } + // Don't allow CSE of constants if it is disabled + // + if (tree->IsIntegralConst()) + { + if (!enableConstCSE) + { + continue; + } + } + if (!optIsCSEcandidate(tree)) { continue; @@ -701,15 +790,17 @@ unsigned Compiler::optValnumCSE_Locate() continue; } - // Don't CSE constant values, instead let the Value Number - // based Assertion Prop phase handle them. Here, unlike - // the rest of optCSE, we use the conservative value number + // We want to CSE simple constant leaf nodes, but we don't want to + // CSE non-leaf trees that compute CSE constant values. + // Instead we let the Value Number based Assertion Prop phase handle them. + // + // Here, unlike the rest of optCSE, we use the conservative value number // rather than the liberal one, since the conservative one // is what the Value Number based Assertion Prop will use // and the point is to avoid optimizing cases that it will // handle. 
// - if (vnStore->IsVNConstant(vnStore->VNConservativeNormalValue(tree->gtVNPair))) + if (!tree->OperIsLeaf() && vnStore->IsVNConstant(vnStore->VNConservativeNormalValue(tree->gtVNPair))) { continue; } @@ -1428,23 +1519,28 @@ void Compiler::optValnumCSE_Availablity() } } - // Record or update the value of desc->defConservNormVN + // For shared const CSE we don't set/use the defConservNormVN // - ValueNum theConservNormVN = vnStore->VNConservativeNormalValue(tree->gtVNPair); - - // Is defConservNormVN still set to the uninit marker value of VNForNull() ? - if (desc->defConservNormVN == vnStore->VNForNull()) - { - // This is the first def that we have visited, set defConservNormVN - desc->defConservNormVN = theConservNormVN; - } - else + if (!Is_Shared_Const_CSE(desc->csdHashKey)) { - // Check to see if all defs have the same conservative normal VN - if (theConservNormVN != desc->defConservNormVN) + // Record or update the value of desc->defConservNormVN + // + ValueNum theConservNormVN = vnStore->VNConservativeNormalValue(tree->gtVNPair); + + // Is defConservNormVN still set to the uninit marker value of VNForNull() ? + if (desc->defConservNormVN == vnStore->VNForNull()) { - // This candidate has defs with differing conservative normal VNs, mark it with NoVN - desc->defConservNormVN = ValueNumStore::NoVN; // record the marker for differing VNs + // This is the first def that we have visited, set defConservNormVN + desc->defConservNormVN = theConservNormVN; + } + else + { + // Check to see if all defs have the same conservative normal VN + if (theConservNormVN != desc->defConservNormVN) + { + // This candidate has defs with differing conservative normal VNs, mark it with NoVN + desc->defConservNormVN = ValueNumStore::NoVN; // record the marker for differing VNs + } } } @@ -1894,9 +1990,19 @@ class CSE_Heuristic cost = dsc->csdTree->GetCostEx(); } - printf("CSE #%02u, {$%-3x, $%-3x} useCnt=%d: [def=%3u, use=%3u, cost=%3u%s]\n :: ", - dsc->csdIndex, dsc->csdHashKey, dsc->defExcSetPromise, dsc->csdUseCount, def, use, cost, - dsc->csdLiveAcrossCall ? ", call" : " "); + if (!Compiler::Is_Shared_Const_CSE(dsc->csdHashKey)) + { + printf("CSE #%02u, {$%-3x, $%-3x} useCnt=%d: [def=%3u, use=%3u, cost=%3u%s]\n :: ", + dsc->csdIndex, dsc->csdHashKey, dsc->defExcSetPromise, dsc->csdUseCount, def, use, cost, + dsc->csdLiveAcrossCall ? ", call" : " "); + } + else + { + size_t kVal = Compiler::Decode_Shared_Const_CSE_Value(dsc->csdHashKey); + printf("CSE #%02u, {K_%p} useCnt=%d: [def=%3u, use=%3u, cost=%3u%s]\n :: ", dsc->csdIndex, + dspPtr(kVal), dsc->csdUseCount, def, use, cost, + dsc->csdLiveAcrossCall ? ", call" : " "); + } m_pCompiler->gtDispTree(expr, nullptr, nullptr, true); } @@ -2660,8 +2766,7 @@ class CSE_Heuristic // // Later we will unmark any nested CSE's for the CSE uses. // - Compiler::CSEdsc* dsc = successfulCandidate->CseDsc(); - Compiler::treeStmtLst* lst; + Compiler::CSEdsc* dsc = successfulCandidate->CseDsc(); // If there's just a single def for the CSE, we'll put this // CSE into SSA form on the fly. We won't need any PHIs. @@ -2678,53 +2783,122 @@ class CSE_Heuristic cseSsaNum = m_pCompiler->lvaTable[cseLclVarNum].lvPerSsaData.AllocSsaNum(allocator); } -#ifdef DEBUG // Verify that all of the ValueNumbers in this list are correct as // Morph will change them when it performs a mutating operation. 
// - ValueNum firstVN = ValueNumStore::NoVN; - ValueNum currVN; - bool allSame = true; + bool setRefCnt = true; + bool allSame = true; + bool isSharedConst = Compiler::Is_Shared_Const_CSE(dsc->csdHashKey); + ValueNum bestVN = ValueNumStore::NoVN; + bool bestIsDef = false; + ssize_t bestConstValue = 0; + Compiler::treeStmtLst* lst = dsc->csdTreeList; - lst = dsc->csdTreeList; while (lst != nullptr) { // Ignore this node if the gtCSEnum value has been cleared if (IS_CSE_INDEX(lst->tslTree->gtCSEnum)) { // We used the liberal Value numbers when building the set of CSE - currVN = m_pCompiler->vnStore->VNLiberalNormalValue(lst->tslTree->gtVNPair); + ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(lst->tslTree->gtVNPair); assert(currVN != ValueNumStore::NoVN); + ssize_t curConstValue = isSharedConst ? m_pCompiler->vnStore->CoercedConstantValue(currVN) : 0; - if (firstVN == ValueNumStore::NoVN) + GenTree* exp = lst->tslTree; + bool isDef = IS_CSE_DEF(exp->gtCSEnum); + + if (bestVN == ValueNumStore::NoVN) { - firstVN = currVN; + // first entry + // set bestVN + bestVN = currVN; + + if (isSharedConst) + { + // set bestConstValue and bestIsDef + bestConstValue = curConstValue; + bestIsDef = isDef; + } } - else if (currVN != firstVN) + else if (currVN != bestVN) { + assert(isSharedConst); // Must be true when we have differing VNs + + // subsequent entry + // clear allSame and check for a lower constant allSame = false; - break; + + ssize_t diff = curConstValue - bestConstValue; + + // The ARM64 ldr addressing modes allow for a subtraction of up to 255 + // so we will allow the diff to be up to -255 before replacing a CSE def + // This will minimize the number of extra subtract instructions. + // + if ((bestIsDef && (diff < -255)) || (!bestIsDef && (diff < 0))) + { + // set new bestVN, bestConstValue and bestIsDef + bestVN = currVN; + bestConstValue = curConstValue; + bestIsDef = isDef; + } + } + + BasicBlock* blk = lst->tslBlock; + BasicBlock::weight_t curWeight = blk->getBBWeight(m_pCompiler); + + if (setRefCnt) + { + m_pCompiler->lvaTable[cseLclVarNum].setLvRefCnt(1); + m_pCompiler->lvaTable[cseLclVarNum].setLvRefCntWtd(curWeight); + setRefCnt = false; + } + else + { + m_pCompiler->lvaTable[cseLclVarNum].incRefCnts(curWeight, m_pCompiler); + } + + // A CSE Def references the LclVar twice + // + if (isDef) + { + m_pCompiler->lvaTable[cseLclVarNum].incRefCnts(curWeight, m_pCompiler); } } lst = lst->tslNext; } - if (!allSame) + + dsc->csdConstDefValue = bestConstValue; + dsc->csdConstDefVN = bestVN; + +#ifdef DEBUG + if (m_pCompiler->verbose) { - lst = dsc->csdTreeList; - GenTree* firstTree = lst->tslTree; - printf("In %s, CSE (oper = %s, type = %s) has differing VNs: ", m_pCompiler->info.compFullName, - GenTree::OpName(firstTree->OperGet()), varTypeName(firstTree->TypeGet())); - while (lst != nullptr) + if (!allSame) { - if (IS_CSE_INDEX(lst->tslTree->gtCSEnum)) + if (isSharedConst) + { + printf("\nWe have shared Const CSE's and selected " FMT_VN " with a value of 0x%p as the base.\n", + dsc->csdConstDefVN, dspPtr(dsc->csdConstDefValue)); + } + else // !isSharedConst { - currVN = m_pCompiler->vnStore->VNLiberalNormalValue(lst->tslTree->gtVNPair); - printf("0x%x(%s " FMT_VN ") ", lst->tslTree, IS_CSE_USE(lst->tslTree->gtCSEnum) ? 
"use" : "def", - currVN); + lst = dsc->csdTreeList; + GenTree* firstTree = lst->tslTree; + printf("In %s, CSE (oper = %s, type = %s) has differing VNs: ", m_pCompiler->info.compFullName, + GenTree::OpName(firstTree->OperGet()), varTypeName(firstTree->TypeGet())); + while (lst != nullptr) + { + if (IS_CSE_INDEX(lst->tslTree->gtCSEnum)) + { + ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(lst->tslTree->gtVNPair); + printf("0x%x(%s " FMT_VN ") ", lst->tslTree, + IS_CSE_USE(lst->tslTree->gtCSEnum) ? "use" : "def", currVN); + } + lst = lst->tslNext; + } + printf("\n"); } - lst = lst->tslNext; } - printf("\n"); } #endif // DEBUG @@ -2762,7 +2936,8 @@ class CSE_Heuristic // The cseLclVarType must be a compatible with expTyp // - noway_assert(IsCompatibleType(cseLclVarTyp, expTyp)); + ValueNumStore* vnStore = m_pCompiler->vnStore; + noway_assert(IsCompatibleType(cseLclVarTyp, expTyp) || (dsc->csdConstDefVN != vnStore->VNForNull())); // This will contain the replacement tree for exp // It will either be the CSE def or CSE ref @@ -2790,63 +2965,86 @@ class CSE_Heuristic // We will replace the CSE ref with a new tree // this is typically just a simple use of the new CSE LclVar // - ValueNumStore* vnStore = m_pCompiler->vnStore; - cse = m_pCompiler->gtNewLclvNode(cseLclVarNum, cseLclVarTyp); - // Assign the ssa num for the use. Note it may be the reserved num. - cse->AsLclVarCommon()->SetSsaNum(cseSsaNum); - - // assign the proper ValueNumber, A CSE use discards any exceptions - cse->gtVNPair = vnStore->VNPNormalPair(exp->gtVNPair); + // Create a reference to the CSE temp + GenTree* cseLclVar = m_pCompiler->gtNewLclvNode(cseLclVarNum, cseLclVarTyp); + cseLclVar->gtVNPair.SetBoth(dsc->csdConstDefVN); - ValueNum theConservativeVN = successfulCandidate->CseDsc()->defConservNormVN; + // Assign the ssa num for the lclvar use. Note it may be the reserved num. + cseLclVar->AsLclVarCommon()->SetSsaNum(cseSsaNum); - if (theConservativeVN != ValueNumStore::NoVN) + cse = cseLclVar; + if (isSharedConst) { - // All defs of this CSE share the same normal conservative VN, and we are rewriting this - // use to fetch the same value with no reload, so we can safely propagate that - // conservative VN to this use. This can help range check elimination later on. - cse->gtVNPair.SetConservative(theConservativeVN); - - // If the old VN was flagged as a checked bound, propagate that to the new VN - // to make sure assertion prop will pay attention to this VN. - ValueNum oldVN = exp->gtVNPair.GetConservative(); - if (!vnStore->IsVNConstant(theConservativeVN) && vnStore->IsVNCheckedBound(oldVN)) + ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(exp->gtVNPair); + ssize_t curValue = m_pCompiler->vnStore->CoercedConstantValue(currVN); + ssize_t delta = curValue - dsc->csdConstDefValue; + if (delta != 0) { - vnStore->SetVNIsCheckedBound(theConservativeVN); + GenTree* deltaNode = m_pCompiler->gtNewIconNode(delta, cseLclVarTyp); + cse = m_pCompiler->gtNewOperNode(GT_ADD, cseLclVarTyp, cseLclVar, deltaNode); + cse->SetDoNotCSE(); } + } - GenTree* cmp; - if ((m_pCompiler->optCseCheckedBoundMap != nullptr) && - (m_pCompiler->optCseCheckedBoundMap->Lookup(exp, &cmp))) - { - // Propagate the new value number to this compare node as well, since - // subsequent range check elimination will try to correlate it with - // the other appearances that are getting CSEd. 
+ // assign the proper ValueNumber, A CSE use discards any exceptions + cse->gtVNPair = vnStore->VNPNormalPair(exp->gtVNPair); - ValueNum oldCmpVN = cmp->gtVNPair.GetConservative(); - ValueNum newCmpArgVN; + // shared const CSE has the correct value number assigned + // and both liberal and conservative are identical + // and they do not use theConservativeVN + // + if (!isSharedConst) + { + ValueNum theConservativeVN = successfulCandidate->CseDsc()->defConservNormVN; - ValueNumStore::CompareCheckedBoundArithInfo info; - if (vnStore->IsVNCompareCheckedBound(oldCmpVN)) + if (theConservativeVN != ValueNumStore::NoVN) + { + // All defs of this CSE share the same normal conservative VN, and we are rewriting this + // use to fetch the same value with no reload, so we can safely propagate that + // conservative VN to this use. This can help range check elimination later on. + cse->gtVNPair.SetConservative(theConservativeVN); + + // If the old VN was flagged as a checked bound, propagate that to the new VN + // to make sure assertion prop will pay attention to this VN. + ValueNum oldVN = exp->gtVNPair.GetConservative(); + if (!vnStore->IsVNConstant(theConservativeVN) && vnStore->IsVNCheckedBound(oldVN)) { - // Comparison is against the bound directly. - - newCmpArgVN = theConservativeVN; - vnStore->GetCompareCheckedBound(oldCmpVN, &info); + vnStore->SetVNIsCheckedBound(theConservativeVN); } - else + + GenTree* cmp; + if ((m_pCompiler->optCseCheckedBoundMap != nullptr) && + (m_pCompiler->optCseCheckedBoundMap->Lookup(exp, &cmp))) { - // Comparison is against the bound +/- some offset. + // Propagate the new value number to this compare node as well, since + // subsequent range check elimination will try to correlate it with + // the other appearances that are getting CSEd. - assert(vnStore->IsVNCompareCheckedBoundArith(oldCmpVN)); - vnStore->GetCompareCheckedBoundArithInfo(oldCmpVN, &info); - newCmpArgVN = vnStore->VNForFunc(vnStore->TypeOfVN(info.arrOp), (VNFunc)info.arrOper, - info.arrOp, theConservativeVN); + ValueNum oldCmpVN = cmp->gtVNPair.GetConservative(); + ValueNum newCmpArgVN; + + ValueNumStore::CompareCheckedBoundArithInfo info; + if (vnStore->IsVNCompareCheckedBound(oldCmpVN)) + { + // Comparison is against the bound directly. + + newCmpArgVN = theConservativeVN; + vnStore->GetCompareCheckedBound(oldCmpVN, &info); + } + else + { + // Comparison is against the bound +/- some offset. 
+ + assert(vnStore->IsVNCompareCheckedBoundArith(oldCmpVN)); + vnStore->GetCompareCheckedBoundArithInfo(oldCmpVN, &info); + newCmpArgVN = vnStore->VNForFunc(vnStore->TypeOfVN(info.arrOp), (VNFunc)info.arrOper, + info.arrOp, theConservativeVN); + } + ValueNum newCmpVN = vnStore->VNForFunc(vnStore->TypeOfVN(oldCmpVN), (VNFunc)info.cmpOper, + info.cmpOp, newCmpArgVN); + cmp->gtVNPair.SetConservative(newCmpVN); } - ValueNum newCmpVN = vnStore->VNForFunc(vnStore->TypeOfVN(oldCmpVN), (VNFunc)info.cmpOper, - info.cmpOp, newCmpArgVN); - cmp->gtVNPair.SetConservative(newCmpVN); } } #ifdef DEBUG @@ -2878,10 +3076,9 @@ class CSE_Heuristic } #endif - GenTree* cseVal = cse; - GenTree* curSideEff = sideEffList; - ValueNumStore* vnStore = m_pCompiler->vnStore; - ValueNumPair exceptions_vnp = ValueNumStore::VNPForEmptyExcSet(); + GenTree* cseVal = cse; + GenTree* curSideEff = sideEffList; + ValueNumPair exceptions_vnp = ValueNumStore::VNPForEmptyExcSet(); while ((curSideEff->OperGet() == GT_COMMA) || (curSideEff->OperGet() == GT_ASG)) { @@ -2936,6 +3133,17 @@ class CSE_Heuristic exp->gtCSEnum = NO_CSE; // clear the gtCSEnum field GenTree* val = exp; + if (isSharedConst) + { + ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(exp->gtVNPair); + ssize_t curValue = m_pCompiler->vnStore->CoercedConstantValue(currVN); + ssize_t delta = curValue - dsc->csdConstDefValue; + if (delta != 0) + { + val = m_pCompiler->gtNewIconNode(dsc->csdConstDefValue, cseLclVarTyp); + val->gtVNPair.SetBoth(dsc->csdConstDefVN); + } + } /* Create an assignment of the value to the temp */ GenTree* asg = m_pCompiler->gtNewTempAssign(cseLclVarNum, val); @@ -2977,19 +3185,37 @@ class CSE_Heuristic } /* Create a reference to the CSE temp */ - GenTree* ref = m_pCompiler->gtNewLclvNode(cseLclVarNum, cseLclVarTyp); - ref->gtVNPair = val->gtVNPair; // The new 'ref' is the same as 'val' + GenTree* cseLclVar = m_pCompiler->gtNewLclvNode(cseLclVarNum, cseLclVarTyp); + cseLclVar->gtVNPair.SetBoth(dsc->csdConstDefVN); + + // Assign the ssa num for the lclvar use. Note it may be the reserved num. + cseLclVar->AsLclVarCommon()->SetSsaNum(cseSsaNum); - // Assign the ssa num for the ref use. Note it may be the reserved num. 
- ref->AsLclVarCommon()->SetSsaNum(cseSsaNum); + GenTree* cseUse = cseLclVar; + if (isSharedConst) + { + ValueNum currVN = m_pCompiler->vnStore->VNLiberalNormalValue(exp->gtVNPair); + ssize_t curValue = m_pCompiler->vnStore->CoercedConstantValue(currVN); + ssize_t delta = curValue - dsc->csdConstDefValue; + if (delta != 0) + { + GenTree* deltaNode = m_pCompiler->gtNewIconNode(delta, cseLclVarTyp); + cseUse = m_pCompiler->gtNewOperNode(GT_ADD, cseLclVarTyp, cseLclVar, deltaNode); + cseUse->SetDoNotCSE(); + } + } + cseUse->gtVNPair = val->gtVNPair; // The 'cseUse' is equal to 'val' /* Create a comma node for the CSE assignment */ - cse = m_pCompiler->gtNewOperNode(GT_COMMA, expTyp, origAsg, ref); - cse->gtVNPair = ref->gtVNPair; // The comma's value is the same as 'val' - // as the assignment to the CSE LclVar - // cannot add any new exceptions + cse = m_pCompiler->gtNewOperNode(GT_COMMA, expTyp, origAsg, cseUse); + cse->gtVNPair = cseUse->gtVNPair; // The comma's value is the same as 'val' + // as the assignment to the CSE LclVar + // cannot add any new exceptions } + cse->CopyReg(exp); // The cse inheirits any reg num property from the orginal exp node + exp->ClearRegNum(); // The exp node (for a CSE def) no longer has a register requirement + // Walk the statement 'stmt' and find the pointer // in the tree is pointing to 'exp' // @@ -3069,9 +3295,19 @@ class CSE_Heuristic #ifdef DEBUG if (m_pCompiler->verbose) { - printf("\nConsidering CSE #%02u {$%-3x, $%-3x} [def=%3u, use=%3u, cost=%3u%s]\n", candidate.CseIndex(), - dsc->csdHashKey, dsc->defExcSetPromise, candidate.DefCount(), candidate.UseCount(), - candidate.Cost(), dsc->csdLiveAcrossCall ? ", call" : " "); + if (!Compiler::Is_Shared_Const_CSE(dsc->csdHashKey)) + { + printf("\nConsidering CSE #%02u {$%-3x, $%-3x} [def=%3u, use=%3u, cost=%3u%s]\n", + candidate.CseIndex(), dsc->csdHashKey, dsc->defExcSetPromise, candidate.DefCount(), + candidate.UseCount(), candidate.Cost(), dsc->csdLiveAcrossCall ? ", call" : " "); + } + else + { + size_t kVal = Compiler::Decode_Shared_Const_CSE_Value(dsc->csdHashKey); + printf("\nConsidering CSE #%02u {K_%p} [def=%3u, use=%3u, cost=%3u%s]\n", candidate.CseIndex(), + dspPtr(kVal), candidate.DefCount(), candidate.UseCount(), candidate.Cost(), + dsc->csdLiveAcrossCall ? 
", call" : " "); + } printf("CSE Expression : \n"); m_pCompiler->gtDispTree(candidate.Expr()); printf("\n"); @@ -3306,8 +3542,11 @@ bool Compiler::optIsCSEcandidate(GenTree* tree) return (tree->AsOp()->gtOp1->gtOper != GT_ARR_ELEM); - case GT_CNS_INT: case GT_CNS_LNG: +#ifndef TARGET_64BIT + return false; // Don't CSE 64-bit constants on 32-bit platforms +#endif + case GT_CNS_INT: case GT_CNS_DBL: case GT_CNS_STR: return true; // We reach here only when CSE_CONSTS is enabled diff --git a/src/coreclr/src/jit/target.h b/src/coreclr/src/jit/target.h index 6e79bfac3df53..4b6c010e7739d 100644 --- a/src/coreclr/src/jit/target.h +++ b/src/coreclr/src/jit/target.h @@ -31,12 +31,15 @@ // with static const members of Target #if defined(TARGET_XARCH) #define REGMASK_BITS 32 +#define CSE_CONST_SHARED_LOW_BITS 16 #elif defined(TARGET_ARM) #define REGMASK_BITS 64 +#define CSE_CONST_SHARED_LOW_BITS 12 #elif defined(TARGET_ARM64) #define REGMASK_BITS 64 +#define CSE_CONST_SHARED_LOW_BITS 12 #else #error Unsupported or unset target architecture @@ -1999,9 +2002,13 @@ C_ASSERT((RBM_INT_CALLEE_SAVED & RBM_FPBASE) == RBM_NONE); #ifdef TARGET_64BIT typedef unsigned __int64 target_size_t; typedef __int64 target_ssize_t; -#else // !TARGET_64BIT +#define TARGET_SIGN_BIT (1ULL << 63) + +#else // !TARGET_64BIT typedef unsigned int target_size_t; typedef int target_ssize_t; +#define TARGET_SIGN_BIT (1ULL << 31) + #endif // !TARGET_64BIT C_ASSERT(sizeof(target_size_t) == TARGET_POINTER_SIZE); diff --git a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/beq_r4.ilproj b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/beq_r4.ilproj index cef275ea49426..ab20898083405 100644 --- a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/beq_r4.ilproj +++ b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/beq_r4.ilproj @@ -2,7 +2,7 @@ Exe true - 1 + 0 PdbOnly diff --git a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/beq_r8.ilproj b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/beq_r8.ilproj index e92e57de25c75..ca0982693df23 100644 --- a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/beq_r8.ilproj +++ b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/beq_r8.ilproj @@ -2,7 +2,7 @@ Exe true - 1 + 0 PdbOnly diff --git a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_r4.ilproj b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_r4.ilproj index b5e9a76cdac4e..191bb09523af6 100644 --- a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_r4.ilproj +++ b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_r4.ilproj @@ -2,7 +2,7 @@ Exe true - 1 + 0 PdbOnly diff --git a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_r8.ilproj b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_r8.ilproj index 62ed8ad09721d..5e8c0aed782cf 100644 --- a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_r8.ilproj +++ b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_r8.ilproj @@ -2,7 +2,7 @@ Exe true - 1 + 0 PdbOnly diff --git a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_un_r4.ilproj b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_un_r4.ilproj index ba5b4cd857202..b0a46391cdbd7 100644 --- a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_un_r4.ilproj +++ b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_un_r4.ilproj @@ -2,7 +2,7 @@ Exe true - 1 + 0 PdbOnly diff --git a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_un_r8.ilproj b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_un_r8.ilproj index ca67fefcb7644..8d689526c4de6 100644 --- 
a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_un_r8.ilproj +++ b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bge_un_r8.ilproj @@ -2,7 +2,7 @@ Exe true - 1 + 0 PdbOnly diff --git a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bne_un_r4.ilproj b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bne_un_r4.ilproj index 891402dd071ab..a3b92f67f70bc 100644 --- a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bne_un_r4.ilproj +++ b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bne_un_r4.ilproj @@ -2,7 +2,7 @@ Exe true - 1 + 0 PdbOnly diff --git a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bne_un_r8.ilproj b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bne_un_r8.ilproj index eb0db5833a1da..ededde0d346d1 100644 --- a/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bne_un_r8.ilproj +++ b/src/tests/JIT/IL_Conformance/Old/Conformance_Base/bne_un_r8.ilproj @@ -2,7 +2,7 @@ Exe true - 1 + 0 PdbOnly diff --git a/src/tests/JIT/Methodical/NaN/arithm64_cs_d.csproj b/src/tests/JIT/Methodical/NaN/arithm64_cs_d.csproj index 3c8a6b09632fa..882dbd5ce4de9 100644 --- a/src/tests/JIT/Methodical/NaN/arithm64_cs_d.csproj +++ b/src/tests/JIT/Methodical/NaN/arithm64_cs_d.csproj @@ -1,7 +1,7 @@ Exe - 1 + 0 Full diff --git a/src/tests/JIT/Methodical/NaN/arithm64_cs_do.csproj b/src/tests/JIT/Methodical/NaN/arithm64_cs_do.csproj index 645b85538bf65..63895dbcfb49d 100644 --- a/src/tests/JIT/Methodical/NaN/arithm64_cs_do.csproj +++ b/src/tests/JIT/Methodical/NaN/arithm64_cs_do.csproj @@ -1,7 +1,7 @@ Exe - 1 + 0 Full diff --git a/src/tests/JIT/Methodical/NaN/arithm64_cs_r.csproj b/src/tests/JIT/Methodical/NaN/arithm64_cs_r.csproj index f70b11917026e..750ade6906212 100644 --- a/src/tests/JIT/Methodical/NaN/arithm64_cs_r.csproj +++ b/src/tests/JIT/Methodical/NaN/arithm64_cs_r.csproj @@ -1,7 +1,7 @@ Exe - 1 + 0 None diff --git a/src/tests/JIT/Methodical/NaN/arithm64_cs_ro.csproj b/src/tests/JIT/Methodical/NaN/arithm64_cs_ro.csproj index a453a807abb14..21d4a502b6b40 100644 --- a/src/tests/JIT/Methodical/NaN/arithm64_cs_ro.csproj +++ b/src/tests/JIT/Methodical/NaN/arithm64_cs_ro.csproj @@ -1,7 +1,7 @@ Exe - 1 + 0 None
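
The following is a standalone sketch (not JIT code) of the shared-constant hash-key scheme introduced above in compiler.h and optcse.cpp: the constant's low CSE_CONST_SHARED_LOW_BITS are dropped from the key and the key's sign bit is reserved as the "shared constant" flag, so that constants differing only in those low bits land in the same CSE candidate. CSE_CONST_SHARED_LOW_BITS and TARGET_SIGN_BIT are the values this patch adds to target.h; EncodeSharedConstKey and the 64-bit host assumption are illustrative.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Values from target.h in this patch: 12 low bits are shared on ARM targets,
// 16 on XArch; the key's sign bit marks a shared-constant entry.
// This sketch assumes a 64-bit host (size_t is 64 bits wide).
constexpr int    CSE_CONST_SHARED_LOW_BITS = 12;              // ARM64 setting
constexpr size_t TARGET_SIGN_BIT           = size_t(1) << 63; // 64-bit target

// Build the hash key for an integral constant that may share a CSE
// with nearby constants (differing only in the low 12 bits).
// Illustrative helper; the real code does this inline in optValnumCSE_Index.
size_t EncodeSharedConstKey(size_t constVal)
{
    size_t key = constVal >> CSE_CONST_SHARED_LOW_BITS; // drop the shared low bits
    assert((key & TARGET_SIGN_BIT) == 0);                // top bit must be free
    return key | TARGET_SIGN_BIT;                        // flag: this key is a shared const
}

bool Is_Shared_Const_CSE(size_t key)
{
    return (key & TARGET_SIGN_BIT) != 0;
}

size_t Decode_Shared_Const_CSE_Value(size_t key)
{
    return (key & ~TARGET_SIGN_BIT) << CSE_CONST_SHARED_LOW_BITS; // base value, low bits zeroed
}

int main()
{
    // Two constants that differ only in their low 12 bits hash to the same key,
    // so they become one CSE candidate whose uses are rewritten as base + delta.
    size_t a  = 0x12345678;
    size_t b  = 0x12345123;
    size_t ka = EncodeSharedConstKey(a);
    size_t kb = EncodeSharedConstKey(b);
    assert(ka == kb && Is_Shared_Const_CSE(ka));
    printf("shared base value: 0x%zx\n", Decode_Shared_Const_CSE_Value(ka)); // 0x12345000
    return 0;
}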
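
A standalone restatement, under the same assumptions as the ARM64 branch of gtSetEvalOrder above, of how the new cost estimate counts mov-family instructions for a 64-bit immediate: start from the four-instruction worst case and subtract one for each halfword that a leading movz (all-zero halfword) or movn (all-ones halfword) makes unnecessary. The function name and example immediates are illustrative only.

#include <cstdint>
#include <cstdio>

// Count the mov/movz/movn/movk instructions needed to load a 64-bit
// immediate on ARM64, following the halfword scan in gtSetEvalOrder above.
int CountArm64MovInstructions(int64_t imm)
{
    bool preferMovz       = false;
    bool preferMovn       = false;
    int  instructionCount = 4; // worst case: one instruction per 16-bit halfword

    for (int i = 48; i >= 0; i -= 16)
    {
        uint16_t halfword = uint16_t(imm >> i);
        if (!preferMovn && (halfword == 0x0000))
        {
            preferMovz = true; // starting with movz zeroes this halfword for free
            instructionCount--;
        }
        else if (!preferMovz && (halfword == 0xffff))
        {
            preferMovn = true; // starting with movn fills this halfword with ones for free
            instructionCount--;
        }
    }
    return instructionCount;
}

int main()
{
    printf("%d\n", CountArm64MovInstructions(0x0000000012345678LL));            // 2: movz + movk
    printf("%d\n", CountArm64MovInstructions((int64_t)0xffffffff00001234ULL));  // 2: movn + movk
    return 0;
}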
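
A simplified model of the shared-constant base selection and use rewriting performed by the CSE heuristic above: it keeps a def's value as the base unless another occurrence is more than 255 below it (the ARM64 ldr addressing modes allow a subtraction of up to 255), and every occurrence is then expressed as the CSE temp plus a small delta. The ConstOccurrence struct and ChooseSharedConstBase are stand-ins for the real treeStmtLst/ValueNum plumbing.

#include <cstdint>
#include <cstdio>
#include <vector>

// Simplified stand-in for the CSE occurrence list; the real code walks
// treeStmtLst entries and compares ValueNumbers rather than raw values.
struct ConstOccurrence
{
    int64_t value;
    bool    isDef; // marked as a CSE def rather than a use
};

// Pick the base value stored into the CSE temp, mirroring the heuristic above:
// keep the current best if it is a def, unless another occurrence is more than
// 255 below it; for non-def bests, any lower value wins.
int64_t ChooseSharedConstBase(const std::vector<ConstOccurrence>& occurrences)
{
    bool    haveBest       = false;
    bool    bestIsDef      = false;
    int64_t bestConstValue = 0;

    for (const ConstOccurrence& occ : occurrences)
    {
        if (!haveBest)
        {
            haveBest       = true;
            bestConstValue = occ.value;
            bestIsDef      = occ.isDef;
            continue;
        }
        int64_t diff = occ.value - bestConstValue;
        if ((bestIsDef && (diff < -255)) || (!bestIsDef && (diff < 0)))
        {
            bestConstValue = occ.value;
            bestIsDef      = occ.isDef;
        }
    }
    return bestConstValue;
}

int main()
{
    // Three nearby constants sharing one CSE candidate; the first is a def.
    std::vector<ConstOccurrence> occs = {{0x12345040, true}, {0x12345000, false}, {0x12345400, false}};
    int64_t base = ChooseSharedConstBase(occs); // stays 0x12345040: the lower use is only 64 below the def

    // Each occurrence is then rewritten as "cseTemp + delta" (delta == 0 uses the temp directly).
    for (const ConstOccurrence& occ : occs)
    {
        printf("value 0x%llx -> temp + %lld\n", (unsigned long long)occ.value, (long long)(occ.value - base));
    }
    return 0;
}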
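
Finally, a sketch of how the new JitConstCSE modes map to the two decisions made in optValnumCSE_Locate and optValnumCSE_Index: whether integral constants may become CSE candidates at all, and whether nearby constants may share one candidate. The mode macros are from jitconfigvalues.h in this patch, while ConstCsePolicy and GetConstCsePolicy are illustrative.

#include <cstdio>

// Mode values from jitconfigvalues.h in this patch.
enum JitConstCseMode
{
    CONST_CSE_ENABLE_ARM64            = 0,
    CONST_CSE_DISABLE_ALL             = 1,
    CONST_CSE_ENABLE_ARM64_NO_SHARING = 2,
    CONST_CSE_ENABLE_ALL              = 3,
    CONST_CSE_ENABLE_ALL_NO_SHARING   = 4,
};

struct ConstCsePolicy
{
    bool enableConstCSE;       // may integral constants become CSE candidates at all?
    bool enableSharedConstCSE; // may constants differing only in their low bits share a candidate?
};

// Mirror of the checks in optValnumCSE_Locate / optValnumCSE_Index above.
ConstCsePolicy GetConstCsePolicy(int configValue, bool isArm64)
{
    ConstCsePolicy policy;

    // Constant CSE is on by default only for ARM64; other targets need mode 3 or 4.
    policy.enableConstCSE =
        (configValue != CONST_CSE_DISABLE_ALL) &&
        (isArm64 || (configValue == CONST_CSE_ENABLE_ALL) || (configValue == CONST_CSE_ENABLE_ALL_NO_SHARING));

    // Sharing of nearby offsets: on for ARM64 unless a "no sharing" mode is chosen; mode 3 enables it everywhere.
    policy.enableSharedConstCSE =
        policy.enableConstCSE &&
        ((isArm64 && (configValue != CONST_CSE_ENABLE_ARM64_NO_SHARING) &&
          (configValue != CONST_CSE_ENABLE_ALL_NO_SHARING)) ||
         (configValue == CONST_CSE_ENABLE_ALL));

    return policy;
}

int main()
{
    for (int mode = 0; mode <= 4; mode++)
    {
        ConstCsePolicy arm64 = GetConstCsePolicy(mode, /* isArm64 */ true);
        ConstCsePolicy x64   = GetConstCsePolicy(mode, /* isArm64 */ false);
        printf("mode %d: arm64 cse=%d shared=%d | x64 cse=%d shared=%d\n", mode, (int)arm64.enableConstCSE,
               (int)arm64.enableSharedConstCSE, (int)x64.enableConstCSE, (int)x64.enableSharedConstCSE);
    }
    return 0;
}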