diff --git a/src/coreclr/src/jit/compiler.h b/src/coreclr/src/jit/compiler.h index d09372bd70da7..1fa48c906b2c1 100644 --- a/src/coreclr/src/jit/compiler.h +++ b/src/coreclr/src/jit/compiler.h @@ -4356,11 +4356,10 @@ class Compiler InlineCandidateInfo** ppInlineCandidateInfo, InlineResult* inlineResult); - void impInlineRecordArgInfo(InlineInfo* pInlineInfo, - GenTree* curArgVal, - unsigned argNum, - unsigned __int64 bbFlags, - InlineResult* inlineResult); + void impInlineRecordArgInfo(InlineInfo* pInlineInfo, + GenTree* curArgVal, + unsigned argNum, + InlineResult* inlineResult); void impInlineInitVars(InlineInfo* pInlineInfo); @@ -4665,6 +4664,8 @@ class Compiler BasicBlock* canonicalBlock, flowList* predEdge); + GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType); + #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals // when this is necessary. @@ -10447,6 +10448,7 @@ class GenTreeVisitor case GT_NULLCHECK: case GT_PUTARG_REG: case GT_PUTARG_STK: + case GT_PUTARG_TYPE: case GT_RETURNTRAP: case GT_NOP: case GT_RETURN: diff --git a/src/coreclr/src/jit/compiler.hpp b/src/coreclr/src/jit/compiler.hpp index 1ffe01700e4c2..8d3207c2112ee 100644 --- a/src/coreclr/src/jit/compiler.hpp +++ b/src/coreclr/src/jit/compiler.hpp @@ -4381,6 +4381,7 @@ void GenTree::VisitOperands(TVisitor visitor) case GT_NULLCHECK: case GT_PUTARG_REG: case GT_PUTARG_STK: + case GT_PUTARG_TYPE: #if FEATURE_ARG_SPLIT case GT_PUTARG_SPLIT: #endif // FEATURE_ARG_SPLIT diff --git a/src/coreclr/src/jit/ee_il_dll.cpp b/src/coreclr/src/jit/ee_il_dll.cpp index 30e69dbf98d58..7a7229a2d2dc9 100644 --- a/src/coreclr/src/jit/ee_il_dll.cpp +++ b/src/coreclr/src/jit/ee_il_dll.cpp @@ -360,7 +360,7 @@ unsigned Compiler::eeGetArgSize(CORINFO_ARG_LIST_HANDLE list, CORINFO_SIG_INFO* if (varTypeIsStruct(argType)) { unsigned structSize = info.compCompHnd->getClassSize(argClass); - 
return structSize; // TODO: roundUp() needed here? + return roundUp(structSize, TARGET_POINTER_SIZE); } #endif // UNIX_AMD64_ABI return TARGET_POINTER_SIZE; diff --git a/src/coreclr/src/jit/flowgraph.cpp b/src/coreclr/src/jit/flowgraph.cpp index dac88350022fb..db70d1ecf277b 100644 --- a/src/coreclr/src/jit/flowgraph.cpp +++ b/src/coreclr/src/jit/flowgraph.cpp @@ -22743,7 +22743,11 @@ void Compiler::fgAttachStructInlineeToAsg(GenTree* tree, GenTree* child, CORINFO // If the return type is a struct type and we're on a platform // where structs can be returned in multiple registers, ensure the // call has a suitable parent. - +// +// If the original call type and the substitution type are different +// the function makes necessary updates. It could happen if there was +// an implicit conversion in the inlinee body. +// Compiler::fgWalkResult Compiler::fgUpdateInlineReturnExpressionPlaceHolder(GenTree** pTree, fgWalkData* data) { // All the operations here and in the corresponding postorder @@ -22798,6 +22802,29 @@ Compiler::fgWalkResult Compiler::fgUpdateInlineReturnExpressionPlaceHolder(GenTr } #endif // DEBUG + var_types newType = inlineCandidate->TypeGet(); + + // If we end up swapping type we may need to retype the tree: + if (retType != newType) + { + if ((retType == TYP_BYREF) && (tree->OperGet() == GT_IND)) + { + // - in an RVA static if we've reinterpreted it as a byref; + assert(newType == TYP_I_IMPL); + JITDUMP("Updating type of the return GT_IND expression to TYP_BYREF\n"); + inlineCandidate->gtType = TYP_BYREF; + } + else + { + // - under a call if we changed size of the argument. 
+ GenTree* putArgType = comp->fgCheckCallArgUpdate(data->parent, inlineCandidate, retType); + if (putArgType != nullptr) + { + inlineCandidate = putArgType; + } + } + } + tree->ReplaceWith(inlineCandidate, comp); comp->compCurBB->bbFlags |= (bbFlags & BBF_SPLIT_GAINED); @@ -22809,17 +22836,6 @@ Compiler::fgWalkResult Compiler::fgUpdateInlineReturnExpressionPlaceHolder(GenTr printf("\n"); } #endif // DEBUG - - var_types newType = tree->TypeGet(); - - // If we end up swapping in an RVA static we may need to retype it here, - // if we've reinterpreted it as a byref. - if ((retType != newType) && (retType == TYP_BYREF) && (tree->OperGet() == GT_IND)) - { - assert(newType == TYP_I_IMPL); - JITDUMP("Updating type of the return GT_IND expression to TYP_BYREF\n"); - tree->gtType = TYP_BYREF; - } } // If an inline was rejected and the call returns a struct, we may @@ -23101,8 +23117,16 @@ Compiler::fgWalkResult Compiler::fgLateDevirtualization(GenTree** pTree, fgWalkD } else { - GenTree* foldedTree = comp->gtFoldExpr(tree); - *pTree = foldedTree; + const var_types retType = tree->TypeGet(); + GenTree* foldedTree = comp->gtFoldExpr(tree); + const var_types newType = foldedTree->TypeGet(); + + GenTree* putArgType = comp->fgCheckCallArgUpdate(data->parent, foldedTree, retType); + if (putArgType != nullptr) + { + foldedTree = putArgType; + } + *pTree = foldedTree; } return WALK_CONTINUE; @@ -23799,8 +23823,12 @@ Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) { const InlArgInfo& argInfo = inlArgInfo[argNum]; const bool argIsSingleDef = !argInfo.argHasLdargaOp && !argInfo.argHasStargOp; - GenTree* const argNode = inlArgInfo[argNum].argNode; - unsigned __int64 bbFlags = inlArgInfo[argNum].bbFlags; + GenTree* argNode = inlArgInfo[argNum].argNode; + const bool argHasPutArg = argNode->OperIs(GT_PUTARG_TYPE); + + unsigned __int64 bbFlags = 0; + argNode = argNode->gtSkipPutArgType(); + argNode = argNode->gtRetExprVal(&bbFlags); if (argInfo.argHasTmp) { @@ 
-23820,7 +23848,11 @@ Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) GenTree* argSingleUseNode = argInfo.argBashTmpNode; - if ((argSingleUseNode != nullptr) && !(argSingleUseNode->gtFlags & GTF_VAR_CLONED) && argIsSingleDef) + // argHasPutArg disqualifies the arg from a direct substitution because we don't have information about + // its user. For example: replace `LCL_VAR short` with `PUTARG_TYPE short->LCL_VAR int`, + // we should keep `PUTARG_TYPE` iff the user is a call that needs `short` and delete it otherwise. + if ((argSingleUseNode != nullptr) && !(argSingleUseNode->gtFlags & GTF_VAR_CLONED) && argIsSingleDef && + !argHasPutArg) { // Change the temp in-place to the actual argument. // We currently do not support this for struct arguments, so it must not be a GT_OBJ. @@ -26501,6 +26533,44 @@ void Compiler::fgTailMergeThrowsJumpToHelper(BasicBlock* predBlock, canonicalBlock->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL); } +//------------------------------------------------------------------------ +// fgCheckCallArgUpdate: check if we are replacing a call argument and add GT_PUTARG_TYPE if necessary. +// +// Arguments: +// parent - the parent that could be a call; +// child - the new child node; +// origType - the original child type; +// +// Returns: +// PUT_ARG_TYPE node if it is needed, nullptr otherwise. 
+// +GenTree* Compiler::fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType) +{ + if ((parent == nullptr) || !parent->IsCall()) + { + return nullptr; + } + const var_types newType = child->TypeGet(); + if (newType == origType) + { + return nullptr; + } + if (varTypeIsStruct(origType) || (genTypeSize(origType) == genTypeSize(newType))) + { + assert(!varTypeIsStruct(newType)); + return nullptr; + } + GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, origType, child); +#if defined(DEBUG) + if (verbose) + { + printf("For call [%06d] the new argument's type [%06d]", dspTreeID(parent), dspTreeID(child)); + printf(" does not match the original type size, add a GT_PUTARG_TYPE [%06d]\n", dspTreeID(parent)); + } +#endif + return putArgType; +} + #ifdef DEBUG //------------------------------------------------------------------------ diff --git a/src/coreclr/src/jit/gentree.cpp b/src/coreclr/src/jit/gentree.cpp index 72c40b36c2c4f..a55d157714c7e 100644 --- a/src/coreclr/src/jit/gentree.cpp +++ b/src/coreclr/src/jit/gentree.cpp @@ -5112,6 +5112,7 @@ bool GenTree::TryGetUse(GenTree* def, GenTree*** use) case GT_NULLCHECK: case GT_PUTARG_REG: case GT_PUTARG_STK: + case GT_PUTARG_TYPE: case GT_RETURNTRAP: case GT_NOP: case GT_RETURN: @@ -9107,6 +9108,7 @@ GenTreeUseEdgeIterator::GenTreeUseEdgeIterator(GenTree* node) case GT_NULLCHECK: case GT_PUTARG_REG: case GT_PUTARG_STK: + case GT_PUTARG_TYPE: case GT_BSWAP: case GT_BSWAP16: case GT_KEEPALIVE: @@ -12021,7 +12023,7 @@ void Compiler::gtGetArgMsg(GenTreeCall* call, GenTree* arg, unsigned argNum, cha } #endif // TARGET_ARM #if FEATURE_FIXED_OUT_ARGS - sprintf_s(bufp, bufLength, "arg%d out+%02x%c", argNum, curArgTabEntry->slotNum * TARGET_POINTER_SIZE, 0); + sprintf_s(bufp, bufLength, "arg%d out+%02x%c", argNum, curArgTabEntry->GetByteOffset(), 0); #else sprintf_s(bufp, bufLength, "arg%d on STK%c", argNum, 0); #endif @@ -15517,7 +15519,7 @@ GenTree* Compiler::gtNewTempAssign( // There are 2 special cases: // 
1. we have lost classHandle from a FIELD node because the parent struct has overlapping fields, // the field was transformed as IND opr GT_LCL_FLD; - // 2. we are propogating `ASG(struct V01, 0)` to `RETURN(struct V01)`, `CNT_INT` doesn't `structHnd`; + // 2. we are propagating `ASG(struct V01, 0)` to `RETURN(struct V01)`, `CNT_INT` doesn't `structHnd`; // in these cases, we can use the type of the merge return for the assignment. assert(val->OperIs(GT_IND, GT_LCL_FLD, GT_CNS_INT)); assert(!compDoOldStructRetyping()); diff --git a/src/coreclr/src/jit/gentree.h b/src/coreclr/src/jit/gentree.h index 6e74955d61473..9fe2bcaf4e900 100644 --- a/src/coreclr/src/jit/gentree.h +++ b/src/coreclr/src/jit/gentree.h @@ -1736,7 +1736,9 @@ struct GenTree inline GenTree* gtEffectiveVal(bool commaOnly = false); // Tunnel through any GT_RET_EXPRs - inline GenTree* gtRetExprVal(unsigned __int64* pbbFlags); + inline GenTree* gtRetExprVal(unsigned __int64* pbbFlags = nullptr); + + inline GenTree* gtSkipPutArgType(); // Return the child of this node if it is a GT_RELOAD or GT_COPY; otherwise simply return the node itself inline GenTree* gtSkipReloadOrCopy(); @@ -7111,14 +7113,15 @@ inline GenTree* GenTree::gtGetOp2IfPresent() const return op2; } -inline GenTree* GenTree::gtEffectiveVal(bool commaOnly) +inline GenTree* GenTree::gtEffectiveVal(bool commaOnly /* = false */) { GenTree* effectiveVal = this; for (;;) { + assert(!effectiveVal->OperIs(GT_PUTARG_TYPE)); if (effectiveVal->gtOper == GT_COMMA) { - effectiveVal = effectiveVal->AsOp()->gtOp2; + effectiveVal = effectiveVal->AsOp()->gtGetOp2(); } else if (!commaOnly && (effectiveVal->gtOper == GT_NOP) && (effectiveVal->AsOp()->gtOp1 != nullptr)) { @@ -7147,23 +7150,49 @@ inline GenTree* GenTree::gtEffectiveVal(bool commaOnly) // Multi-level inlines can form chains of GT_RET_EXPRs. // This method walks back to the root of the chain. 
-inline GenTree* GenTree::gtRetExprVal(unsigned __int64* pbbFlags) +inline GenTree* GenTree::gtRetExprVal(unsigned __int64* pbbFlags /* = nullptr */) { GenTree* retExprVal = this; unsigned __int64 bbFlags = 0; - assert(pbbFlags != nullptr); + assert(!retExprVal->OperIs(GT_PUTARG_TYPE)); - for (; retExprVal->gtOper == GT_RET_EXPR; retExprVal = retExprVal->AsRetExpr()->gtInlineCandidate) + while (retExprVal->OperIs(GT_RET_EXPR)) { - bbFlags = retExprVal->AsRetExpr()->bbFlags; + const GenTreeRetExpr* retExpr = retExprVal->AsRetExpr(); + bbFlags = retExpr->bbFlags; + retExprVal = retExpr->gtInlineCandidate; } - *pbbFlags = bbFlags; + if (pbbFlags != nullptr) + { + *pbbFlags = bbFlags; + } return retExprVal; } +//------------------------------------------------------------------------- +// gtSkipPutArgType - skip PUTARG_TYPE if it is presented. +// +// Returns: +// the original tree or its child if it was a PUTARG_TYPE. +// +// Notes: +// PUTARG_TYPE should be skipped when we are doing transformations +// that are not affected by ABI, for example: inlining, implicit byref morphing. 
+// +inline GenTree* GenTree::gtSkipPutArgType() +{ + if (OperIs(GT_PUTARG_TYPE)) + { + GenTree* res = AsUnOp()->gtGetOp1(); + assert(!res->OperIs(GT_PUTARG_TYPE)); + return res; + } + return this; +} + inline GenTree* GenTree::gtSkipReloadOrCopy() { // There can be only one reload or copy (we can't have a reload/copy of a reload/copy) diff --git a/src/coreclr/src/jit/gtlist.h b/src/coreclr/src/jit/gtlist.h index 640affba218b3..f73f0c6ce68d7 100644 --- a/src/coreclr/src/jit/gtlist.h +++ b/src/coreclr/src/jit/gtlist.h @@ -292,6 +292,7 @@ GTNODE(PUTARG_REG , GenTreeMultiRegOp ,0,GTK_UNOP) #else GTNODE(PUTARG_REG , GenTreeOp ,0,GTK_UNOP) // operator that places outgoing arg in register #endif +GTNODE(PUTARG_TYPE , GenTreeOp ,0,GTK_UNOP|GTK_NOTLIR) // operator that places saves argument type between importation and morph GTNODE(PUTARG_STK , GenTreePutArgStk ,0,GTK_UNOP|GTK_NOVALUE) // operator that places outgoing arg in stack #if FEATURE_ARG_SPLIT GTNODE(PUTARG_SPLIT , GenTreePutArgSplit ,0,GTK_UNOP) // operator that places outgoing arg in registers with stack (split struct in ARM32) diff --git a/src/coreclr/src/jit/importer.cpp b/src/coreclr/src/jit/importer.cpp index 392443739ed21..1536db7ed3806 100644 --- a/src/coreclr/src/jit/importer.cpp +++ b/src/coreclr/src/jit/importer.cpp @@ -935,16 +935,15 @@ GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass); } - CORINFO_ARG_LIST_HANDLE argLst = sig->args; - CORINFO_CLASS_HANDLE argClass; - CORINFO_CLASS_HANDLE argRealClass; + CORINFO_ARG_LIST_HANDLE sigArgs = sig->args; GenTreeCall::Use* arg; for (arg = argList, count = sig->numArgs; count > 0; arg = arg->GetNext(), count--) { PREFIX_ASSUME(arg != nullptr); - CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass)); + CORINFO_CLASS_HANDLE classHnd; + CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArgs, &classHnd)); var_types 
jitSigType = JITtype2varType(corType); @@ -954,7 +953,6 @@ GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig } // insert implied casts (from float to double or double to float) - if ((jitSigType == TYP_DOUBLE) && (arg->GetNode()->TypeGet() == TYP_FLOAT)) { arg->SetNode(gtNewCastNode(TYP_DOUBLE, arg->GetNode(), false, TYP_DOUBLE)); @@ -965,21 +963,35 @@ GenTreeCall::Use* Compiler::impPopCallArgs(unsigned count, CORINFO_SIG_INFO* sig } // insert any widening or narrowing casts for backwards compatibility - arg->SetNode(impImplicitIorI4Cast(arg->GetNode(), jitSigType)); if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR && - corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr) + corType != CORINFO_TYPE_VAR) + { + CORINFO_CLASS_HANDLE argRealClass = info.compCompHnd->getArgClass(sig, sigArgs); + if (argRealClass != nullptr) + { + // Make sure that all valuetypes (including enums) that we push are loaded. + // This is to guarantee that if a GC is triggered from the prestub of this methods, + // all valuetypes in the method signature are already loaded. + // We need to be able to find the size of the valuetypes, but we cannot + // do a class-load from within GC. + info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass); + } + } + + const var_types nodeArgType = arg->GetNode()->TypeGet(); + if (!varTypeIsStruct(jitSigType) && genTypeSize(nodeArgType) != genTypeSize(jitSigType)) { - // Make sure that all valuetypes (including enums) that we push are loaded. - // This is to guarantee that if a GC is triggered from the prestub of this methods, - // all valuetypes in the method signature are already loaded. - // We need to be able to find the size of the valuetypes, but we cannot - // do a class-load from within GC. 
- info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass); + assert(!varTypeIsStruct(nodeArgType)); + // Some ABI require precise size information for call arguments less than target pointer size, + // for example arm64 OSX. Create a special node to keep this information until morph + // consumes it into `fgArgInfo`. + GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, jitSigType, arg->GetNode()); + arg->SetNode(putArgType); } - argLst = info.compCompHnd->getArgNext(argLst); + sigArgs = info.compCompHnd->getArgNext(sigArgs); } } @@ -19212,20 +19224,24 @@ void Compiler::impCheckCanInline(GenTreeCall* call, // properties are used later by impInlineFetchArg to determine how best to // pass the argument into the inlinee. -void Compiler::impInlineRecordArgInfo( - InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, unsigned __int64 bbFlags, InlineResult* inlineResult) +void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo, + GenTree* curArgVal, + unsigned argNum, + InlineResult* inlineResult) { InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum]; + inlCurArgInfo->argNode = curArgVal; // Save the original tree, with PUT_ARG and RET_EXPR. 
+ + curArgVal = curArgVal->gtSkipPutArgType(); + curArgVal = curArgVal->gtRetExprVal(); + if (curArgVal->gtOper == GT_MKREFANY) { inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY); return; } - inlCurArgInfo->argNode = curArgVal; - inlCurArgInfo->bbFlags = bbFlags; - GenTree* lclVarTree; const bool isAddressInLocal = impIsAddressInLocal(curArgVal, &lclVarTree); @@ -19379,12 +19395,10 @@ void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) assert((methInfo->args.hasThis()) == (thisArg != nullptr)); - if (thisArg) + if (thisArg != nullptr) { - inlArgInfo[0].argIsThis = true; - unsigned __int64 bbFlags = 0; - GenTree* actualThisArg = thisArg->GetNode()->gtRetExprVal(&bbFlags); - impInlineRecordArgInfo(pInlineInfo, actualThisArg, argCnt, bbFlags, inlineResult); + inlArgInfo[0].argIsThis = true; + impInlineRecordArgInfo(pInlineInfo, thisArg->GetNode(), argCnt, inlineResult); if (inlineResult->IsFailure()) { @@ -19419,9 +19433,8 @@ void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) continue; } - unsigned __int64 bbFlags = 0; - GenTree* actualArg = use.GetNode()->gtRetExprVal(&bbFlags); - impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, bbFlags, inlineResult); + GenTree* actualArg = use.GetNode(); + impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult); if (inlineResult->IsFailure()) { @@ -19519,8 +19532,12 @@ void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) GenTree* inlArgNode = inlArgInfo[i].argNode; - if (sigType != inlArgNode->gtType) + if ((sigType != inlArgNode->gtType) || inlArgNode->OperIs(GT_PUTARG_TYPE)) { + assert(impCheckImplicitArgumentCoercion(sigType, inlArgNode->gtType)); + assert(!varTypeIsStruct(inlArgNode->gtType) && !varTypeIsStruct(sigType) && + genTypeSize(inlArgNode->gtType) == genTypeSize(sigType)); + /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints, but in bad IL cases with caller-callee signature mismatches we can see other types. 
Intentionally reject cases with mismatches so the jit is more flexible when @@ -19536,10 +19553,23 @@ void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) return; } + GenTree** pInlArgNode; + if (inlArgNode->OperIs(GT_PUTARG_TYPE)) + { + // There was a widening or narrowing cast. + GenTreeUnOp* putArgType = inlArgNode->AsUnOp(); + pInlArgNode = &putArgType->gtOp1; + inlArgNode = putArgType->gtOp1; + } + else + { + // The same size but different type of the arguments. + pInlArgNode = &inlArgInfo[i].argNode; + } + /* Is it a narrowing or widening cast? * Widening casts are ok since the value computed is already * normalized to an int (on the IL stack) */ - if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType)) { if (sigType == TYP_BYREF) @@ -19565,37 +19595,34 @@ void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) } else if (genTypeSize(sigType) < EA_PTRSIZE) { - /* Narrowing cast */ - - if (inlArgNode->gtOper == GT_LCL_VAR && - !lvaTable[inlArgNode->AsLclVarCommon()->GetLclNum()].lvNormalizeOnLoad() && - sigType == lvaGetRealType(inlArgNode->AsLclVarCommon()->GetLclNum())) + // Narrowing cast. + if (inlArgNode->OperIs(GT_LCL_VAR)) { - /* We don't need to insert a cast here as the variable - was assigned a normalized value of the right type */ - - continue; + const unsigned lclNum = inlArgNode->AsLclVarCommon()->GetLclNum(); + if (!lvaTable[lclNum].lvNormalizeOnLoad() && sigType == lvaGetRealType(lclNum)) + { + // We don't need to insert a cast here as the variable + // was assigned a normalized value of the right type. + continue; + } } - inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType); + inlArgNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; - - /* Try to fold the node in case we have constant arguments */ - + // Try to fold the node in case we have constant arguments. 
if (inlArgInfo[i].argIsInvariant) { - inlArgNode = gtFoldExprConst(inlArgNode); - inlArgInfo[i].argNode = inlArgNode; + inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } + *pInlArgNode = inlArgNode; } #ifdef TARGET_64BIT else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType)) { // This should only happen for int -> native int widening - inlArgNode = inlArgInfo[i].argNode = - gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType); + inlArgNode = gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType); inlArgInfo[i].argIsLclVar = false; @@ -19603,10 +19630,10 @@ void Compiler::impInlineInitVars(InlineInfo* pInlineInfo) if (inlArgInfo[i].argIsInvariant) { - inlArgNode = gtFoldExprConst(inlArgNode); - inlArgInfo[i].argNode = inlArgNode; + inlArgNode = gtFoldExprConst(inlArgNode); assert(inlArgNode->OperIsConst()); } + *pInlArgNode = inlArgNode; } #endif // TARGET_64BIT } @@ -19833,6 +19860,8 @@ GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, In const var_types lclTyp = lclInfo.lclTypeInfo; GenTree* op1 = nullptr; + GenTree* argNode = argInfo.argNode->gtSkipPutArgType()->gtRetExprVal(); + if (argInfo.argIsInvariant && !argCanBeModified) { // Directly substitute constants or addresses of locals @@ -19843,7 +19872,7 @@ GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, In // impInlineExpr. Then gtFoldExpr() could change it, causing // further references to the argument working off of the // bashed copy. - op1 = gtCloneExpr(argInfo.argNode); + op1 = gtCloneExpr(argNode); PREFIX_ASSUME(op1 != nullptr); argInfo.argTmpNum = BAD_VAR_NUM; @@ -19863,7 +19892,7 @@ GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, In // Directly substitute unaliased caller locals for args that cannot be modified // // Use the caller-supplied node if this is the first use. 
- op1 = argInfo.argNode; + op1 = argNode; argInfo.argTmpNum = op1->AsLclVarCommon()->GetLclNum(); // Use an equivalent copy if this is the second or subsequent @@ -19906,8 +19935,8 @@ GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, In then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR. */ - assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL); - op1 = gtCloneExpr(argInfo.argNode); + assert(argNode->TypeGet() == TYP_BYREF || argNode->TypeGet() == TYP_I_IMPL); + op1 = gtCloneExpr(argNode); } else { @@ -19948,7 +19977,7 @@ GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, In assert(lvaTable[tmpNum].lvSingleDef == 0); lvaTable[tmpNum].lvSingleDef = 1; JITDUMP("Marked V%02u as a single def temp\n", tmpNum); - lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); + lvaSetClass(tmpNum, argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef()); } else { @@ -20652,8 +20681,7 @@ void Compiler::impDevirtualizeCall(GenTreeCall* call, if ((objClass == nullptr) || !isExact) { // Walk back through any return expression placeholders - unsigned __int64 bbFlags = 0; - actualThisObj = thisObj->gtRetExprVal(&bbFlags); + actualThisObj = thisObj->gtRetExprVal(); // See if we landed on a call to a special intrinsic method if (actualThisObj->IsCall()) diff --git a/src/coreclr/src/jit/inline.h b/src/coreclr/src/jit/inline.h index 4918e6ca7bef5..54d4237987d8a 100644 --- a/src/coreclr/src/jit/inline.h +++ b/src/coreclr/src/jit/inline.h @@ -548,8 +548,6 @@ struct InlineCandidateInfo : public GuardedDevirtualizationCandidateInfo struct InlArgInfo { - unsigned __int64 bbFlags; // basic block flags that need to be added when replacing GT_RET_EXPR - // with argNode GenTree* argNode; // caller node for this argument GenTree* 
argBashTmpNode; // tmp node created, if it may be replaced with actual arg unsigned argTmpNum; // the argument tmp number diff --git a/src/coreclr/src/jit/lclvars.cpp b/src/coreclr/src/jit/lclvars.cpp index f6f542631b99e..8ea469a6e3684 100644 --- a/src/coreclr/src/jit/lclvars.cpp +++ b/src/coreclr/src/jit/lclvars.cpp @@ -1015,12 +1015,21 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo) #endif // TARGET_XXX #if FEATURE_FASTTAILCALL - varDsc->SetStackOffset(varDscInfo->stackArgSize); #if defined(OSX_ARM64_ABI) + unsigned argAlignment = TARGET_POINTER_SIZE; + if (argSize <= TARGET_POINTER_SIZE) + { + argAlignment = argSize; + } + varDscInfo->stackArgSize = roundUp(varDscInfo->stackArgSize, argAlignment); + assert(argSize % argAlignment == 0); +#else // !OSX_ARM64_ABI + assert((argSize % TARGET_POINTER_SIZE) == 0); + assert((varDscInfo->stackArgSize % TARGET_POINTER_SIZE) == 0); +#endif // !OSX_ARM64_ABI + + varDsc->SetStackOffset(varDscInfo->stackArgSize); varDscInfo->stackArgSize += argSize; -#else - varDscInfo->stackArgSize += roundUp(argSize, TARGET_POINTER_SIZE); -#endif #endif // FEATURE_FASTTAILCALL } @@ -5738,6 +5747,18 @@ int Compiler::lvaAssignVirtualFrameOffsetToArg(unsigned lclNum, break; } #endif // TARGET_ARM +#if defined(OSX_ARM64_ABI) + unsigned argAlignment = TARGET_POINTER_SIZE; + if (argSize <= TARGET_POINTER_SIZE) + { + argAlignment = argSize; + } + argOffs = roundUp(argOffs, argAlignment); + assert((argOffs % argAlignment) == 0); +#else // !OSX_ARM64_ABI + assert((argSize % TARGET_POINTER_SIZE) == 0); + assert((argOffs % TARGET_POINTER_SIZE) == 0); +#endif // !OSX_ARM64_ABI varDsc->SetStackOffset(argOffs); } diff --git a/src/coreclr/src/jit/morph.cpp b/src/coreclr/src/jit/morph.cpp index c6886775c0798..71f905a79cc44 100644 --- a/src/coreclr/src/jit/morph.cpp +++ b/src/coreclr/src/jit/morph.cpp @@ -2883,9 +2883,60 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc; #endif // 
UNIX_AMD64_ABI +#if defined(DEBUG) + // Check that we have valid information about call's argument types. + // For example: + // load byte; call(int) -> CALL(PUTARG_TYPE byte(IND byte)); + // load int; call(byte) -> CALL(PUTARG_TYPE int (IND int)); + // etc. + if (call->callSig != nullptr) + { + CORINFO_SIG_INFO* sig = call->callSig; + const unsigned sigArgsCount = sig->numArgs; + + GenTreeCall::Use* nodeArgs = call->gtCallArgs; + // It could include many arguments not included in `sig->numArgs`, for example, `this`, runtime lookup, cookie + // etc. + unsigned nodeArgsCount = call->NumChildren(); + + if (call->gtCallThisArg != nullptr) + { + // Handle the most common argument not in the `sig->numArgs`. + // so the following check works on more methods. + nodeArgsCount--; + } + + assert(nodeArgsCount >= sigArgsCount); + if ((nodeArgsCount == sigArgsCount) && + ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (nodeArgsCount == 1))) + { + CORINFO_ARG_LIST_HANDLE sigArg = sig->args; + for (unsigned i = 0; i < sig->numArgs; ++i) + { + CORINFO_CLASS_HANDLE argClass; + const CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArg, &argClass)); + const var_types sigType = JITtype2varType(corType); + + assert(nodeArgs != nullptr); + const GenTree* nodeArg = nodeArgs->GetNode(); + assert(nodeArg != nullptr); + const var_types nodeType = nodeArg->TypeGet(); + + assert((nodeType == sigType) || varTypeIsStruct(sigType) || + genTypeSize(nodeType) == genTypeSize(sigType)); + + sigArg = info.compCompHnd->getArgNext(sigArg); + nodeArgs = nodeArgs->GetNext(); + } + assert(nodeArgs == nullptr); + } + } +#endif // DEBUG + for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++) { - argx = args->GetNode(); + argx = args->GetNode()->gtSkipPutArgType(); + fgArgTabEntry* argEntry = nullptr; // Change the node to TYP_I_IMPL so we don't report GC info @@ -3150,6 +3201,12 @@ void Compiler::fgInitArgInfo(GenTreeCall* call) } } + if 
(args->GetNode()->OperIs(GT_PUTARG_TYPE)) + { + const GenTreeUnOp* putArgType = args->GetNode()->AsUnOp(); + byteSize = genTypeSize(putArgType->TypeGet()); + } + // The 'size' value has now must have been set. (the original value of zero is an invalid value) assert(size != 0); assert(byteSize != 0); @@ -12234,7 +12291,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) LclVarDsc* varDsc = lvaGetDesc(lclVar); if (varDsc->CanBeReplacedWithItsField(this)) { - // We can replace the struct with its only field and allow copy propogation to replace + // We can replace the struct with its only field and allow copy propagation to replace // return value that was written as a field. unsigned fieldLclNum = varDsc->lvFieldLclStart; LclVarDsc* fieldDsc = lvaGetDesc(fieldLclNum); @@ -12314,6 +12371,9 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac) // Special handling for the arg list. return fgMorphArgList(tree->AsArgList(), mac); + case GT_PUTARG_TYPE: + return fgMorphTree(tree->AsUnOp()->gtGetOp1()); + default: break; } @@ -18345,14 +18405,15 @@ bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree) { for (GenTree** pTree : tree->UseEdges()) { - GenTree* childTree = *pTree; + GenTree** pTreeCopy = pTree; + GenTree* childTree = *pTree; if (childTree->gtOper == GT_LCL_VAR) { GenTree* newChildTree = fgMorphImplicitByRefArgs(childTree, false); if (newChildTree != nullptr) { - changed = true; - *pTree = newChildTree; + changed = true; + *pTreeCopy = newChildTree; } } } diff --git a/src/coreclr/src/jit/rationalize.cpp b/src/coreclr/src/jit/rationalize.cpp index 8054fda6e8ef4..ebdac7725ab7f 100644 --- a/src/coreclr/src/jit/rationalize.cpp +++ b/src/coreclr/src/jit/rationalize.cpp @@ -303,8 +303,8 @@ void Rationalizer::SanityCheck() for (GenTree* tree = statement->GetTreeList(); tree; tree = tree->gtNext) { - // QMARK nodes should have been removed before this phase. 
- assert(tree->OperGet() != GT_QMARK); + // QMARK and PUT_ARG_TYPE nodes should have been removed before this phase. + assert(!tree->OperIs(GT_QMARK, GT_PUTARG_TYPE)); if (tree->OperGet() == GT_ASG) { diff --git a/src/libraries/Microsoft.CSharp/tests/IntegerBinaryOperationTests.cs b/src/libraries/Microsoft.CSharp/tests/IntegerBinaryOperationTests.cs index 8888f6ce06723..fe21b494228a2 100644 --- a/src/libraries/Microsoft.CSharp/tests/IntegerBinaryOperationTests.cs +++ b/src/libraries/Microsoft.CSharp/tests/IntegerBinaryOperationTests.cs @@ -498,6 +498,7 @@ public void RuntimeExpression(object x, object y, ExpressionType type, object re [MemberData(nameof(UInt64TestNotEquals))] [MemberData(nameof(UInt64TestSubtractions))] [ActiveIssue("https://github.com/dotnet/runtime/issues/26798", TargetFrameworkMonikers.NetFramework)] + [SkipOnCoreClr("https://github.com/dotnet/runtime/issues/42719", RuntimeConfiguration.Checked)] public void ConstantExpressions(object x, object y, ExpressionType type, object result, bool shouldSucceedChecked) { var callsite = GetBinaryOperationCallSite(type, false, true, true);