diff --git a/clang/lib/AST/ByteCode/IntegralAP.h b/clang/lib/AST/ByteCode/IntegralAP.h
index a4d656433344b7..6ab3d09ec85d5b 100644
--- a/clang/lib/AST/ByteCode/IntegralAP.h
+++ b/clang/lib/AST/ByteCode/IntegralAP.h
@@ -61,7 +61,7 @@ template <bool Signed> class IntegralAP final {
   IntegralAP(APInt V) : V(V) {}
 
   /// Arbitrary value for uninitialized variables.
-  IntegralAP() : IntegralAP(-1, 3) {}
+  IntegralAP() : IntegralAP(Signed ? -1 : 7, 3) {}
 
   IntegralAP operator-() const { return IntegralAP(-V); }
   IntegralAP operator-(const IntegralAP &Other) const {
@@ -112,7 +112,9 @@ template <bool Signed> class IntegralAP final {
 
   template <unsigned Bits, bool InputSigned>
   static IntegralAP from(Integral<Bits, InputSigned> I, unsigned BitWidth) {
-    APInt Copy = APInt(BitWidth, static_cast<uint64_t>(I), InputSigned);
+    // TODO: Avoid implicit trunc?
+    APInt Copy = APInt(BitWidth, static_cast<uint64_t>(I), InputSigned,
+                       /*implicitTrunc=*/true);
     return IntegralAP<Signed>(Copy);
   }
 
diff --git a/clang/lib/CodeGen/CGVTT.cpp b/clang/lib/CodeGen/CGVTT.cpp
index 20bd2c2fc2c642..989a07d09d50ee 100644
--- a/clang/lib/CodeGen/CGVTT.cpp
+++ b/clang/lib/CodeGen/CGVTT.cpp
@@ -85,8 +85,9 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
         cast<llvm::StructType>(VTable->getValueType())
             ->getElementType(AddressPoint.VTableIndex));
     unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
-    llvm::ConstantRange InRange(llvm::APInt(32, -Offset, true),
-                                llvm::APInt(32, VTableSize - Offset, true));
+    llvm::ConstantRange InRange(
+        llvm::APInt(32, (int)-Offset, true),
+        llvm::APInt(32, (int)(VTableSize - Offset), true));
     llvm::Constant *Init = llvm::ConstantExpr::getGetElementPtr(
         VTable->getValueType(), VTable, Idxs, /*InBounds=*/true, InRange);
 
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 75dab596e1b2c4..5bb0765cb0249c 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -2099,8 +2099,9 @@ ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
   unsigned VTableSize =
       ComponentSize * Layout.getVTableSize(AddressPoint.VTableIndex);
   unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
-  llvm::ConstantRange InRange(llvm::APInt(32, -Offset, true),
-                              llvm::APInt(32, VTableSize - Offset, true));
+  llvm::ConstantRange InRange(
+      llvm::APInt(32, (int)-Offset, true),
+      llvm::APInt(32, (int)(VTableSize - Offset), true));
   return llvm::ConstantExpr::getGetElementPtr(
       VTable->getValueType(), VTable, Indices, /*InBounds=*/true, InRange);
 }
diff --git a/clang/lib/Parse/ParseInit.cpp b/clang/lib/Parse/ParseInit.cpp
index 0a9a359cdaf979..e7c8d79ccccac3 100644
--- a/clang/lib/Parse/ParseInit.cpp
+++ b/clang/lib/Parse/ParseInit.cpp
@@ -437,7 +437,9 @@ ExprResult Parser::createEmbedExpr() {
   SourceLocation StartLoc = ConsumeAnnotationToken();
   if (Data->BinaryData.size() == 1) {
     Res = IntegerLiteral::Create(Context,
-                                 llvm::APInt(CHAR_BIT, Data->BinaryData.back()),
+                                 llvm::APInt(CHAR_BIT, Data->BinaryData.back(),
+                                             /*isSigned=*/false,
+                                             /*implicitTrunc=*/true),
                                  Context.UnsignedCharTy, StartLoc);
   } else {
     auto CreateStringLiteralFromStringRef = [&](StringRef Str, QualType Ty) {
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 531e7f6933d6d3..170a096166c066 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -3600,8 +3600,11 @@ ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) {
 
 ExprResult Sema::ActOnIntegerConstant(SourceLocation Loc, uint64_t Val) {
   unsigned IntSize = Context.getTargetInfo().getIntWidth();
-  return IntegerLiteral::Create(Context, llvm::APInt(IntSize, Val),
-                                Context.IntTy, Loc);
+  // TODO: Avoid implicit trunc?
+  return IntegerLiteral::Create(
+      Context,
+      llvm::APInt(IntSize, Val, /*isSigned=*/false, /*implicitTrunc=*/true),
+      Context.IntTy, Loc);
 }
 
 static Expr *BuildFloatingLiteral(Sema &S, NumericLiteralParser &Literal,
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index d3e696a79b94fc..0232745b3c19e5 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -5697,7 +5697,9 @@ StmtResult SemaOpenMP::ActOnOpenMPCanonicalLoop(Stmt *AStmt) {
       llvm_unreachable("unhandled unary increment operator");
     }
     Step = IntegerLiteral::Create(
-        Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), Direction), LogicalTy, {});
+        Ctx,
+        llvm::APInt(Ctx.getIntWidth(LogicalTy), Direction, /*isSigned=*/true),
+        LogicalTy, {});
   } else if (auto *IncBin = dyn_cast<BinaryOperator>(Inc)) {
     if (IncBin->getOpcode() == BO_AddAssign) {
       Step = IncBin->getRHS();
diff --git a/lldb/source/Expression/DWARFExpression.cpp b/lldb/source/Expression/DWARFExpression.cpp
index 97bcd4f7eec26f..af7c112b516b6b 100644
--- a/lldb/source/Expression/DWARFExpression.cpp
+++ b/lldb/source/Expression/DWARFExpression.cpp
@@ -860,10 +860,11 @@ llvm::Expected<Value> DWARFExpression::Evaluate(
   // TODO: Implement a real typed stack, and store the genericness of the value
   // there.
   auto to_generic = [&](auto v) {
+    // TODO: Avoid implicit trunc?
     bool is_signed = std::is_signed<decltype(v)>::value;
-    return Scalar(llvm::APSInt(
-        llvm::APInt(8 * opcodes.GetAddressByteSize(), v, is_signed),
-        !is_signed));
+    return Scalar(llvm::APSInt(llvm::APInt(8 * opcodes.GetAddressByteSize(), v,
+                                           is_signed, /*implicitTrunc=*/true),
+                               !is_signed));
   };
 
   // The default kind is a memory location. This is updated by any
diff --git a/llvm/include/llvm/ADT/APFixedPoint.h b/llvm/include/llvm/ADT/APFixedPoint.h
index e4aa82d7a41c31..70d7f325702cf5 100644
--- a/llvm/include/llvm/ADT/APFixedPoint.h
+++ b/llvm/include/llvm/ADT/APFixedPoint.h
@@ -168,7 +168,9 @@ class APFixedPoint {
   }
 
   APFixedPoint(uint64_t Val, const FixedPointSemantics &Sema)
-      : APFixedPoint(APInt(Sema.getWidth(), Val, Sema.isSigned()), Sema) {}
+      : APFixedPoint(APInt(Sema.getWidth(), Val, Sema.isSigned(),
+                           /*implicitTrunc=*/true),
+                     Sema) {}
 
   // Zero initialization.
   APFixedPoint(const FixedPointSemantics &Sema) : APFixedPoint(0, Sema) {}
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 102762dc7937c6..1f37631532b879 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -888,7 +888,8 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
   APInt Offset = APInt(
       BitWidth,
       DL.getIndexedOffsetInType(
-          SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)));
+          SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)),
+      /*isSigned=*/true, /*implicitTrunc=*/true);
 
   std::optional<ConstantRange> InRange = GEP->getInRange();
   if (InRange)
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index f4b202791a7081..820b8e96c1d3ae 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -95,10 +95,8 @@ static bool isDereferenceableAndAlignedPointer(
 
   auto IsKnownDeref = [&]() {
     bool CheckForNonNull, CheckForFreed;
-    APInt KnownDerefBytes(Size.getBitWidth(),
-                          V->getPointerDereferenceableBytes(DL, CheckForNonNull,
-                                                            CheckForFreed));
-    if (!KnownDerefBytes.getBoolValue() || !KnownDerefBytes.uge(Size) ||
+    if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
+                                                    CheckForFreed)) ||
         CheckForFreed)
       return false;
     if (CheckForNonNull &&
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index e1abf5e4d885ec..dc2dc4c1733b5e 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -767,6 +767,8 @@ SizeOffsetAPInt ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
   TypeSize ElemSize = DL.getTypeAllocSize(I.getAllocatedType());
   if (ElemSize.isScalable() && Options.EvalMode != ObjectSizeOpts::Mode::Min)
     return ObjectSizeOffsetVisitor::unknown();
+  if (!isUIntN(IntTyBits, ElemSize.getKnownMinValue()))
+    return ObjectSizeOffsetVisitor::unknown();
   APInt Size(IntTyBits, ElemSize.getKnownMinValue());
   if (!I.isArrayAllocation())
     return SizeOffsetAPInt(align(Size, I.getAlign()), Zero);
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 97ea405a5267ae..d164ae2038d2ed 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -6883,7 +6883,7 @@ const ConstantRange &ScalarEvolution::getRangeRef(
       bool CanBeNull, CanBeFreed;
       uint64_t DerefBytes =
           V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
-      if (DerefBytes > 1) {
+      if (DerefBytes > 1 && isUIntN(BitWidth, DerefBytes)) {
         // The highest address the object can start is DerefBytes bytes before
         // the end (unsigned max value). If this value is not a multiple of the
         // alignment, the last possible start value is the next lowest multiple
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index a8b5f96d850b55..ce34ce2c07d442 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -876,7 +876,8 @@ class BitcodeReader : public BitcodeReaderBase, public GVMaterializer {
     } else {
      int64_t Start = BitcodeReader::decodeSignRotatedValue(Record[OpNum++]);
      int64_t End = BitcodeReader::decodeSignRotatedValue(Record[OpNum++]);
-      return ConstantRange(APInt(BitWidth, Start), APInt(BitWidth, End));
+      return ConstantRange(APInt(BitWidth, Start, true),
+                           APInt(BitWidth, End, true));
     }
   }
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 498debf0955980..c238034047b2b5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1641,7 +1641,10 @@ SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
   assert((EltVT.getSizeInBits() >= 64 ||
           (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
          "getConstant with a uint64_t value that doesn't fit in the type!");
-  return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
+  // TODO: Avoid implicit trunc?
+  return getConstant(APInt(EltVT.getSizeInBits(), Val, /*isSigned=*/false,
+                           /*implicitTrunc=*/true),
+                     DL, VT, isT, isO);
 }
 
 SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 805b8ecf009598..d6b35a762636b4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4333,7 +4333,8 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
           GTI.getSequentialElementStride(DAG.getDataLayout());
       // We intentionally mask away the high bits here; ElementSize may not
      // fit in IdxTy.
-      APInt ElementMul(IdxSize, ElementSize.getKnownMinValue());
+      APInt ElementMul(IdxSize, ElementSize.getKnownMinValue(),
+                       /*isSigned=*/false, /*implicitTrunc=*/true);
       bool ElementScalable = ElementSize.isScalable();
 
       // If this is a scalar constant or a splat vector of constants,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 2a97580942df36..119c30a18681cd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2200,7 +2200,9 @@ ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
 bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
                                     int64_t DesiredMaskS) const {
   const APInt &ActualMask = RHS->getAPIntValue();
-  const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
+  // TODO: Avoid implicit trunc?
+  const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS,
+                                   /*isSigned=*/false, /*implicitTrunc=*/true);
 
   // If the actual mask exactly matches, success!
   if (ActualMask == DesiredMask)
@@ -2229,7 +2231,9 @@ bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
 bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
                                    int64_t DesiredMaskS) const {
   const APInt &ActualMask = RHS->getAPIntValue();
-  const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
+  // TODO: Avoid implicit trunc?
+  const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS,
+                                   /*isSigned=*/false, /*implicitTrunc=*/true);
 
   // If the actual mask exactly matches, success!
   if (ActualMask == DesiredMask)
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 793b8ff164c233..f265abc7453fa2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -6813,7 +6813,9 @@ TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
 
     PAmts.push_back(DAG.getConstant(P, DL, SVT));
     KAmts.push_back(
-        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
+        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K, /*isSigned=*/false,
+                              /*implicitTrunc=*/true),
+                        DL, ShSVT));
     QAmts.push_back(DAG.getConstant(Q, DL, SVT));
     return true;
   };
@@ -7084,7 +7086,9 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
     PAmts.push_back(DAG.getConstant(P, DL, SVT));
     AAmts.push_back(DAG.getConstant(A, DL, SVT));
     KAmts.push_back(
-        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
+        DAG.getConstant(APInt(ShSVT.getSizeInBits(), K, /*isSigned=*/false,
+                              /*implicitTrunc=*/true),
+                        DL, ShSVT));
     QAmts.push_back(DAG.getConstant(Q, DL, SVT));
     return true;
   };
diff --git a/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
index 4cce4a77b343f0..e3b7db2380bb00 100644
--- a/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
+++ b/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -588,7 +588,7 @@ GenericValue MCJIT::runFunction(Function *F, ArrayRef<GenericValue> ArgValues) {
       return rv;
     }
     case Type::VoidTyID:
-      rv.IntVal = APInt(32, ((int(*)())(intptr_t)FPtr)());
+      rv.IntVal = APInt(32, ((int (*)())(intptr_t)FPtr)(), true);
       return rv;
     case Type::FloatTyID:
       rv.FloatVal = ((float(*)())(intptr_t)FPtr)();
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
index fe3a086c5772de..2e995f641f52e0 100644
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -932,7 +932,9 @@ Constant *ConstantInt::get(Type *Ty, uint64_t V, bool isSigned) {
 }
 
 ConstantInt *ConstantInt::get(IntegerType *Ty, uint64_t V, bool isSigned) {
-  return get(Ty->getContext(), APInt(Ty->getBitWidth(), V, isSigned));
+  // TODO: Avoid implicit trunc?
+  return get(Ty->getContext(),
+             APInt(Ty->getBitWidth(), V, isSigned, /*implicitTrunc=*/true));
 }
 
 Constant *ConstantInt::get(Type *Ty, const APInt& V) {
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index ed06d8a5d63013..a512bc8369bccc 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2400,10 +2400,11 @@ void AArch64TargetLowering::computeKnownBitsForTargetNode(
   }
   case AArch64ISD::BICi: {
     // Compute the bit cleared value.
-    uint64_t Mask =
-        ~(Op->getConstantOperandVal(1) << Op->getConstantOperandVal(2));
+    APInt Mask =
+        ~(Op->getConstantOperandAPInt(1) << Op->getConstantOperandAPInt(2))
+             .trunc(Known.getBitWidth());
     Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
-    Known &= KnownBits::makeConstant(APInt(Known.getBitWidth(), Mask));
+    Known &= KnownBits::makeConstant(Mask);
     break;
   }
   case AArch64ISD::VLSHR: {
@@ -12839,7 +12840,8 @@ static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
   // Benefit form APInt to handle overflow when calculating expected element.
   unsigned NumElts = VT.getVectorNumElements();
   unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
-  APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);
+  APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1, /*isSigned=*/false,
+                            /*implicitTrunc=*/true);
   // The following shuffle indices must be the successive elements after the
   // first real element.
   bool FoundWrongElt = std::any_of(FirstRealElt + 1, M.end(), [&](int Elt) {
@@ -14306,9 +14308,9 @@ static SDValue NormalizeBuildVector(SDValue Op,
     // (with operands cast to integers), then the only possibilities
     // are constants and UNDEFs.
     if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) {
-      APInt LowBits(EltTy.getSizeInBits(),
-                    CstLane->getZExtValue());
-      Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32);
+      Lane = DAG.getConstant(
+          CstLane->getAPIntValue().trunc(EltTy.getSizeInBits()).getZExtValue(),
+          dl, MVT::i32);
     } else if (Lane.getNode()->isUndef()) {
       Lane = DAG.getUNDEF(MVT::i32);
     } else {
@@ -23713,7 +23715,7 @@ static bool findMoreOptimalIndexType(const MaskedGatherScatterSDNode *N,
     EVT NewIndexVT = IndexVT.changeVectorElementType(MVT::i32);
     // Stride does not scale explicitly by 'Scale', because it happens in
     // the gather/scatter addressing mode.
-    Index = DAG.getStepVector(SDLoc(N), NewIndexVT, APInt(32, Stride));
+    Index = DAG.getStepVector(SDLoc(N), NewIndexVT, APInt(32, Stride, true));
     return true;
   }
 
@@ -28729,7 +28731,7 @@ static SDValue GenerateFixedLengthSVETBL(SDValue Op, SDValue Op1, SDValue Op2,
   unsigned BitsPerElt = VTOp1.getVectorElementType().getSizeInBits();
   unsigned IndexLen = MinSVESize / BitsPerElt;
   unsigned ElementsPerVectorReg = VTOp1.getVectorNumElements();
-  uint64_t MaxOffset = APInt(BitsPerElt, -1, false).getZExtValue();
+  uint64_t MaxOffset = APInt(BitsPerElt, -1, true).getZExtValue();
   EVT MaskEltType = VTOp1.getVectorElementType().changeTypeToInteger();
   EVT MaskType = EVT::getVectorVT(*DAG.getContext(), MaskEltType, IndexLen);
   bool MinMaxEqual = (MinSVESize == MaxSVESize);
@@ -29087,16 +29089,14 @@ bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
     KnownBits KnownOp0 =
         TLO.DAG.computeKnownBits(Op0, OriginalDemandedElts, Depth + 1);
     // Op0 &= ~(ConstantOperandVal(1) << ConstantOperandVal(2))
-    uint64_t BitsToClear = Op->getConstantOperandVal(1)
-                           << Op->getConstantOperandVal(2);
+    APInt BitsToClear =
+        (Op->getConstantOperandAPInt(1) << Op->getConstantOperandAPInt(2))
+            .trunc(KnownOp0.getBitWidth());
     APInt AlreadyZeroedBitsToClear = BitsToClear & KnownOp0.Zero;
-    if (APInt(Known.getBitWidth(), BitsToClear)
-            .isSubsetOf(AlreadyZeroedBitsToClear))
+    if (BitsToClear.isSubsetOf(AlreadyZeroedBitsToClear))
       return TLO.CombineTo(Op, Op0);
 
-    Known = KnownOp0 &
-            KnownBits::makeConstant(APInt(Known.getBitWidth(), ~BitsToClear));
-
+    Known = KnownOp0 & KnownBits::makeConstant(~BitsToClear);
     return false;
   }
   case ISD::INTRINSIC_WO_CHAIN: {
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index d676d561d08180..b378c894510380 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -3441,7 +3441,8 @@ bool SIInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                        : AMDGPU::V_MOV_B32_e32
                    : Is64Bit ? AMDGPU::S_MOV_B64_IMM_PSEUDO
                              : AMDGPU::S_MOV_B32;
-    APInt Imm(Is64Bit ? 64 : 32, getImmFor(UseMI.getOperand(1)));
+    APInt Imm(Is64Bit ? 64 : 32, getImmFor(UseMI.getOperand(1)),
+              /*isSigned=*/true, /*implicitTrunc=*/true);
 
     if (RI.isAGPR(*MRI, DstReg)) {
       if (Is64Bit || !isInlineConstant(Imm))
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index 78267d402b6c9e..f0b0e378ad668d 100644
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -213,12 +213,12 @@ static unsigned canModifyToInlineImmOp32(const SIInstrInfo *TII,
     // that SCC is not live as S_NOT_B32 clobbers it. It's probably not worth
     // it, as the reasonable values are already covered by s_movk_i32.
     ModifiedImm = ~SrcImm;
-    if (TII->isInlineConstant(APInt(32, ModifiedImm)))
+    if (TII->isInlineConstant(APInt(32, ModifiedImm, true)))
      return AMDGPU::V_NOT_B32_e32;
   }
 
   ModifiedImm = reverseBits<int32_t>(SrcImm);
-  if (TII->isInlineConstant(APInt(32, ModifiedImm)))
+  if (TII->isInlineConstant(APInt(32, ModifiedImm, true)))
     return Scalar ? AMDGPU::S_BREV_B32 : AMDGPU::V_BFREV_B32_e32;
 
   return 0;
diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 75fb90477f8854..377b4aaebebc99 100644
--- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -1159,7 +1159,9 @@ class ARMOperand : public MCParsedAsmOperand {
     if (!isImm()) return false;
     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
     if (!CE) return false;
-    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
+    // TODO: Is implicitTrunc correct here?
+    int Val = ARM_AM::getFP32Imm(
+        APInt(32, CE->getValue(), /*isSigned=*/true, /*implicitTrunc=*/true));
     return Val != -1;
   }
 
diff --git a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
index dae316ccb5e903..f68444c0b8d462 100644
--- a/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp
@@ -2503,7 +2503,8 @@ APInt HexagonConstEvaluator::getCmpImm(unsigned Opc, unsigned OpX,
   }
 
   uint64_t Val = MO.getImm();
-  return APInt(32, Val, Signed);
+  // TODO: Is implicitTrunc correct here?
+  return APInt(32, Val, Signed, /*implicitTrunc=*/true);
 }
 
 void HexagonConstEvaluator::replaceWithNop(MachineInstr &MI) {
diff --git a/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp b/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp
index 65bbb1364488f7..b16ab3931b286f 100644
--- a/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonGenExtract.cpp
@@ -171,7 +171,7 @@ bool HexagonGenExtract::convert(Instruction *In) {
     // this value.
    if (!LogicalSR && (SR > SL))
      return false;
-    APInt A = APInt(BW, ~0ULL).lshr(SR).shl(SL);
+    APInt A = APInt(BW, ~0ULL, true).lshr(SR).shl(SL);
     CM = ConstantInt::get(Ctx, A);
   }
 
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index cde690793f0702..f9ad33a9f1504a 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3442,7 +3442,9 @@ static std::optional<VIDSequence> isSimpleVIDSequence(SDValue Op,
     if (!Elt)
       continue;
     APInt ExpectedVal =
-        (APInt(EltSizeInBits, Idx) * *SeqStepNum).sdiv(*SeqStepDenom);
+        (APInt(EltSizeInBits, Idx, /*isSigned=*/false, /*implicitTrunc=*/true) *
+         *SeqStepNum)
+            .sdiv(*SeqStepDenom);
 
     APInt Addend = *Elt - ExpectedVal;
     if (!SeqAddend)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 4c16d2eaac4cd2..9da232ba96cc15 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -52738,8 +52738,8 @@ static SDValue combineFMulcFCMulc(SDNode *N, SelectionDAG &DAG,
   if (XOR->getOpcode() == ISD::XOR && XOR.hasOneUse()) {
     KnownBits XORRHS = DAG.computeKnownBits(XOR.getOperand(1));
     if (XORRHS.isConstant()) {
-      APInt ConjugationInt32 = APInt(32, 0x80000000, true);
-      APInt ConjugationInt64 = APInt(64, 0x8000000080000000ULL, true);
+      APInt ConjugationInt32 = APInt(32, 0x80000000);
+      APInt ConjugationInt64 = APInt(64, 0x8000000080000000ULL);
       if ((XORRHS.getBitWidth() == 32 &&
            XORRHS.getConstant() == ConjugationInt32) ||
           (XORRHS.getBitWidth() == 64 &&
@@ -52778,7 +52778,7 @@ static SDValue combineFaddCFmul(SDNode *N, SelectionDAG &DAG,
            Flags.hasNoSignedZeros();
   };
   auto IsVectorAllNegativeZero = [&DAG](SDValue Op) {
-    APInt AI = APInt(32, 0x80008000, true);
+    APInt AI = APInt(32, 0x80008000);
     KnownBits Bits = DAG.computeKnownBits(Op);
     return Bits.getBitWidth() == 32 && Bits.isConstant() &&
            Bits.getConstant() == AI;
diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
index afc13232ff195b..ac3f2bab5b096c 100644
--- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -101,7 +101,8 @@ using OffsetAndArgPart = std::pair<int64_t, ArgPart>;
 static Value *createByteGEP(IRBuilderBase &IRB, const DataLayout &DL,
                             Value *Ptr, Type *ResElemTy, int64_t Offset) {
   if (Offset != 0) {
-    APInt APOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
+    APInt APOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset,
+                   /*isSigned=*/true);
     Ptr = IRB.CreatePtrAdd(Ptr, IRB.getInt(APOffset));
   }
   return Ptr;
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 566ae2cf1936e9..72228b445a8b6e 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -7243,7 +7243,7 @@ static bool reduceSwitchRange(SwitchInst *SI, IRBuilder<> &Builder,
 
   for (auto Case : SI->cases()) {
     auto *Orig = Case.getCaseValue();
-    auto Sub = Orig->getValue() - APInt(Ty->getBitWidth(), Base);
+    auto Sub = Orig->getValue() - APInt(Ty->getBitWidth(), Base, true);
     Case.setValue(cast<ConstantInt>(ConstantInt::get(Ty, Sub.lshr(Shift))));
   }
   return true;
diff --git a/llvm/unittests/ADT/APFixedPointTest.cpp b/llvm/unittests/ADT/APFixedPointTest.cpp
index e7aa58a8325773..b71c5e16a915ee 100644
--- a/llvm/unittests/ADT/APFixedPointTest.cpp
+++ b/llvm/unittests/ADT/APFixedPointTest.cpp
@@ -240,19 +240,20 @@ void CheckIntPart(const FixedPointSemantics &Sema, int64_t IntPart) {
   APFixedPoint ValWithFract(
       APInt(Sema.getWidth(),
             relativeShr(IntPart, Sema.getLsbWeight()) + FullFactPart,
-            Sema.isSigned()),
+            Sema.isSigned(), /*implicitTrunc=*/true),
       Sema);
   ASSERT_EQ(ValWithFract.getIntPart(), IntPart);
 
   // Just fraction
-  APFixedPoint JustFract(APInt(Sema.getWidth(), FullFactPart, Sema.isSigned()),
+  APFixedPoint JustFract(APInt(Sema.getWidth(), FullFactPart, Sema.isSigned(),
+                               /*implicitTrunc=*/true),
                          Sema);
   ASSERT_EQ(JustFract.getIntPart(), 0);
 
   // Whole number
   APFixedPoint WholeNum(APInt(Sema.getWidth(),
                               relativeShr(IntPart, Sema.getLsbWeight()),
-                              Sema.isSigned()),
+                              Sema.isSigned(), /*implicitTrunc=*/true),
                         Sema);
   ASSERT_EQ(WholeNum.getIntPart(), IntPart);
 
@@ -260,7 +261,7 @@ void CheckIntPart(const FixedPointSemantics &Sema, int64_t IntPart) {
   if (Sema.isSigned()) {
     APFixedPoint Negative(APInt(Sema.getWidth(),
                                 relativeShr(IntPart, Sema.getLsbWeight()),
-                                Sema.isSigned()),
+                                Sema.isSigned(), /*implicitTrunc=*/true),
                           Sema);
     ASSERT_EQ(Negative.getIntPart(), IntPart);
   }