[Mlir-commits] [clang] [lldb] [llvm] [mlir] [APInt] Fix APInt constructions where value does not fit bitwidth (NFCI) (PR #80309)
llvmlistbot at llvm.org
Fri Sep 20 03:46:10 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-clang-codegen
Author: Nikita Popov (nikic)
Changes
This fixes all the places that hit the new assertion added in https://github.com/llvm/llvm-project/pull/106524 when running tests: cases where the value passed to the APInt constructor is not a valid N-bit signed/unsigned integer, where N is the bit width and the signedness is determined by the isSigned flag.
The fixes set the correct value for isSigned, set the implicitTrunc flag, or perform more of the calculation inside APInt.
Note that the assertion is currently still disabled by default, so this patch is mostly NFC.
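For context, a minimal sketch of the constructor contract the assertion enforces, using the isSigned/implicitTrunc parameters visible in the diff below; the helper name and concrete values are illustrative, not taken from the patch:

```cpp
#include "llvm/ADT/APInt.h"
using llvm::APInt;

void apintExamples() {
  // Fine: 255 is a valid 8-bit unsigned value.
  APInt A(8, 255);
  // Fine: -1 is a valid 8-bit signed value once isSigned is set.
  APInt B(8, -1, /*isSigned=*/true);
  // 300 does not fit in 8 unsigned bits; once the assertion is enabled,
  // this would fire unless the caller explicitly opts into truncation,
  // which preserves the old wrap-around behavior.
  APInt C(8, 300, /*isSigned=*/false, /*implicitTrunc=*/true); // value is 44
}
```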
---
Patch is 39.73 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/80309.diff
43 Files Affected:
- (modified) clang/lib/AST/ByteCode/IntegralAP.h (+4-2)
- (modified) clang/lib/CodeGen/CGVTT.cpp (+3-2)
- (modified) clang/lib/CodeGen/ItaniumCXXABI.cpp (+3-2)
- (modified) clang/lib/Parse/ParseInit.cpp (+3-1)
- (modified) clang/lib/Sema/SemaExpr.cpp (+5-2)
- (modified) clang/lib/Sema/SemaOpenMP.cpp (+3-1)
- (modified) lldb/source/Expression/DWARFExpression.cpp (+4-3)
- (modified) llvm/include/llvm/ADT/APFixedPoint.h (+3-1)
- (modified) llvm/lib/Analysis/ConstantFolding.cpp (+2-1)
- (modified) llvm/lib/Analysis/Loads.cpp (+2-4)
- (modified) llvm/lib/Analysis/MemoryBuiltins.cpp (+2)
- (modified) llvm/lib/Analysis/ScalarEvolution.cpp (+1-1)
- (modified) llvm/lib/Bitcode/Reader/BitcodeReader.cpp (+2-1)
- (modified) llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (+4-1)
- (modified) llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (+2-1)
- (modified) llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp (+6-2)
- (modified) llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp (+6-2)
- (modified) llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp (+1-1)
- (modified) llvm/lib/IR/Constants.cpp (+3-1)
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+16-16)
- (modified) llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp (+1-1)
- (modified) llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp (+1-1)
- (modified) llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp (+1-1)
- (modified) llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp (+1-1)
- (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.cpp (+12-5)
- (modified) llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp (+2-2)
- (modified) llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp (+3-1)
- (modified) llvm/lib/Target/Hexagon/HexagonConstPropagation.cpp (+2-1)
- (modified) llvm/lib/Target/Hexagon/HexagonGenExtract.cpp (+1-1)
- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+3-1)
- (modified) llvm/lib/Target/X86/X86ISelLowering.cpp (+3-3)
- (modified) llvm/lib/Transforms/IPO/ArgumentPromotion.cpp (+2-1)
- (modified) llvm/lib/Transforms/Utils/SimplifyCFG.cpp (+1-1)
- (modified) llvm/unittests/ADT/APFixedPointTest.cpp (+5-4)
- (modified) mlir/include/mlir/IR/BuiltinAttributes.td (+3-1)
- (modified) mlir/include/mlir/IR/OpImplementation.h (+2-1)
- (modified) mlir/lib/Conversion/TosaToArith/TosaToArith.cpp (+1-1)
- (modified) mlir/lib/Dialect/ControlFlow/IR/ControlFlowOps.cpp (+1-1)
- (modified) mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp (+1-1)
- (modified) mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp (+1-1)
- (modified) mlir/lib/IR/Builders.cpp (+12-4)
- (modified) mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp (+4-2)
- (modified) mlir/unittests/Dialect/SPIRV/SerializationTest.cpp (+1-1)
``````````diff
diff --git a/clang/lib/AST/ByteCode/IntegralAP.h b/clang/lib/AST/ByteCode/IntegralAP.h
index a4d656433344b7..6ab3d09ec85d5b 100644
--- a/clang/lib/AST/ByteCode/IntegralAP.h
+++ b/clang/lib/AST/ByteCode/IntegralAP.h
@@ -61,7 +61,7 @@ template <bool Signed> class IntegralAP final {
IntegralAP(APInt V) : V(V) {}
/// Arbitrary value for uninitialized variables.
- IntegralAP() : IntegralAP(-1, 3) {}
+ IntegralAP() : IntegralAP(Signed ? -1 : 7, 3) {}
IntegralAP operator-() const { return IntegralAP(-V); }
IntegralAP operator-(const IntegralAP &Other) const {
@@ -112,7 +112,9 @@ template <bool Signed> class IntegralAP final {
template <unsigned Bits, bool InputSigned>
static IntegralAP from(Integral<Bits, InputSigned> I, unsigned BitWidth) {
- APInt Copy = APInt(BitWidth, static_cast<uint64_t>(I), InputSigned);
+ // TODO: Avoid implicit trunc?
+ APInt Copy = APInt(BitWidth, static_cast<uint64_t>(I), InputSigned,
+ /*implicitTrunc=*/true);
return IntegralAP<Signed>(Copy);
}
diff --git a/clang/lib/CodeGen/CGVTT.cpp b/clang/lib/CodeGen/CGVTT.cpp
index 20bd2c2fc2c642..989a07d09d50ee 100644
--- a/clang/lib/CodeGen/CGVTT.cpp
+++ b/clang/lib/CodeGen/CGVTT.cpp
@@ -85,8 +85,9 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
cast<llvm::StructType>(VTable->getValueType())
->getElementType(AddressPoint.VTableIndex));
unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
- llvm::ConstantRange InRange(llvm::APInt(32, -Offset, true),
- llvm::APInt(32, VTableSize - Offset, true));
+ llvm::ConstantRange InRange(
+ llvm::APInt(32, (int)-Offset, true),
+ llvm::APInt(32, (int)(VTableSize - Offset), true));
llvm::Constant *Init = llvm::ConstantExpr::getGetElementPtr(
VTable->getValueType(), VTable, Idxs, /*InBounds=*/true, InRange);
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index dcc35d5689831e..ff018fa22db866 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -2099,8 +2099,9 @@ ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
unsigned VTableSize =
ComponentSize * Layout.getVTableSize(AddressPoint.VTableIndex);
unsigned Offset = ComponentSize * AddressPoint.AddressPointIndex;
- llvm::ConstantRange InRange(llvm::APInt(32, -Offset, true),
- llvm::APInt(32, VTableSize - Offset, true));
+ llvm::ConstantRange InRange(
+ llvm::APInt(32, (int)-Offset, true),
+ llvm::APInt(32, (int)(VTableSize - Offset), true));
return llvm::ConstantExpr::getGetElementPtr(
VTable->getValueType(), VTable, Indices, /*InBounds=*/true, InRange);
}
diff --git a/clang/lib/Parse/ParseInit.cpp b/clang/lib/Parse/ParseInit.cpp
index 0a9a359cdaf979..e7c8d79ccccac3 100644
--- a/clang/lib/Parse/ParseInit.cpp
+++ b/clang/lib/Parse/ParseInit.cpp
@@ -437,7 +437,9 @@ ExprResult Parser::createEmbedExpr() {
SourceLocation StartLoc = ConsumeAnnotationToken();
if (Data->BinaryData.size() == 1) {
Res = IntegerLiteral::Create(Context,
- llvm::APInt(CHAR_BIT, Data->BinaryData.back()),
+ llvm::APInt(CHAR_BIT, Data->BinaryData.back(),
+ /*isSigned=*/false,
+ /*implicitTrunc=*/true),
Context.UnsignedCharTy, StartLoc);
} else {
auto CreateStringLiteralFromStringRef = [&](StringRef Str, QualType Ty) {
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 2f7e9c754ce095..b830d6a5ae10d0 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -3590,8 +3590,11 @@ ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) {
ExprResult Sema::ActOnIntegerConstant(SourceLocation Loc, uint64_t Val) {
unsigned IntSize = Context.getTargetInfo().getIntWidth();
- return IntegerLiteral::Create(Context, llvm::APInt(IntSize, Val),
- Context.IntTy, Loc);
+ // TODO: Avoid implicit trunc?
+ return IntegerLiteral::Create(
+ Context,
+ llvm::APInt(IntSize, Val, /*isSigned=*/false, /*implicitTrunc=*/true),
+ Context.IntTy, Loc);
}
static Expr *BuildFloatingLiteral(Sema &S, NumericLiteralParser &Literal,
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 9afb8cea26fe78..454a5b41d2c17e 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -5699,7 +5699,9 @@ StmtResult SemaOpenMP::ActOnOpenMPCanonicalLoop(Stmt *AStmt) {
llvm_unreachable("unhandled unary increment operator");
}
Step = IntegerLiteral::Create(
- Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), Direction), LogicalTy, {});
+ Ctx,
+ llvm::APInt(Ctx.getIntWidth(LogicalTy), Direction, /*isSigned=*/true),
+ LogicalTy, {});
} else if (auto *IncBin = dyn_cast<BinaryOperator>(Inc)) {
if (IncBin->getOpcode() == BO_AddAssign) {
Step = IncBin->getRHS();
diff --git a/lldb/source/Expression/DWARFExpression.cpp b/lldb/source/Expression/DWARFExpression.cpp
index 22d899f799d0fd..564ddd098c4af6 100644
--- a/lldb/source/Expression/DWARFExpression.cpp
+++ b/lldb/source/Expression/DWARFExpression.cpp
@@ -860,10 +860,11 @@ llvm::Expected<Value> DWARFExpression::Evaluate(
// TODO: Implement a real typed stack, and store the genericness of the value
// there.
auto to_generic = [&](auto v) {
+ // TODO: Avoid implicit trunc?
bool is_signed = std::is_signed<decltype(v)>::value;
- return Scalar(llvm::APSInt(
- llvm::APInt(8 * opcodes.GetAddressByteSize(), v, is_signed),
- !is_signed));
+ return Scalar(llvm::APSInt(llvm::APInt(8 * opcodes.GetAddressByteSize(), v,
+ is_signed, /*implicitTrunc=*/true),
+ !is_signed));
};
// The default kind is a memory location. This is updated by any
diff --git a/llvm/include/llvm/ADT/APFixedPoint.h b/llvm/include/llvm/ADT/APFixedPoint.h
index ae40db96e4818c..09d4df6d980649 100644
--- a/llvm/include/llvm/ADT/APFixedPoint.h
+++ b/llvm/include/llvm/ADT/APFixedPoint.h
@@ -159,7 +159,9 @@ class APFixedPoint {
}
APFixedPoint(uint64_t Val, const FixedPointSemantics &Sema)
- : APFixedPoint(APInt(Sema.getWidth(), Val, Sema.isSigned()), Sema) {}
+ : APFixedPoint(APInt(Sema.getWidth(), Val, Sema.isSigned(),
+ /*implicitTrunc=*/true),
+ Sema) {}
// Zero initialization.
APFixedPoint(const FixedPointSemantics &Sema) : APFixedPoint(0, Sema) {}
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index a7a6de3f3b97b0..a05795c1267bac 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -887,7 +887,8 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
APInt Offset = APInt(
BitWidth,
DL.getIndexedOffsetInType(
- SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)));
+ SrcElemTy, ArrayRef((Value *const *)Ops.data() + 1, Ops.size() - 1)),
+ /*isSigned=*/true, /*implicitTrunc=*/true);
std::optional<ConstantRange> InRange = GEP->getInRange();
if (InRange)
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index 957ac883490c45..9dcebf424f8e33 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -95,10 +95,8 @@ static bool isDereferenceableAndAlignedPointer(
auto IsKnownDeref = [&]() {
bool CheckForNonNull, CheckForFreed;
- APInt KnownDerefBytes(Size.getBitWidth(),
- V->getPointerDereferenceableBytes(DL, CheckForNonNull,
- CheckForFreed));
- if (!KnownDerefBytes.getBoolValue() || !KnownDerefBytes.uge(Size) ||
+ if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
+ CheckForFreed)) ||
CheckForFreed)
return false;
if (CheckForNonNull &&
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index e1abf5e4d885ec..dc2dc4c1733b5e 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -767,6 +767,8 @@ SizeOffsetAPInt ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
TypeSize ElemSize = DL.getTypeAllocSize(I.getAllocatedType());
if (ElemSize.isScalable() && Options.EvalMode != ObjectSizeOpts::Mode::Min)
return ObjectSizeOffsetVisitor::unknown();
+ if (!isUIntN(IntTyBits, ElemSize.getKnownMinValue()))
+ return ObjectSizeOffsetVisitor::unknown();
APInt Size(IntTyBits, ElemSize.getKnownMinValue());
if (!I.isArrayAllocation())
return SizeOffsetAPInt(align(Size, I.getAlign()), Zero);
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 1d3443588ce60d..5670a3981b99c7 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -6874,7 +6874,7 @@ const ConstantRange &ScalarEvolution::getRangeRef(
bool CanBeNull, CanBeFreed;
uint64_t DerefBytes =
V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
- if (DerefBytes > 1) {
+ if (DerefBytes > 1 && isUIntN(BitWidth, DerefBytes)) {
// The highest address the object can start is DerefBytes bytes before
// the end (unsigned max value). If this value is not a multiple of the
// alignment, the last possible start value is the next lowest multiple
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 6f997510b03609..f66b89e79f9d8b 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -858,7 +858,8 @@ class BitcodeReader : public BitcodeReaderBase, public GVMaterializer {
} else {
int64_t Start = BitcodeReader::decodeSignRotatedValue(Record[OpNum++]);
int64_t End = BitcodeReader::decodeSignRotatedValue(Record[OpNum++]);
- return ConstantRange(APInt(BitWidth, Start), APInt(BitWidth, End));
+ return ConstantRange(APInt(BitWidth, Start, true),
+ APInt(BitWidth, End, true));
}
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 9b96dbb666198a..bc925865b678ef 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1641,7 +1641,10 @@ SDValue SelectionDAG::getConstant(uint64_t Val, const SDLoc &DL, EVT VT,
assert((EltVT.getSizeInBits() >= 64 ||
(uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
"getConstant with a uint64_t value that doesn't fit in the type!");
- return getConstant(APInt(EltVT.getSizeInBits(), Val), DL, VT, isT, isO);
+ // TODO: Avoid implicit trunc?
+ return getConstant(APInt(EltVT.getSizeInBits(), Val, /*isSigned=*/false,
+ /*implicitTrunc=*/true),
+ DL, VT, isT, isO);
}
SDValue SelectionDAG::getConstant(const APInt &Val, const SDLoc &DL, EVT VT,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 25213f587116d5..684c6f8962b613 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4338,7 +4338,8 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
GTI.getSequentialElementStride(DAG.getDataLayout());
// We intentionally mask away the high bits here; ElementSize may not
// fit in IdxTy.
- APInt ElementMul(IdxSize, ElementSize.getKnownMinValue());
+ APInt ElementMul(IdxSize, ElementSize.getKnownMinValue(),
+ /*isSigned=*/false, /*implicitTrunc=*/true);
bool ElementScalable = ElementSize.isScalable();
// If this is a scalar constant or a splat vector of constants,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 2a97580942df36..119c30a18681cd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -2200,7 +2200,9 @@ ScheduleDAGSDNodes *SelectionDAGISel::CreateScheduler() {
bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
int64_t DesiredMaskS) const {
const APInt &ActualMask = RHS->getAPIntValue();
- const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
+ // TODO: Avoid implicit trunc?
+ const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS,
+ /*isSigned=*/false, /*implicitTrunc=*/true);
// If the actual mask exactly matches, success!
if (ActualMask == DesiredMask)
@@ -2229,7 +2231,9 @@ bool SelectionDAGISel::CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
bool SelectionDAGISel::CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
int64_t DesiredMaskS) const {
const APInt &ActualMask = RHS->getAPIntValue();
- const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS);
+ // TODO: Avoid implicit trunc?
+ const APInt &DesiredMask = APInt(LHS.getValueSizeInBits(), DesiredMaskS,
+ /*isSigned=*/false, /*implicitTrunc=*/true);
// If the actual mask exactly matches, success!
if (ActualMask == DesiredMask)
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index a2a232ed93b72f..b864b0c54d6602 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -6813,7 +6813,9 @@ TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
PAmts.push_back(DAG.getConstant(P, DL, SVT));
KAmts.push_back(
- DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
+ DAG.getConstant(APInt(ShSVT.getSizeInBits(), K, /*isSigned=*/false,
+ /*implicitTrunc=*/true),
+ DL, ShSVT));
QAmts.push_back(DAG.getConstant(Q, DL, SVT));
return true;
};
@@ -7084,7 +7086,9 @@ TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
PAmts.push_back(DAG.getConstant(P, DL, SVT));
AAmts.push_back(DAG.getConstant(A, DL, SVT));
KAmts.push_back(
- DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
+ DAG.getConstant(APInt(ShSVT.getSizeInBits(), K, /*isSigned=*/false,
+ /*implicitTrunc=*/true),
+ DL, ShSVT));
QAmts.push_back(DAG.getConstant(Q, DL, SVT));
return true;
};
diff --git a/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
index 4cce4a77b343f0..e3b7db2380bb00 100644
--- a/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
+++ b/llvm/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -588,7 +588,7 @@ GenericValue MCJIT::runFunction(Function *F, ArrayRef<GenericValue> ArgValues) {
return rv;
}
case Type::VoidTyID:
- rv.IntVal = APInt(32, ((int(*)())(intptr_t)FPtr)());
+ rv.IntVal = APInt(32, ((int (*)())(intptr_t)FPtr)(), true);
return rv;
case Type::FloatTyID:
rv.FloatVal = ((float(*)())(intptr_t)FPtr)();
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
index d6c00a4b547829..63327db2705095 100644
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -932,7 +932,9 @@ Constant *ConstantInt::get(Type *Ty, uint64_t V, bool isSigned) {
}
ConstantInt *ConstantInt::get(IntegerType *Ty, uint64_t V, bool isSigned) {
- return get(Ty->getContext(), APInt(Ty->getBitWidth(), V, isSigned));
+ // TODO: Avoid implicit trunc?
+ return get(Ty->getContext(),
+ APInt(Ty->getBitWidth(), V, isSigned, /*implicitTrunc=*/true));
}
Constant *ConstantInt::get(Type *Ty, const APInt& V) {
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index b11ac81069f660..497d1f0f02b5ef 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2359,10 +2359,11 @@ void AArch64TargetLowering::computeKnownBitsForTargetNode(
}
case AArch64ISD::BICi: {
// Compute the bit cleared value.
- uint64_t Mask =
- ~(Op->getConstantOperandVal(1) << Op->getConstantOperandVal(2));
+ APInt Mask =
+ ~(Op->getConstantOperandAPInt(1) << Op->getConstantOperandAPInt(2))
+ .trunc(Known.getBitWidth());
Known = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
- Known &= KnownBits::makeConstant(APInt(Known.getBitWidth(), Mask));
+ Known &= KnownBits::makeConstant(Mask);
break;
}
case AArch64ISD::VLSHR: {
@@ -12706,7 +12707,8 @@ static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
// Benefit form APInt to handle overflow when calculating expected element.
unsigned NumElts = VT.getVectorNumElements();
unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
- APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);
+ APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1, /*isSigned=*/false,
+ /*implicitTrunc=*/true);
// The following shuffle indices must be the successive elements after the
// first real element.
bool FoundWrongElt = std::any_of(FirstRealElt + 1, M.end(), [&](int Elt) {
@@ -14173,9 +14175,9 @@ static SDValue NormalizeBuildVector(SDValue Op,
// (with operands cast to integers), then the only possibilities
// are constants and UNDEFs.
if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) {
- APInt LowBits(EltTy.getSizeInBits(),
- CstLane->getZExtValue());
- Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32);
+ Lane = DAG.getConstant(
+ CstLane->getAPIntValue().trunc(EltTy.getSizeInBits()).getZExtValue(),
+ dl, MVT::i32);
} else if (Lane.getNode()->isUndef()) {
Lane = DAG.getUNDEF(MVT::i32);
} else {
@@ -23978,7 +23980,7 @@ static bool findMoreOptimalIndexType(const MaskedGatherScatterSDNode *N,
EVT NewIndexVT = IndexVT.changeVectorElementType(MVT::i32);
// Stride does not scale explicitly by 'Scale', because it happens in
// the gather/scatter addressing mode.
- Index = DAG.getStepVector(SDLoc(N), NewIndexVT, APInt(32, Stride));
+ Index = DAG.getStepVector(SDLoc(N), NewIndexVT, APInt(32, Stride, true));
return true;
}
@@ -28978,7 +28980,7 @@ static SDValue GenerateFixedLengthSVETBL(SDValue Op, SDValue Op1, SDValue Op2,
unsigned BitsPerElt = VTOp1.getVectorElementType().getSizeInBits();
unsigned IndexLen = MinSVESize / BitsPerElt;
unsigned ElementsPerVectorReg = VTOp1.getVectorNumElements();
- uint64_t MaxOffset = APInt(BitsPerElt, -1, false).getZExtValue();
+ uint64_t MaxOffset = APInt(BitsPerElt, -1, true).getZExtValue();
EVT MaskEltType = VTOp1.getVectorElementType().changeTypeToInteger();
EVT MaskType = EVT::getVectorVT(*DAG.getContext(), MaskEltType, IndexLen);
bool MinMaxEqual = (MinSVESize == MaxSVESize);
@@ -29336,16 +29338,14 @@ bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
KnownBits KnownOp0 =
TLO.DAG.computeKnownBits(Op0, OriginalDemandedElts, Depth + 1);
// Op0 &= ~(ConstantOperandVal(1) << ConstantOperandVal(2))
- uint64_t BitsToClear = Op->getConstantOperandVal(1)
- << Op->getConstantOperandVal(2);
+ APInt BitsToClear =
+ (Op->getConstantOperandAPInt(1) << Op->getConstantOperandAPInt(2))
+ .trunc(KnownOp0.getBitWidth());
APInt AlreadyZeroedBitsToClear = BitsToClear & KnownOp0.Zero;
- if (APInt(Known.getBitWidth(), BitsToClear)
- .isSubsetOf(AlreadyZeroedBitsToClear))
+ if (BitsToClear.isSubsetOf(AlreadyZeroedBitsToClear))
return TLO.CombineTo(Op, Op0);
- Known = KnownOp0 &
- KnownBits::makeConstant(APInt(Known.getBitWidth(), ~BitsToClear));
-
+ Known = KnownOp0 & KnownBits::m...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/80309
More information about the Mlir-commits mailing list