[llvm] 57384ae - [ConstantFold] Avoid creating undesirable cast expressions

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 1 08:51:01 PDT 2023


Author: Nikita Popov
Date: 2023-11-01T16:50:52+01:00
New Revision: 57384aeb3743dc2b5540cedd965977b6a67ae01f

URL: https://github.com/llvm/llvm-project/commit/57384aeb3743dc2b5540cedd965977b6a67ae01f
DIFF: https://github.com/llvm/llvm-project/commit/57384aeb3743dc2b5540cedd965977b6a67ae01f.diff

LOG: [ConstantFold] Avoid creating undesirable cast expressions

Similar to what we do for binops: for undesirable casts, we should call
the constant folding API instead of the constant expression API, to
avoid indirectly creating undesirable cast expressions.
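For reference, a minimal sketch of the dispatch pattern this patch applies
(mirroring the foldMaybeUndesirableCast helper added below). The helper name
castOrFold is hypothetical; the sketch assumes the declarations visible inside
llvm/lib/IR/ConstantFold.cpp, i.e. ConstantExpr::isDesirableCastOp /
ConstantExpr::getCast from llvm/IR/Constants.h and ConstantFoldCastInstruction
from the lib/IR-internal ConstantFold.h, and that callers handle a nullptr
result when no fold is possible:

    #include "ConstantFold.h"      // lib/IR-internal; declares ConstantFoldCastInstruction
    #include "llvm/IR/Constants.h" // ConstantExpr
    using namespace llvm;

    // Sketch only, not the exact patch: materialize a constant expression
    // cast only when the opcode is still a desirable constexpr; otherwise
    // try to fold, and return nullptr if folding fails.
    static Constant *castOrFold(unsigned Opc, Constant *V, Type *DestTy) {
      if (ConstantExpr::isDesirableCastOp(Opc))
        return ConstantExpr::getCast(Opc, V, DestTy); // still allowed as a constexpr
      // Undesirable cast: never build a ConstantExpr for it. This may return
      // nullptr, which the caller must propagate rather than falling back to
      // ConstantExpr::getCast.
      return ConstantFoldCastInstruction(Opc, V, DestTy);
    }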

Added: 
    

Modified: 
    llvm/lib/IR/ConstantFold.cpp
    llvm/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll
    llvm/test/CodeGen/X86/codegen-prepare-extload.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
index d35106ae02b0dd0..15a74859045183b 100644
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -295,6 +295,13 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
   }
 }
 
+static Constant *foldMaybeUndesirableCast(unsigned opc, Constant *V,
+                                          Type *DestTy) {
+  return ConstantExpr::isDesirableCastOp(opc)
+             ? ConstantExpr::getCast(opc, V, DestTy)
+             : ConstantFoldCastInstruction(opc, V, DestTy);
+}
+
 Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
                                             Type *DestTy) {
   if (isa<PoisonValue>(V))
@@ -320,7 +327,7 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
     if (CE->isCast()) {
       // Try hard to fold cast of cast because they are often eliminable.
       if (unsigned newOpc = foldConstantCastPair(opc, CE, DestTy))
-        return ConstantExpr::getCast(newOpc, CE->getOperand(0), DestTy);
+        return foldMaybeUndesirableCast(newOpc, CE->getOperand(0), DestTy);
     } else if (CE->getOpcode() == Instruction::GetElementPtr &&
                // Do not fold addrspacecast (gep 0, .., 0). It might make the
                // addrspacecast uncanonicalized.
@@ -357,18 +364,22 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
     Type *DstEltTy = DestVecTy->getElementType();
     // Fast path for splatted constants.
     if (Constant *Splat = V->getSplatValue()) {
+      Constant *Res = foldMaybeUndesirableCast(opc, Splat, DstEltTy);
+      if (!Res)
+        return nullptr;
       return ConstantVector::getSplat(
-          cast<VectorType>(DestTy)->getElementCount(),
-          ConstantExpr::getCast(opc, Splat, DstEltTy));
+          cast<VectorType>(DestTy)->getElementCount(), Res);
     }
     SmallVector<Constant *, 16> res;
     Type *Ty = IntegerType::get(V->getContext(), 32);
     for (unsigned i = 0,
                   e = cast<FixedVectorType>(V->getType())->getNumElements();
          i != e; ++i) {
-      Constant *C =
-        ConstantExpr::getExtractElement(V, ConstantInt::get(Ty, i));
-      res.push_back(ConstantExpr::getCast(opc, C, DstEltTy));
+      Constant *C = ConstantExpr::getExtractElement(V, ConstantInt::get(Ty, i));
+      Constant *Casted = foldMaybeUndesirableCast(opc, C, DstEltTy);
+      if (!Casted)
+        return nullptr;
+      res.push_back(Casted);
     }
     return ConstantVector::get(res);
   }

diff --git a/llvm/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll b/llvm/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll
index 889a76b37ebe1cc..f5b853faed8a2aa 100644
--- a/llvm/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-codegen-prepare-extload.ll
@@ -338,7 +338,8 @@ entry:
 ; OPTALL: [[LD:%[a-zA-Z_0-9-]+]] = load i16, ptr %addr
 ;
 ; OPT-NEXT: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i16 [[LD]] to i32
-; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw nsw i32 [[SEXT]], zext (i1 icmp ne (ptr getelementptr inbounds ([2 x i32], ptr @c, i64 0, i64 1), ptr @a) to i32)
+; OPT-NEXT: [[SEXT2:%[a-zA-Z_0-9-]+]] = sext i16 zext (i1 icmp ne (ptr getelementptr inbounds ([2 x i32], ptr @c, i64 0, i64 1), ptr @a) to i16) to i32
+; OPT-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = add nuw nsw i32 [[SEXT]], [[SEXT2]]
 ;
 ; DISABLE-NEXT: [[ADD:%[a-zA-Z_0-9-]+]] = add nuw nsw i16 [[LD]], zext (i1 icmp ne (ptr getelementptr inbounds ([2 x i32], ptr @c, i64 0, i64 1), ptr @a) to i16)
 ; DISABLE-NEXT: [[RES:%[a-zA-Z_0-9-]+]] = sext i16 [[ADD]] to i32

diff --git a/llvm/test/CodeGen/X86/codegen-prepare-extload.ll b/llvm/test/CodeGen/X86/codegen-prepare-extload.ll
index f835d4f2dbad70f..676322cd0014f9a 100644
--- a/llvm/test/CodeGen/X86/codegen-prepare-extload.ll
+++ b/llvm/test/CodeGen/X86/codegen-prepare-extload.ll
@@ -546,7 +546,8 @@ define i32 @promotionOfArgEndsUpInValue(ptr %addr) {
 ; OPT-NEXT:  entry:
 ; OPT-NEXT:    [[VAL:%.*]] = load i16, ptr [[ADDR]], align 2
 ; OPT-NEXT:    [[CONV3:%.*]] = sext i16 [[VAL]] to i32
-; OPT-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[CONV3]], zext (i1 icmp ne (ptr getelementptr inbounds ([2 x i32], ptr @c, i64 0, i64 1), ptr @a) to i32)
+; OPT-NEXT:    [[PROMOTED:%.*]] = sext i16 zext (i1 icmp ne (ptr getelementptr inbounds ([2 x i32], ptr @c, i64 0, i64 1), ptr @a) to i16) to i32
+; OPT-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[CONV3]], [[PROMOTED]]
 ; OPT-NEXT:    ret i32 [[ADD]]
 ;
 ; DISABLE-LABEL: define i32 @promotionOfArgEndsUpInValue(
