[llvm] 17fb3e8 - [VPlan] Skip extending ICmp results in truncateToMinimalBitwidths.
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 23 03:50:57 PDT 2024
Author: Florian Hahn
Date: 2024-04-23T11:50:26+01:00
New Revision: 17fb3e82f6c950267bb01cc2fd2a84b0d9e9d0d8
URL: https://github.com/llvm/llvm-project/commit/17fb3e82f6c950267bb01cc2fd2a84b0d9e9d0d8
DIFF: https://github.com/llvm/llvm-project/commit/17fb3e82f6c950267bb01cc2fd2a84b0d9e9d0d8.diff
LOG: [VPlan] Skip extending ICmp results in truncateToMinimalBitwidths.
Results of an icmp do not need to be extended after their operands have been
truncated, as the result is always i1 regardless of the operands' width. Skip
icmp recipes when extending results back to their original width.
Fixes https://github.com/llvm/llvm-project/issues/79742
Fixes https://github.com/llvm/llvm-project/issues/85185
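
To illustrate, a minimal sketch of the affected pattern (adapted from the
test added below; value names are illustrative):

  %c = icmp ne i64 %x, 0        ; result is i1, whatever the operand width
  %c.ext = zext i1 %c to i64    ; any widening of the i1 is its own recipe
  %or = or i64 %x, %c.ext

When the operands of the icmp are truncated, the compare still produces i1,
so zero-extending its result back to the original width is unnecessary.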
Added:
llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll
Modified:
llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 007dc3f89b3fb9..9580dc4a27f615 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -1031,7 +1031,9 @@ void VPlanTransforms::truncateToMinimalBitwidths(
if (auto *VPW = dyn_cast<VPRecipeWithIRFlags>(&R))
VPW->dropPoisonGeneratingFlags();
- if (OldResSizeInBits != NewResSizeInBits) {
+ using namespace llvm::VPlanPatternMatch;
+ if (OldResSizeInBits != NewResSizeInBits &&
+ !match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue()))) {
// Extend result to original width.
auto *Ext =
new VPWidenCastRecipe(Instruction::ZExt, ResultVPV, OldResTy);
@@ -1040,8 +1042,9 @@ void VPlanTransforms::truncateToMinimalBitwidths(
Ext->setOperand(0, ResultVPV);
assert(OldResSizeInBits > NewResSizeInBits && "Nothing to shrink?");
} else
- assert(cast<VPWidenRecipe>(&R)->getOpcode() == Instruction::ICmp &&
- "Only ICmps should not need extending the result.");
+ assert(
+ match(&R, m_Binary<Instruction::ICmp>(m_VPValue(), m_VPValue())) &&
+ "Only ICmps should not need extending the result.");
assert(!isa<VPWidenStoreRecipe>(&R) && "stores cannot be narrowed");
if (isa<VPWidenLoadRecipe>(&R))
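
For reference, with this change the vectorized output keeps the compare at
<4 x i1> and leaves the pre-existing zext as a separate instruction, as in
the CHECK lines of test_icmp_and_op_zext below (names shortened):

  %c = icmp ne <4 x i64> %broadcast.splat, zeroinitializer
  %c.ext = zext <4 x i1> %c to <4 x i64>
  %or = or <4 x i64> %broadcast.splat, %c.ext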
diff --git a/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll b/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll
new file mode 100644
index 00000000000000..33fc1f70bf3c09
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/trunc-extended-icmps.ll
@@ -0,0 +1,211 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -p loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S %s | FileCheck %s
+
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+
+define i32 @test_icmp_constant_op_zext(ptr %dst) {
+; CHECK-LABEL: define i32 @test_icmp_constant_op_zext(
+; CHECK-SAME: ptr [[DST:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i32 [[INDEX]] to i16
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i16 1, [[DOTCAST]]
+; CHECK-NEXT: [[TMP0:%.*]] = add i16 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[DST]], i16 [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 0
+; CHECK-NEXT: store <4 x i8> <i8 109, i8 109, i8 109, i8 109>, ptr [[TMP2]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
+; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ 997, [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i64 7304878031173690989, 0
+; CHECK-NEXT: [[C_EXT:%.*]] = zext i1 [[C]] to i64
+; CHECK-NEXT: [[OR:%.*]] = or i64 7304878031173690989, [[C_EXT]]
+; CHECK-NEXT: [[OR_TRUNC:%.*]] = trunc i64 [[OR]] to i8
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i16 [[IV]]
+; CHECK-NEXT: store i8 [[OR_TRUNC]], ptr [[GEP]], align 1
+; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 1000
+; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: exit:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i16 [ 1, %entry ], [ %iv.next, %loop ]
+ %c = icmp ne i64 7304878031173690989, 0
+ %c.ext = zext i1 %c to i64
+ %or = or i64 7304878031173690989, %c.ext
+ %or.trunc = trunc i64 %or to i8
+ %gep = getelementptr i8, ptr %dst, i16 %iv
+ store i8 %or.trunc, ptr %gep, align 1
+ %iv.next = add i16 %iv, 1
+ %ec = icmp eq i16 %iv.next, 1000
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret i32 0
+}
+
+
+define i32 @test_icmp_and_op_zext(ptr %dst, i64 %a) {
+; CHECK-LABEL: define i32 @test_icmp_and_op_zext(
+; CHECK-SAME: ptr [[DST:%.*]], i64 [[A:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[AND:%.*]] = and i64 [[A]], 7304878031173690989
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[AND]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i32 [[INDEX]] to i16
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i16 1, [[DOTCAST]]
+; CHECK-NEXT: [[TMP0:%.*]] = add i16 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <4 x i64> [[BROADCAST_SPLAT]], zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i1> [[TMP1]] to <4 x i64>
+; CHECK-NEXT: [[TMP3:%.*]] = or <4 x i64> [[BROADCAST_SPLAT]], [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = trunc <4 x i64> [[TMP3]] to <4 x i8>
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[DST]], i16 [[TMP0]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i32 0
+; CHECK-NEXT: store <4 x i8> [[TMP4]], ptr [[TMP6]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
+; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ 997, [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i64 [[AND]], 0
+; CHECK-NEXT: [[C_EXT:%.*]] = zext i1 [[C]] to i64
+; CHECK-NEXT: [[OR:%.*]] = or i64 [[AND]], [[C_EXT]]
+; CHECK-NEXT: [[OR_TRUNC:%.*]] = trunc i64 [[OR]] to i8
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[DST]], i16 [[IV]]
+; CHECK-NEXT: store i8 [[OR_TRUNC]], ptr [[GEP]], align 1
+; CHECK-NEXT: [[IV_NEXT]] = add i16 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i16 [[IV_NEXT]], 1000
+; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK: exit:
+; CHECK-NEXT: ret i32 0
+;
+entry:
+ %and = and i64 %a, 7304878031173690989
+ br label %loop
+
+loop:
+ %iv = phi i16 [ 1, %entry ], [ %iv.next, %loop ]
+ %c = icmp ne i64 %and, 0
+ %c.ext = zext i1 %c to i64
+ %or = or i64 %and, %c.ext
+ %or.trunc = trunc i64 %or to i8
+ %gep = getelementptr i8, ptr %dst, i16 %iv
+ store i8 %or.trunc, ptr %gep, align 1
+ %iv.next = add i16 %iv, 1
+ %ec = icmp eq i16 %iv.next, 1000
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret i32 0
+}
+
+define void @ext_cmp(ptr %src.1, ptr %src.2, ptr noalias %dst) {
+; CHECK-LABEL: define void @ext_cmp(
+; CHECK-SAME: ptr [[SRC_1:%.*]], ptr [[SRC_2:%.*]], ptr noalias [[DST:%.*]]) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i16, ptr [[SRC_1]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i16, ptr [[TMP1]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i16>, ptr [[TMP2]], align 2
+; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt <4 x i16> zeroinitializer, [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[SRC_2]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP5]], align 2
+; CHECK-NEXT: [[TMP6:%.*]] = zext <4 x i8> [[WIDE_LOAD1]] to <4 x i16>
+; CHECK-NEXT: [[TMP7:%.*]] = select <4 x i1> [[TMP3]], <4 x i16> zeroinitializer, <4 x i16> [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = and <4 x i16> [[TMP7]], zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i16, ptr [[TMP9]], i32 0
+; CHECK-NEXT: store <4 x i16> [[TMP8]], ptr [[TMP10]], align 2
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1000, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[GEP_SRC_1:%.*]] = getelementptr inbounds i16, ptr [[SRC_1]], i64 [[IV]]
+; CHECK-NEXT: [[I2:%.*]] = load i16, ptr [[GEP_SRC_1]], align 2
+; CHECK-NEXT: [[I3:%.*]] = sext i16 [[I2]] to i32
+; CHECK-NEXT: [[C_1:%.*]] = icmp sgt i32 0, [[I3]]
+; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr inbounds i8, ptr [[SRC_2]], i64 [[IV]]
+; CHECK-NEXT: [[I4:%.*]] = load i8, ptr [[GEP_SRC_2]], align 2
+; CHECK-NEXT: [[I5:%.*]] = zext i8 [[I4]] to i32
+; CHECK-NEXT: [[I6:%.*]] = select i1 [[C_1]], i32 0, i32 [[I5]]
+; CHECK-NEXT: [[I7:%.*]] = and i32 [[I6]], 0
+; CHECK-NEXT: [[I8:%.*]] = trunc nuw nsw i32 [[I7]] to i16
+; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[IV]]
+; CHECK-NEXT: store i16 [[I8]], ptr [[GEP_DST]], align 2
+; CHECK-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
+; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.src.1 = getelementptr inbounds i16, ptr %src.1, i64 %iv
+ %i2 = load i16, ptr %gep.src.1, align 2
+ %i3 = sext i16 %i2 to i32
+ %c.1 = icmp sgt i32 0, %i3
+ %gep.src.2 = getelementptr inbounds i8, ptr %src.2, i64 %iv
+ %i4 = load i8, ptr %gep.src.2, align 2
+ %i5 = zext i8 %i4 to i32
+ %i6 = select i1 %c.1, i32 0, i32 %i5
+ %i7 = and i32 %i6, 0
+ %i8 = trunc nuw nsw i32 %i7 to i16
+ %gep.dst = getelementptr inbounds i16, ptr %dst, i64 %iv
+ store i16 %i8, ptr %gep.dst, align 2
+ %iv.next = add nsw i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, 1000
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
+; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
+;.