[llvm-branch-commits] [llvm] 2ac58e2 - [InstCombine] Remove scalable vector restriction when folding SelectInst

Jun Ma via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Dec 15 04:52:13 PST 2020


Author: Jun Ma
Date: 2020-12-15T20:36:57+08:00
New Revision: 2ac58e21a115d16a91578defc4636c43e3608a2e

URL: https://github.com/llvm/llvm-project/commit/2ac58e21a115d16a91578defc4636c43e3608a2e
DIFF: https://github.com/llvm/llvm-project/commit/2ac58e21a115d16a91578defc4636c43e3608a2e.diff

LOG: [InstCombine] Remove scalable vector restriction when folding SelectInst

Differential Revision: https://reviews.llvm.org/D93083
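
The checks touched below previously compared lane counts through cast<FixedVectorType>(...)->getNumElements(), which is only valid for fixed-length vectors; comparing ElementCount values instead covers both fixed and scalable vectors, since an ElementCount carries the minimum lane count together with a fixed/scalable flag, so these folds now also apply to scalable-vector selects. A minimal sketch of the resulting guard, using a hypothetical helper name (the patch itself inlines the same check in the two hunks below):

    #include "llvm/IR/DerivedTypes.h"
    using namespace llvm;

    // If the condition is a vector, the operand type must also be a vector with
    // the same ElementCount, i.e. the same minimum lane count and the same
    // fixed/scalable kind.
    static bool hasMatchingElementCount(Type *CondTy, Type *OpTy) {
      auto *CondVTy = dyn_cast<VectorType>(CondTy);
      if (!CondVTy)
        return true; // Scalar condition: no lane-count constraint to enforce.
      auto *OpVTy = dyn_cast<VectorType>(OpTy);
      return OpVTy && CondVTy->getElementCount() == OpVTy->getElementCount();
    }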

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
    llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
    llvm/test/Transforms/InstCombine/select.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 6e94e5823433..e9ec8021e466 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -2322,13 +2322,11 @@ static Instruction *foldBitCastSelect(BitCastInst &BitCast,
   // A vector select must maintain the same number of elements in its operands.
   Type *CondTy = Cond->getType();
   Type *DestTy = BitCast.getType();
-  if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
-    if (!DestTy->isVectorTy())
+  if (auto *CondVTy = dyn_cast<VectorType>(CondTy))
+    if (!DestTy->isVectorTy() ||
+        CondVTy->getElementCount() !=
+            cast<VectorType>(DestTy)->getElementCount())
       return nullptr;
-    if (cast<FixedVectorType>(DestTy)->getNumElements() !=
-        cast<FixedVectorType>(CondVTy)->getNumElements())
-      return nullptr;
-  }
 
   // FIXME: This transform is restricted from changing the select between
   // scalars and vectors to avoid backend problems caused by creating

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 397cb0b0e187..c32f6ace42db 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -283,10 +283,9 @@ Instruction *InstCombinerImpl::foldSelectOpOp(SelectInst &SI, Instruction *TI,
     // The select condition may be a vector. We may only change the operand
     // type if the vector width remains the same (and matches the condition).
     if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
-      if (!FIOpndTy->isVectorTy())
-        return nullptr;
-      if (cast<FixedVectorType>(CondVTy)->getNumElements() !=
-          cast<FixedVectorType>(FIOpndTy)->getNumElements())
+      if (!FIOpndTy->isVectorTy() ||
+          CondVTy->getElementCount() !=
+              cast<VectorType>(FIOpndTy)->getElementCount())
         return nullptr;
 
       // TODO: If the backend knew how to deal with casts better, we could

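Both hunks rely on the same property of ElementCount: equality compares the minimum lane count together with the scalable flag, so a fixed <2 x i1> and a scalable <vscale x 2 x i1> count as different widths even though their minimum counts match. A small self-contained sketch of that behaviour, not taken from the patch, using ElementCount from llvm/Support/TypeSize.h:

    #include "llvm/Support/TypeSize.h"
    #include <cassert>
    using llvm::ElementCount;

    int main() {
      ElementCount Fixed2 = ElementCount::getFixed(2);       // e.g. <2 x i1>
      ElementCount Scalable2 = ElementCount::getScalable(2); // e.g. <vscale x 2 x i1>
      assert(Fixed2 == ElementCount::getFixed(2));
      assert(Scalable2 == ElementCount::getScalable(2));
      assert(Fixed2 != Scalable2); // same minimum count, different scalability
      return 0;
    }
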
diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
index 987f34e52ad2..819ac6dd3a82 100644
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -68,6 +68,15 @@ define <2 x i1> @test8vec(<2 x i1> %C, <2 x i1> %X) {
   ret <2 x i1> %R
 }
 
+define <vscale x 2 x i1> @test8vvec(<vscale x 2 x i1> %C, <vscale x 2 x i1> %X) {
+; CHECK-LABEL: @test8vvec(
+; CHECK-NEXT:    [[R:%.*]] = and <vscale x 2 x i1> [[C:%.*]],  [[X:%.*]]
+; CHECK-NEXT:    ret <vscale x 2 x i1> [[R]]
+;
+  %R = select <vscale x 2 x i1> %C, <vscale x 2 x i1> %X, <vscale x 2 x i1> zeroinitializer
+  ret <vscale x 2 x i1> %R
+}
+
 define i1 @test9(i1 %C, i1 %X) {
 ; CHECK-LABEL: @test9(
 ; CHECK-NEXT:    [[NOT_C:%.*]] = xor i1 [[C:%.*]], true
@@ -88,6 +97,16 @@ define <2 x i1> @test9vec(<2 x i1> %C, <2 x i1> %X) {
   ret <2 x i1> %R
 }
 
+define <vscale x 2 x i1> @test9vvec(<vscale x 2 x i1> %C, <vscale x 2 x i1> %X) {
+; CHECK-LABEL: @test9vvec(
+; CHECK-NEXT:    [[NOT_C:%.*]] = xor <vscale x 2 x i1> [[C:%.*]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT:    [[R:%.*]] = and <vscale x 2 x i1> [[NOT_C]], [[X:%.*]]
+; CHECK-NEXT:    ret <vscale x 2 x i1> [[R]]
+;
+  %R = select <vscale x 2 x i1> %C, <vscale x 2 x i1> zeroinitializer, <vscale x 2 x i1> %X
+  ret <vscale x 2 x i1> %R
+}
+
 define i1 @test10(i1 %C, i1 %X) {
 ; CHECK-LABEL: @test10(
 ; CHECK-NEXT:    [[NOT_C:%.*]] = xor i1 [[C:%.*]], true
@@ -699,6 +718,34 @@ define i48 @test51(<3 x i1> %icmp, <3 x i16> %tmp) {
   ret i48 %tmp2
 }
 
+define <vscale x 4 x float> @bitcast_select_bitcast(<vscale x 4 x i1> %icmp, <vscale x 4 x i32> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: @bitcast_select_bitcast(
+; CHECK-NEXT:    [[BC1:%.*]] = bitcast <vscale x 4 x i32> [[A:%.*]] to <vscale x 4 x float>
+; CHECK-NEXT:    [[SELECT:%.*]] = select <vscale x 4 x i1> [[ICMP:%.*]], <vscale x 4 x float> [[B:%.*]], <vscale x 4 x float> [[BC1]]
+; CHECK-NEXT:    ret <vscale x 4 x float> [[SELECT]]
+;
+  %bc1 = bitcast <vscale x 4 x float> %b to <vscale x 4 x i32>
+  %select = select <vscale x 4 x i1> %icmp, <vscale x 4 x i32> %bc1, <vscale x 4 x i32> %a
+  %bc2 = bitcast <vscale x 4 x i32> %select to <vscale x 4 x float>
+  ret <vscale x 4 x float> %bc2
+}
+
+define void @select_oneuse_bitcast(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32>* %ptr1) {
+; CHECK-LABEL: @select_oneuse_bitcast(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[C:%.*]], [[D:%.*]]
+; CHECK-NEXT:    [[SEL1_V:%.*]] = select <vscale x 4 x i1> [[CMP]], <vscale x 4 x float> [[A:%.*]], <vscale x 4 x float> [[B:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <vscale x 4 x i32>* [[PTR1:%.*]] to <vscale x 4 x float>*
+; CHECK-NEXT:    store <vscale x 4 x float> [[SEL1_V]], <vscale x 4 x float>* [[TMP1]], align 16
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp ult <vscale x 4 x i32> %c, %d
+  %bc1 = bitcast <vscale x 4 x float> %a to <vscale x 4 x i32>
+  %bc2 = bitcast <vscale x 4 x float> %b to <vscale x 4 x i32>
+  %sel1 = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %bc1, <vscale x 4 x i32> %bc2
+  store <vscale x 4 x i32> %sel1, <vscale x 4 x i32>* %ptr1
+  ret void
+}
+
 ; Allow select promotion even if there are multiple uses of bitcasted ops.
 ; Hoisting the selects allows later pattern matching to see that these are min/max ops.
 
@@ -723,6 +770,27 @@ define void @min_max_bitcast(<4 x float> %a, <4 x float> %b, <4 x i32>* %ptr1, <
   ret void
 }
 
+define void @min_max_bitcast1(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x i32>* %ptr1, <vscale x 4 x i32>* %ptr2) {
+; CHECK-LABEL: @min_max_bitcast1(
+; CHECK-NEXT:    [[CMP:%.*]] = fcmp olt <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    [[SEL1_V:%.*]] = select <vscale x 4 x i1> [[CMP]], <vscale x 4 x float> [[A]], <vscale x 4 x float> [[B]]
+; CHECK-NEXT:    [[SEL2_V:%.*]] = select <vscale x 4 x i1> [[CMP]], <vscale x 4 x float> [[B]], <vscale x 4 x float> [[A]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <vscale x 4 x i32>* [[PTR1:%.*]] to <vscale x 4 x float>*
+; CHECK-NEXT:    store <vscale x 4 x float> [[SEL1_V]], <vscale x 4 x float>* [[TMP1]], align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <vscale x 4 x i32>* [[PTR2:%.*]] to <vscale x 4 x float>*
+; CHECK-NEXT:    store <vscale x 4 x float> [[SEL2_V]], <vscale x 4 x float>* [[TMP2]], align 16
+; CHECK-NEXT:    ret void
+;
+  %cmp = fcmp olt <vscale x 4 x float> %a, %b
+  %bc1 = bitcast <vscale x 4 x float> %a to <vscale x 4 x i32>
+  %bc2 = bitcast <vscale x 4 x float> %b to <vscale x 4 x i32>
+  %sel1 = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %bc1, <vscale x 4 x i32> %bc2
+  %sel2 = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %bc2, <vscale x 4 x i32> %bc1
+  store <vscale x 4 x i32> %sel1, <vscale x 4 x i32>* %ptr1
+  store <vscale x 4 x i32> %sel2, <vscale x 4 x i32>* %ptr2
+  ret void
+}
+
 ; To avoid potential backend problems, we don't do the same transform for other casts.
 
 define void @truncs_before_selects(<4 x float> %f1, <4 x float> %f2, <4 x i64> %a, <4 x i64> %b, <4 x i32>* %ptr1, <4 x i32>* %ptr2) {

