[llvm] r286113 - [InstCombine] allow splat vector folds in adjustMinMax() (retry r285732)

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 7 07:52:46 PST 2016


Author: spatel
Date: Mon Nov  7 09:52:45 2016
New Revision: 286113

URL: http://llvm.org/viewvc/llvm-project?rev=286113&view=rev
Log:
[InstCombine] allow splat vector folds in adjustMinMax() (retry r285732)

This was reverted at r285866 because of a crash when handling a scalar
select of vectors. I added a check for that pattern and a test case based
on the example provided in the post-commit thread for r285732.
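As an illustration, the @smax3_vec case from the updated test file below shows
the kind of splat-vector pattern that adjustMinMax() can now canonicalize
(input on top, expected output from the new CHECK lines underneath):

  ; input: splat compare constant that the old ConstantInt-only check rejected
  %t = icmp sgt <2 x i32> %n, <i32 -1, i32 -1>
  %m = select <2 x i1> %t, <2 x i32> %n, <2 x i32> zeroinitializer

  ; after instcombine: predicate and select operands are swapped
  %t = icmp slt <2 x i32> %n, zeroinitializer
  %m = select <2 x i1> %t, <2 x i32> zeroinitializer, <2 x i32> %n

The new guard (SelTy->isVectorTy() != Cmp.getType()->isVectorTy()) bails out on
the scalar-select-of-vectors pattern covered by the new @scalar_select_of_vectors
test, which is left unchanged:

  %cmp = icmp slt i8 %x, 0
  %sel = select i1 %cmp, <2 x i16> %a, <2 x i16> %b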

Modified:
    llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp
    llvm/trunk/test/Transforms/InstCombine/adjust-for-minmax.ll

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp?rev=286113&r1=286112&r2=286113&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineSelect.cpp Mon Nov  7 09:52:45 2016
@@ -424,24 +424,22 @@ static bool adjustMinMax(SelectInst &Sel
   Value *FalseVal = Sel.getFalseValue();
 
   // We may move or edit the compare, so make sure the select is the only user.
-  if (!Cmp.hasOneUse())
+  const APInt *CmpC;
+  if (!Cmp.hasOneUse() || !match(CmpRHS, m_APInt(CmpC)))
     return false;
 
-  // FIXME: Use m_APInt to allow vector folds.
-  auto *CI = dyn_cast<ConstantInt>(CmpRHS);
-  if (!CI)
-    return false;
-
-  // These transformations only work for selects over integers.
-  IntegerType *SelectTy = dyn_cast<IntegerType>(Sel.getType());
-  if (!SelectTy)
+  // These transforms only work for selects of integers or vector selects of
+  // integer vectors.
+  Type *SelTy = Sel.getType();
+  auto *SelEltTy = dyn_cast<IntegerType>(SelTy->getScalarType());
+  if (!SelEltTy || SelTy->isVectorTy() != Cmp.getType()->isVectorTy())
     return false;
 
   Constant *AdjustedRHS;
   if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SGT)
-    AdjustedRHS = ConstantInt::get(CI->getContext(), CI->getValue() + 1);
+    AdjustedRHS = ConstantInt::get(CmpRHS->getType(), *CmpC + 1);
   else if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT)
-    AdjustedRHS = ConstantInt::get(CI->getContext(), CI->getValue() - 1);
+    AdjustedRHS = ConstantInt::get(CmpRHS->getType(), *CmpC - 1);
   else
     return false;
 
@@ -454,8 +452,8 @@ static bool adjustMinMax(SelectInst &Sel
   // Types do not match. Instead of calculating this with mixed types, promote
   // all to the larger type. This enables scalar evolution to analyze this
   // expression.
-  else if (CmpRHS->getType()->getScalarSizeInBits() < SelectTy->getBitWidth()) {
-    Constant *SextRHS = ConstantExpr::getSExt(AdjustedRHS, SelectTy);
+  else if (CmpRHS->getType()->getScalarSizeInBits() < SelEltTy->getBitWidth()) {
+    Constant *SextRHS = ConstantExpr::getSExt(AdjustedRHS, SelTy);
 
     // X = sext x; x >s c ? X : C+1 --> X = sext x; X <s C+1 ? C+1 : X
     // X = sext x; x <s c ? X : C-1 --> X = sext x; X >s C-1 ? C-1 : X
@@ -469,7 +467,7 @@ static bool adjustMinMax(SelectInst &Sel
       CmpLHS = FalseVal;
       AdjustedRHS = SextRHS;
     } else if (Cmp.isUnsigned()) {
-      Constant *ZextRHS = ConstantExpr::getZExt(AdjustedRHS, SelectTy);
+      Constant *ZextRHS = ConstantExpr::getZExt(AdjustedRHS, SelTy);
       // X = zext x; x >u c ? X : C+1 --> X = zext x; X <u C+1 ? C+1 : X
       // X = zext x; x <u c ? X : C-1 --> X = zext x; X >u C-1 ? C-1 : X
       // zext + signed compare cannot be changed:

Modified: llvm/trunk/test/Transforms/InstCombine/adjust-for-minmax.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/adjust-for-minmax.ll?rev=286113&r1=286112&r2=286113&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/adjust-for-minmax.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/adjust-for-minmax.ll Mon Nov  7 09:52:45 2016
@@ -68,13 +68,12 @@ define i32 @smax3(i32 %n) {
   ret i32 %m
 }
 
-; FIXME
 ; Swap vector signed pred and select ops.
 
 define <2 x i32> @smax3_vec(<2 x i32> %n) {
 ; CHECK-LABEL: @smax3_vec(
-; CHECK-NEXT:    [[T:%.*]] = icmp sgt <2 x i32> %n, <i32 -1, i32 -1>
-; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> %n, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[T:%.*]] = icmp slt <2 x i32> %n, zeroinitializer
+; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> zeroinitializer, <2 x i32> %n
 ; CHECK-NEXT:    ret <2 x i32> [[M]]
 ;
   %t = icmp sgt <2 x i32> %n, <i32 -1, i32 -1>
@@ -95,13 +94,12 @@ define i32 @smin3(i32 %n) {
   ret i32 %m
 }
 
-; FIXME
 ; Swap vector signed pred and select ops.
 
 define <2 x i32> @smin3_vec(<2 x i32> %n) {
 ; CHECK-LABEL: @smin3_vec(
-; CHECK-NEXT:    [[T:%.*]] = icmp slt <2 x i32> %n, <i32 1, i32 1>
-; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> %n, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[T:%.*]] = icmp sgt <2 x i32> %n, zeroinitializer
+; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> zeroinitializer, <2 x i32> %n
 ; CHECK-NEXT:    ret <2 x i32> [[M]]
 ;
   %t = icmp slt <2 x i32> %n, <i32 1, i32 1>
@@ -122,13 +120,12 @@ define i32 @umax3(i32 %n) {
   ret i32 %m
 }
 
-; FIXME
 ; Swap vector unsigned pred and select ops.
 
 define <2 x i32> @umax3_vec(<2 x i32> %n) {
 ; CHECK-LABEL: @umax3_vec(
-; CHECK-NEXT:    [[T:%.*]] = icmp ugt <2 x i32> %n, <i32 4, i32 4>
-; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> %n, <2 x i32> <i32 5, i32 5>
+; CHECK-NEXT:    [[T:%.*]] = icmp ult <2 x i32> %n, <i32 5, i32 5>
+; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> <i32 5, i32 5>, <2 x i32> %n
 ; CHECK-NEXT:    ret <2 x i32> [[M]]
 ;
   %t = icmp ugt <2 x i32> %n, <i32 4, i32 4>
@@ -149,13 +146,12 @@ define i32 @umin3(i32 %n) {
   ret i32 %m
 }
 
-; FIXME
 ; Swap vector unsigned pred and select ops.
 
 define <2 x i32> @umin3_vec(<2 x i32> %n) {
 ; CHECK-LABEL: @umin3_vec(
-; CHECK-NEXT:    [[T:%.*]] = icmp ult <2 x i32> %n, <i32 7, i32 7>
-; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> %n, <2 x i32> <i32 6, i32 6>
+; CHECK-NEXT:    [[T:%.*]] = icmp ugt <2 x i32> %n, <i32 6, i32 6>
+; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> <i32 6, i32 6>, <2 x i32> %n
 ; CHECK-NEXT:    ret <2 x i32> [[M]]
 ;
   %t = icmp ult <2 x i32> %n, <i32 7, i32 7>
@@ -176,13 +172,12 @@ define i32 @smax4(i32 %n) {
   ret i32 %m
 }
 
-; FIXME
 ; Canonicalize vector signed pred and swap pred and select ops.
 
 define <2 x i32> @smax4_vec(<2 x i32> %n) {
 ; CHECK-LABEL: @smax4_vec(
-; CHECK-NEXT:    [[T:%.*]] = icmp sgt <2 x i32> %n, <i32 -1, i32 -1>
-; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> %n, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[T:%.*]] = icmp slt <2 x i32> %n, zeroinitializer
+; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> zeroinitializer, <2 x i32> %n
 ; CHECK-NEXT:    ret <2 x i32> [[M]]
 ;
   %t = icmp sge <2 x i32> %n, zeroinitializer
@@ -203,13 +198,12 @@ define i32 @smin4(i32 %n) {
   ret i32 %m
 }
 
-; FIXME
 ; Canonicalize vector signed pred and swap pred and select ops.
 
 define <2 x i32> @smin4_vec(<2 x i32> %n) {
 ; CHECK-LABEL: @smin4_vec(
-; CHECK-NEXT:    [[T:%.*]] = icmp slt <2 x i32> %n, <i32 1, i32 1>
-; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> %n, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[T:%.*]] = icmp sgt <2 x i32> %n, zeroinitializer
+; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> zeroinitializer, <2 x i32> %n
 ; CHECK-NEXT:    ret <2 x i32> [[M]]
 ;
   %t = icmp sle <2 x i32> %n, zeroinitializer
@@ -230,13 +224,12 @@ define i32 @umax4(i32 %n) {
   ret i32 %m
 }
 
-; FIXME
 ; Canonicalize vector unsigned pred and swap pred and select ops.
 
 define <2 x i32> @umax4_vec(<2 x i32> %n) {
 ; CHECK-LABEL: @umax4_vec(
-; CHECK-NEXT:    [[T:%.*]] = icmp ugt <2 x i32> %n, <i32 7, i32 7>
-; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> %n, <2 x i32> <i32 8, i32 8>
+; CHECK-NEXT:    [[T:%.*]] = icmp ult <2 x i32> %n, <i32 8, i32 8>
+; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> <i32 8, i32 8>, <2 x i32> %n
 ; CHECK-NEXT:    ret <2 x i32> [[M]]
 ;
   %t = icmp uge <2 x i32> %n, <i32 8, i32 8>
@@ -257,13 +250,12 @@ define i32 @umin4(i32 %n) {
   ret i32 %m
 }
 
-; FIXME
 ; Canonicalize vector unsigned pred and swap pred and select ops.
 
 define <2 x i32> @umin4_vec(<2 x i32> %n) {
 ; CHECK-LABEL: @umin4_vec(
-; CHECK-NEXT:    [[T:%.*]] = icmp ult <2 x i32> %n, <i32 10, i32 10>
-; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> %n, <2 x i32> <i32 9, i32 9>
+; CHECK-NEXT:    [[T:%.*]] = icmp ugt <2 x i32> %n, <i32 9, i32 9>
+; CHECK-NEXT:    [[M:%.*]] = select <2 x i1> [[T]], <2 x i32> <i32 9, i32 9>, <2 x i32> %n
 ; CHECK-NEXT:    ret <2 x i32> [[M]]
 ;
   %t = icmp ule <2 x i32> %n, <i32 9, i32 9>
@@ -284,12 +276,11 @@ define i64 @smax_sext(i32 %a) {
   ret i64 %max
 }
 
-; FIXME
 define <2 x i64> @smax_sext_vec(<2 x i32> %a) {
 ; CHECK-LABEL: @smax_sext_vec(
 ; CHECK-NEXT:    [[A_EXT:%.*]] = sext <2 x i32> %a to <2 x i64>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i32> %a, <i32 -1, i32 -1>
-; CHECK-NEXT:    [[MAX:%.*]] = select <2 x i1> [[CMP]], <2 x i64> [[A_EXT]], <2 x i64> zeroinitializer
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i64> [[A_EXT]], zeroinitializer
+; CHECK-NEXT:    [[MAX:%.*]] = select <2 x i1> [[CMP]], <2 x i64> zeroinitializer, <2 x i64> [[A_EXT]]
 ; CHECK-NEXT:    ret <2 x i64> [[MAX]]
 ;
   %a_ext = sext <2 x i32> %a to <2 x i64>
@@ -311,12 +302,11 @@ define i64 @smin_sext(i32 %a) {
   ret i64 %min
 }
 
-; FIXME
 define <2 x i64>@smin_sext_vec(<2 x i32> %a) {
 ; CHECK-LABEL: @smin_sext_vec(
 ; CHECK-NEXT:    [[A_EXT:%.*]] = sext <2 x i32> %a to <2 x i64>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp slt <2 x i32> %a, <i32 1, i32 1>
-; CHECK-NEXT:    [[MIN:%.*]] = select <2 x i1> [[CMP]], <2 x i64> [[A_EXT]], <2 x i64> zeroinitializer
+; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt <2 x i64> [[A_EXT]], zeroinitializer
+; CHECK-NEXT:    [[MIN:%.*]] = select <2 x i1> [[CMP]], <2 x i64> zeroinitializer, <2 x i64> [[A_EXT]]
 ; CHECK-NEXT:    ret <2 x i64> [[MIN]]
 ;
   %a_ext = sext <2 x i32> %a to <2 x i64>
@@ -338,12 +328,11 @@ define i64 @umax_sext(i32 %a) {
   ret i64 %max
 }
 
-; FIXME
 define <2 x i64> @umax_sext_vec(<2 x i32> %a) {
 ; CHECK-LABEL: @umax_sext_vec(
 ; CHECK-NEXT:    [[A_EXT:%.*]] = sext <2 x i32> %a to <2 x i64>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i32> %a, <i32 2, i32 2>
-; CHECK-NEXT:    [[MAX:%.*]] = select <2 x i1> [[CMP]], <2 x i64> [[A_EXT]], <2 x i64> <i64 3, i64 3>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i64> [[A_EXT]], <i64 3, i64 3>
+; CHECK-NEXT:    [[MAX:%.*]] = select <2 x i1> [[CMP]], <2 x i64> <i64 3, i64 3>, <2 x i64> [[A_EXT]]
 ; CHECK-NEXT:    ret <2 x i64> [[MAX]]
 ;
   %a_ext = sext <2 x i32> %a to <2 x i64>
@@ -365,12 +354,11 @@ define i64 @umin_sext(i32 %a) {
   ret i64 %min
 }
 
-; FIXME
 define <2 x i64> @umin_sext_vec(<2 x i32> %a) {
 ; CHECK-LABEL: @umin_sext_vec(
 ; CHECK-NEXT:    [[A_EXT:%.*]] = sext <2 x i32> %a to <2 x i64>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> %a, <i32 3, i32 3>
-; CHECK-NEXT:    [[MIN:%.*]] = select <2 x i1> [[CMP]], <2 x i64> [[A_EXT]], <2 x i64> <i64 2, i64 2>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i64> [[A_EXT]], <i64 2, i64 2>
+; CHECK-NEXT:    [[MIN:%.*]] = select <2 x i1> [[CMP]], <2 x i64> <i64 2, i64 2>, <2 x i64> [[A_EXT]]
 ; CHECK-NEXT:    ret <2 x i64> [[MIN]]
 ;
   %a_ext = sext <2 x i32> %a to <2 x i64>
@@ -392,12 +380,11 @@ define i64 @umax_sext2(i32 %a) {
   ret i64 %min
 }
 
-; FIXME
 define <2 x i64> @umax_sext2_vec(<2 x i32> %a) {
 ; CHECK-LABEL: @umax_sext2_vec(
 ; CHECK-NEXT:    [[A_EXT:%.*]] = sext <2 x i32> %a to <2 x i64>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> %a, <i32 3, i32 3>
-; CHECK-NEXT:    [[MIN:%.*]] = select <2 x i1> [[CMP]], <2 x i64> <i64 2, i64 2>, <2 x i64> [[A_EXT]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i64> [[A_EXT]], <i64 2, i64 2>
+; CHECK-NEXT:    [[MIN:%.*]] = select <2 x i1> [[CMP]], <2 x i64> [[A_EXT]], <2 x i64> <i64 2, i64 2>
 ; CHECK-NEXT:    ret <2 x i64> [[MIN]]
 ;
   %a_ext = sext <2 x i32> %a to <2 x i64>
@@ -419,12 +406,11 @@ define i64 @umin_sext2(i32 %a) {
   ret i64 %min
 }
 
-; FIXME
 define <2 x i64> @umin_sext2_vec(<2 x i32> %a) {
 ; CHECK-LABEL: @umin_sext2_vec(
 ; CHECK-NEXT:    [[A_EXT:%.*]] = sext <2 x i32> %a to <2 x i64>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i32> %a, <i32 2, i32 2>
-; CHECK-NEXT:    [[MIN:%.*]] = select <2 x i1> [[CMP]], <2 x i64> <i64 3, i64 3>, <2 x i64> [[A_EXT]]
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i64> [[A_EXT]], <i64 3, i64 3>
+; CHECK-NEXT:    [[MIN:%.*]] = select <2 x i1> [[CMP]], <2 x i64> [[A_EXT]], <2 x i64> <i64 3, i64 3>
 ; CHECK-NEXT:    ret <2 x i64> [[MIN]]
 ;
   %a_ext = sext <2 x i32> %a to <2 x i64>
@@ -446,12 +432,11 @@ define i64 @umax_zext(i32 %a) {
   ret i64 %max
 }
 
-; FIXME
 define <2 x i64> @umax_zext_vec(<2 x i32> %a) {
 ; CHECK-LABEL: @umax_zext_vec(
 ; CHECK-NEXT:    [[A_EXT:%.*]] = zext <2 x i32> %a to <2 x i64>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i32> %a, <i32 2, i32 2>
-; CHECK-NEXT:    [[MAX:%.*]] = select <2 x i1> [[CMP]], <2 x i64> [[A_EXT]], <2 x i64> <i64 3, i64 3>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i64> [[A_EXT]], <i64 3, i64 3>
+; CHECK-NEXT:    [[MAX:%.*]] = select <2 x i1> [[CMP]], <2 x i64> <i64 3, i64 3>, <2 x i64> [[A_EXT]]
 ; CHECK-NEXT:    ret <2 x i64> [[MAX]]
 ;
   %a_ext = zext <2 x i32> %a to <2 x i64>
@@ -473,12 +458,11 @@ define i64 @umin_zext(i32 %a) {
   ret i64 %min
 }
 
-; FIXME
 define <2 x i64> @umin_zext_vec(<2 x i32> %a) {
 ; CHECK-LABEL: @umin_zext_vec(
 ; CHECK-NEXT:    [[A_EXT:%.*]] = zext <2 x i32> %a to <2 x i64>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i32> %a, <i32 3, i32 3>
-; CHECK-NEXT:    [[MIN:%.*]] = select <2 x i1> [[CMP]], <2 x i64> [[A_EXT]], <2 x i64> <i64 2, i64 2>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i64> [[A_EXT]], <i64 2, i64 2>
+; CHECK-NEXT:    [[MIN:%.*]] = select <2 x i1> [[CMP]], <2 x i64> <i64 2, i64 2>, <2 x i64> [[A_EXT]]
 ; CHECK-NEXT:    ret <2 x i64> [[MIN]]
 ;
   %a_ext = zext <2 x i32> %a to <2 x i64>
@@ -487,3 +471,16 @@ define <2 x i64> @umin_zext_vec(<2 x i32
   ret <2 x i64> %min
 }
 
+; Don't crash mishandling a pattern that can't be transformed.
+
+define <2 x i16> @scalar_select_of_vectors(<2 x i16> %a, <2 x i16> %b, i8 %x) {
+; CHECK-LABEL: @scalar_select_of_vectors(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 %x, 0
+; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], <2 x i16> %a, <2 x i16> %b
+; CHECK-NEXT:    ret <2 x i16> [[SEL]]
+;
+  %cmp = icmp slt i8 %x, 0
+  %sel = select i1 %cmp, <2 x i16> %a, <2 x i16> %b
+  ret <2 x i16> %sel
+}
+