[llvm] fadd152 - [AggressiveInstCombine] foldAnyOrAllBitsSet - add uniform vector support

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 15 03:03:20 PDT 2020


Author: Simon Pilgrim
Date: 2020-10-15T11:02:35+01:00
New Revision: fadd152317598aa71538948613ce24b094c5c7c2

URL: https://github.com/llvm/llvm-project/commit/fadd152317598aa71538948613ce24b094c5c7c2
DIFF: https://github.com/llvm/llvm-project/commit/fadd152317598aa71538948613ce24b094c5c7c2.diff

LOG: [AggressiveInstCombine] foldAnyOrAllBitsSet - add uniform vector support

Replace m_ConstantInt with m_APInt to support uniform vectors (with no undef elements)

Adding non-undef support would involve some refactoring of the MaskOps struct but this might still be worth it.

Added: 
    

Modified: 
    llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
    llvm/test/Transforms/AggressiveInstCombine/masked-cmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
index 6acf4f4c1546..77c794037d6e 100644
--- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -203,8 +203,8 @@ static bool matchAndOrChain(Value *V, MaskOps &MOps) {
   // We need a shift-right or a bare value representing a compare of bit 0 of
   // the original source operand.
   Value *Candidate;
-  uint64_t BitIndex = 0;
-  if (!match(V, m_LShr(m_Value(Candidate), m_ConstantInt(BitIndex))))
+  const APInt *BitIndex = nullptr;
+  if (!match(V, m_LShr(m_Value(Candidate), m_APInt(BitIndex))))
     Candidate = V;
 
   // Initialize result source operand.
@@ -212,11 +212,11 @@ static bool matchAndOrChain(Value *V, MaskOps &MOps) {
     MOps.Root = Candidate;
 
   // The shift constant is out-of-range? This code hasn't been simplified.
-  if (BitIndex >= MOps.Mask.getBitWidth())
+  if (BitIndex && BitIndex->uge(MOps.Mask.getBitWidth()))
     return false;
 
   // Fill in the mask bit derived from the shift constant.
-  MOps.Mask.setBit(BitIndex);
+  MOps.Mask.setBit(BitIndex ? BitIndex->getZExtValue() : 0);
   return MOps.Root == Candidate;
 }
 

diff --git a/llvm/test/Transforms/AggressiveInstCombine/masked-cmp.ll b/llvm/test/Transforms/AggressiveInstCombine/masked-cmp.ll
index 1ab013835737..152469c2d759 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/masked-cmp.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/masked-cmp.ll
@@ -18,10 +18,10 @@ define i32 @anyset_two_bit_mask(i32 %x) {
 
 define <2 x i32> @anyset_two_bit_mask_uniform(<2 x i32> %x) {
 ; CHECK-LABEL: @anyset_two_bit_mask_uniform(
-; CHECK-NEXT:    [[S:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 3, i32 3>
-; CHECK-NEXT:    [[O:%.*]] = or <2 x i32> [[S]], [[X]]
-; CHECK-NEXT:    [[R:%.*]] = and <2 x i32> [[O]], <i32 1, i32 1>
-; CHECK-NEXT:    ret <2 x i32> [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[X:%.*]], <i32 9, i32 9>
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne <2 x i32> [[TMP1]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = zext <2 x i1> [[TMP2]] to <2 x i32>
+; CHECK-NEXT:    ret <2 x i32> [[TMP3]]
 ;
   %s = lshr <2 x i32> %x, <i32 3, i32 3>
   %o = or <2 x i32> %s, %x
@@ -48,14 +48,10 @@ define i32 @anyset_four_bit_mask(i32 %x) {
 
 define <2 x i32> @anyset_four_bit_mask_uniform(<2 x i32> %x) {
 ; CHECK-LABEL: @anyset_four_bit_mask_uniform(
-; CHECK-NEXT:    [[T1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 3, i32 3>
-; CHECK-NEXT:    [[T2:%.*]] = lshr <2 x i32> [[X]], <i32 5, i32 5>
-; CHECK-NEXT:    [[T3:%.*]] = lshr <2 x i32> [[X]], <i32 8, i32 8>
-; CHECK-NEXT:    [[O1:%.*]] = or <2 x i32> [[T1]], [[X]]
-; CHECK-NEXT:    [[O2:%.*]] = or <2 x i32> [[T2]], [[T3]]
-; CHECK-NEXT:    [[O3:%.*]] = or <2 x i32> [[O1]], [[O2]]
-; CHECK-NEXT:    [[R:%.*]] = and <2 x i32> [[O3]], <i32 1, i32 1>
-; CHECK-NEXT:    ret <2 x i32> [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[X:%.*]], <i32 297, i32 297>
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne <2 x i32> [[TMP1]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = zext <2 x i1> [[TMP2]] to <2 x i32>
+; CHECK-NEXT:    ret <2 x i32> [[TMP3]]
 ;
   %t1 = lshr <2 x i32> %x, <i32 3, i32 3>
   %t2 = lshr <2 x i32> %x, <i32 5, i32 5>
@@ -87,13 +83,10 @@ define i32 @anyset_three_bit_mask_all_shifted_bits(i32 %x) {
 
 define <2 x i32> @anyset_three_bit_mask_all_shifted_bits_uniform(<2 x i32> %x) {
 ; CHECK-LABEL: @anyset_three_bit_mask_all_shifted_bits_uniform(
-; CHECK-NEXT:    [[T1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 3, i32 3>
-; CHECK-NEXT:    [[T2:%.*]] = lshr <2 x i32> [[X]], <i32 5, i32 5>
-; CHECK-NEXT:    [[T3:%.*]] = lshr <2 x i32> [[X]], <i32 8, i32 8>
-; CHECK-NEXT:    [[O2:%.*]] = or <2 x i32> [[T2]], [[T3]]
-; CHECK-NEXT:    [[O3:%.*]] = or <2 x i32> [[T1]], [[O2]]
-; CHECK-NEXT:    [[R:%.*]] = and <2 x i32> [[O3]], <i32 1, i32 1>
-; CHECK-NEXT:    ret <2 x i32> [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[X:%.*]], <i32 296, i32 296>
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne <2 x i32> [[TMP1]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = zext <2 x i1> [[TMP2]] to <2 x i32>
+; CHECK-NEXT:    ret <2 x i32> [[TMP3]]
 ;
   %t1 = lshr <2 x i32> %x, <i32 3, i32 3>
   %t2 = lshr <2 x i32> %x, <i32 5, i32 5>
@@ -121,10 +114,10 @@ define i32 @allset_two_bit_mask(i32 %x) {
 
 define <2 x i32> @allset_two_bit_mask_uniform(<2 x i32> %x) {
 ; CHECK-LABEL: @allset_two_bit_mask_uniform(
-; CHECK-NEXT:    [[S:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 7, i32 7>
-; CHECK-NEXT:    [[O:%.*]] = and <2 x i32> [[S]], [[X]]
-; CHECK-NEXT:    [[R:%.*]] = and <2 x i32> [[O]], <i32 1, i32 1>
-; CHECK-NEXT:    ret <2 x i32> [[R]]
+; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[X:%.*]], <i32 129, i32 129>
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq <2 x i32> [[TMP1]], <i32 129, i32 129>
+; CHECK-NEXT:    [[TMP3:%.*]] = zext <2 x i1> [[TMP2]] to <2 x i32>
+; CHECK-NEXT:    ret <2 x i32> [[TMP3]]
 ;
   %s = lshr <2 x i32> %x, <i32 7, i32 7>
   %o = and <2 x i32> %s, %x


        


More information about the llvm-commits mailing list