[llvm] 6b2eb31 - [InstCombine] Add support for zext(and(neg(amt),width-1)) rotate shift amount patterns

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 26 04:23:02 PDT 2020


Author: Simon Pilgrim
Date: 2020-10-26T11:22:41Z
New Revision: 6b2eb31e1e2db1f3ca7a5c4914ab08cb18698de7

URL: https://github.com/llvm/llvm-project/commit/6b2eb31e1e2db1f3ca7a5c4914ab08cb18698de7
DIFF: https://github.com/llvm/llvm-project/commit/6b2eb31e1e2db1f3ca7a5c4914ab08cb18698de7.diff

LOG: [InstCombine] Add support for zext(and(neg(amt),width-1)) rotate shift amount patterns

Alive2: https://alive2.llvm.org/ce/z/bCvvHd

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
    llvm/test/Transforms/InstCombine/rotate.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 2911886636f9..7b5647e9d990 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -2115,6 +2115,10 @@ static Instruction *matchFunnelShift(Instruction &Or, InstCombinerImpl &IC) {
                        m_SpecificInt(Mask))))
       return L;
 
+    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
+        match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
+      return L;
+
     return nullptr;
   };
 

diff --git a/llvm/test/Transforms/InstCombine/rotate.ll b/llvm/test/Transforms/InstCombine/rotate.ll
index bead9691cd7f..36e485158d63 100644
--- a/llvm/test/Transforms/InstCombine/rotate.ll
+++ b/llvm/test/Transforms/InstCombine/rotate.ll
@@ -609,15 +609,9 @@ define i16 @rotateright_16_neg_mask_wide_amount_commute(i16 %v, i32 %shamt) {
 
 define i64 @rotateright_64_zext_neg_mask_amount(i64 %0, i32 %1) {
 ; CHECK-LABEL: @rotateright_64_zext_neg_mask_amount(
-; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[TMP1:%.*]], 63
-; CHECK-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP5:%.*]] = lshr i64 [[TMP0:%.*]], [[TMP4]]
-; CHECK-NEXT:    [[TMP6:%.*]] = sub nsw i32 0, [[TMP1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = and i32 [[TMP6]], 63
-; CHECK-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
-; CHECK-NEXT:    [[TMP9:%.*]] = shl i64 [[TMP0]], [[TMP8]]
-; CHECK-NEXT:    [[TMP10:%.*]] = or i64 [[TMP5]], [[TMP9]]
-; CHECK-NEXT:    ret i64 [[TMP10]]
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP1:%.*]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.fshr.i64(i64 [[TMP0:%.*]], i64 [[TMP0]], i64 [[TMP3]])
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %3 = and i32 %1, 63
   %4 = zext i32 %3 to i64
@@ -666,15 +660,9 @@ define i8 @rotateleft_8_neg_mask_wide_amount_commute(i8 %v, i32 %shamt) {
 
 define i64 @rotateleft_64_zext_neg_mask_amount(i64 %0, i32 %1) {
 ; CHECK-LABEL: @rotateleft_64_zext_neg_mask_amount(
-; CHECK-NEXT:    [[TMP3:%.*]] = and i32 [[TMP1:%.*]], 63
-; CHECK-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
-; CHECK-NEXT:    [[TMP5:%.*]] = shl i64 [[TMP0:%.*]], [[TMP4]]
-; CHECK-NEXT:    [[TMP6:%.*]] = sub nsw i32 0, [[TMP1]]
-; CHECK-NEXT:    [[TMP7:%.*]] = and i32 [[TMP6]], 63
-; CHECK-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
-; CHECK-NEXT:    [[TMP9:%.*]] = lshr i64 [[TMP0]], [[TMP8]]
-; CHECK-NEXT:    [[TMP10:%.*]] = or i64 [[TMP5]], [[TMP9]]
-; CHECK-NEXT:    ret i64 [[TMP10]]
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP1:%.*]] to i64
+; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.fshl.i64(i64 [[TMP0:%.*]], i64 [[TMP0]], i64 [[TMP3]])
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %3 = and i32 %1, 63
   %4 = zext i32 %3 to i64


        


More information about the llvm-commits mailing list