[llvm] 2b346a1 - Recommit "[InstCombine] Improve bswap optimization" (2nd try)

Noah Goldstein via llvm-commits <llvm-commits@lists.llvm.org>
Tue May 16 16:58:38 PDT 2023


Author: Austin Chang
Date: 2023-05-16T18:58:09-05:00
New Revision: 2b346a138d40b1a325a58ca9fa9d4ce3b3b0cf78

URL: https://github.com/llvm/llvm-project/commit/2b346a138d40b1a325a58ca9fa9d4ce3b3b0cf78
DIFF: https://github.com/llvm/llvm-project/commit/2b346a138d40b1a325a58ca9fa9d4ce3b3b0cf78.diff

LOG: Recommit "[InstCombine] Improve bswap optimization" (2nd try)
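
The underlying fold swaps bswap/bitreverse across bitwise logic ops, e.g.
bswap(logic_op(bswap(x), y)) --> logic_op(x, bswap(y)). As a sketch on IR,
mirroring the updated bs_and_lhs_bs16 test below:

    ; Before: bswap(and(bswap(a), b))
    %1 = tail call i16 @llvm.bswap.i16(i16 %a)
    %2 = and i16 %1, %b
    %3 = tail call i16 @llvm.bswap.i16(i16 %2)

    ; After: and(bswap(b), a) -- one bswap instead of two
    %1 = call i16 @llvm.bswap.i16(i16 %b)
    %2 = and i16 %1, %a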

The issue was an assertion failure due to an unchecked `cast`. The fix is to
check that the operator is a `BinaryOperator` before casting, so that we
don't match a `ConstantExpr`.
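
The crashing shape, as a minimal sketch mirroring the bs_and_constexpr test
added below (function name is illustrative): InstCombine folds the
`ptrtoint`/`and` into a ConstantExpr, so the bswap operand matched
m_BitwiseLogic without being a BinaryOperator, and the previous unchecked
cast asserted.

    @gp = external global [0 x i8]
    declare i64 @llvm.bswap.i64(i64)

    define i64 @reproducer() {
      ; %gpi and %exp constant-fold to
      ; `and (i64 ptrtoint (ptr @gp to i64), i64 4095)`, a ConstantExpr;
      ; cast<BinaryOperator> on that value tripped the assertion.
      %gpi = ptrtoint ptr @gp to i64
      %exp = and i64 %gpi, 4095
      %res = call i64 @llvm.bswap.i64(i64 %exp)
      ret i64 %res
    }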

Reviewed By: goldstein.w.n, RKSimon

Differential Revision: https://reviews.llvm.org/D149699

Added: 
    

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
    llvm/test/Transforms/InstCombine/bswap-fold.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index c2ef6f8b7910..2992d163344d 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1325,6 +1325,45 @@ foldShuffledIntrinsicOperands(IntrinsicInst *II,
   return new ShuffleVectorInst(NewIntrinsic, Mask);
 }
 
+/// Fold the following cases for the bswap and bitreverse intrinsics:
+///   bswap(logic_op(bswap(x), y)) --> logic_op(x, bswap(y))
+///   bswap(logic_op(bswap(x), bswap(y))) --> logic_op(x, y) (even with multiuse)
+template <Intrinsic::ID IntrID>
+static Instruction *foldBitOrderCrossLogicOp(Value *V,
+                                             InstCombiner::BuilderTy &Builder) {
+  static_assert(IntrID == Intrinsic::bswap || IntrID == Intrinsic::bitreverse,
+                "This helper only supports BSWAP and BITREVERSE intrinsics");
+
+  Value *X, *Y;
+  // Find the bitwise logic op. Check that it is a BinaryOperator explicitly so
+  // we don't match a ConstantExpr, which isn't meaningful for this transform.
+  if (match(V, m_OneUse(m_BitwiseLogic(m_Value(X), m_Value(Y)))) &&
+      isa<BinaryOperator>(V)) {
+    Value *OldReorderX, *OldReorderY;
+    BinaryOperator::BinaryOps Op = cast<BinaryOperator>(V)->getOpcode();
+
+    // If both X and Y are bswap/bitreverse, the transform reduces the number
+    // of instructions even if they have other uses.
+    // If only one operand is bswap/bitreverse, we need to ensure that the
+    // operand has only one use.
+    if (match(X, m_Intrinsic<IntrID>(m_Value(OldReorderX))) &&
+        match(Y, m_Intrinsic<IntrID>(m_Value(OldReorderY)))) {
+      return BinaryOperator::Create(Op, OldReorderX, OldReorderY);
+    }
+
+    if (match(X, m_OneUse(m_Intrinsic<IntrID>(m_Value(OldReorderX))))) {
+      Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, Y);
+      return BinaryOperator::Create(Op, OldReorderX, NewReorder);
+    }
+
+    if (match(Y, m_OneUse(m_Intrinsic<IntrID>(m_Value(OldReorderY))))) {
+      Value *NewReorder = Builder.CreateUnaryIntrinsic(IntrID, X);
+      return BinaryOperator::Create(Op, NewReorder, OldReorderY);
+    }
+  }
+  return nullptr;
+}
+
 /// CallInst simplification. This mostly only handles folding of intrinsic
 /// instructions. For normal calls, it allows visitCallBase to do the heavy
 /// lifting.
@@ -1728,6 +1767,12 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
       Value *V = Builder.CreateLShr(X, CV);
       return new TruncInst(V, IIOperand->getType());
     }
+
+    if (Instruction *crossLogicOpFold =
+            foldBitOrderCrossLogicOp<Intrinsic::bswap>(IIOperand, Builder)) {
+      return crossLogicOpFold;
+    }
+
     break;
   }
   case Intrinsic::masked_load:

diff --git a/llvm/test/Transforms/InstCombine/bswap-fold.ll b/llvm/test/Transforms/InstCombine/bswap-fold.ll
index 5f6280bd24ce..844ae1ee3ca3 100644
--- a/llvm/test/Transforms/InstCombine/bswap-fold.ll
+++ b/llvm/test/Transforms/InstCombine/bswap-fold.ll
@@ -543,10 +543,9 @@ define i64 @bs_and64i_multiuse(i64 %a, i64 %b) #0 {
 ; Fold: BSWAP( OP( BSWAP(x), y ) ) -> OP( x, BSWAP(y) )
 define i16 @bs_and_lhs_bs16(i16 %a, i16 %b) #0 {
 ; CHECK-LABEL: @bs_and_lhs_bs16(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[A:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[TMP1]], [[B:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[TMP2]])
-; CHECK-NEXT:    ret i16 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[B:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT:    ret i16 [[TMP2]]
 ;
   %1 = tail call i16 @llvm.bswap.i16(i16 %a)
   %2 = and i16 %1, %b
@@ -556,10 +555,9 @@ define i16 @bs_and_lhs_bs16(i16 %a, i16 %b) #0 {
 
 define i16 @bs_or_lhs_bs16(i16 %a, i16 %b) #0 {
 ; CHECK-LABEL: @bs_or_lhs_bs16(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[A:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = or i16 [[TMP1]], [[B:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[TMP2]])
-; CHECK-NEXT:    ret i16 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[B:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = or i16 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT:    ret i16 [[TMP2]]
 ;
   %1 = tail call i16 @llvm.bswap.i16(i16 %a)
   %2 = or i16 %1, %b
@@ -569,10 +567,9 @@ define i16 @bs_or_lhs_bs16(i16 %a, i16 %b) #0 {
 
 define i16 @bs_xor_lhs_bs16(i16 %a, i16 %b) #0 {
 ; CHECK-LABEL: @bs_xor_lhs_bs16(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[A:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = xor i16 [[TMP1]], [[B:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[TMP2]])
-; CHECK-NEXT:    ret i16 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[B:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i16 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT:    ret i16 [[TMP2]]
 ;
   %1 = tail call i16 @llvm.bswap.i16(i16 %a)
   %2 = xor i16 %1, %b
@@ -582,10 +579,9 @@ define i16 @bs_xor_lhs_bs16(i16 %a, i16 %b) #0 {
 
 define i16 @bs_and_rhs_bs16(i16 %a, i16 %b) #0 {
 ; CHECK-LABEL: @bs_and_rhs_bs16(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[TMP2]])
-; CHECK-NEXT:    ret i16 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = and i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i16 [[TMP2]]
 ;
   %1 = tail call i16 @llvm.bswap.i16(i16 %b)
   %2 = and i16 %a, %1
@@ -595,10 +591,9 @@ define i16 @bs_and_rhs_bs16(i16 %a, i16 %b) #0 {
 
 define i16 @bs_or_rhs_bs16(i16 %a, i16 %b) #0 {
 ; CHECK-LABEL: @bs_or_rhs_bs16(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = or i16 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[TMP2]])
-; CHECK-NEXT:    ret i16 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = or i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i16 [[TMP2]]
 ;
   %1 = tail call i16 @llvm.bswap.i16(i16 %b)
   %2 = or i16 %a, %1
@@ -608,10 +603,9 @@ define i16 @bs_or_rhs_bs16(i16 %a, i16 %b) #0 {
 
 define i16 @bs_xor_rhs_bs16(i16 %a, i16 %b) #0 {
 ; CHECK-LABEL: @bs_xor_rhs_bs16(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = xor i16 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i16 @llvm.bswap.i16(i16 [[TMP2]])
-; CHECK-NEXT:    ret i16 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i16 [[TMP2]]
 ;
   %1 = tail call i16 @llvm.bswap.i16(i16 %b)
   %2 = xor i16 %a, %1
@@ -621,10 +615,9 @@ define i16 @bs_xor_rhs_bs16(i16 %a, i16 %b) #0 {
 
 define i32 @bs_and_rhs_bs32(i32 %a, i32 %b) #0 {
 ; CHECK-LABEL: @bs_and_rhs_bs32(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[TMP2]])
-; CHECK-NEXT:    ret i32 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.bswap.i32(i32 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
   %1 = tail call i32 @llvm.bswap.i32(i32 %b)
   %2 = and i32 %a, %1
@@ -634,10 +627,9 @@ define i32 @bs_and_rhs_bs32(i32 %a, i32 %b) #0 {
 
 define i32 @bs_or_rhs_bs32(i32 %a, i32 %b) #0 {
 ; CHECK-LABEL: @bs_or_rhs_bs32(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[TMP2]])
-; CHECK-NEXT:    ret i32 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.bswap.i32(i32 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
   %1 = tail call i32 @llvm.bswap.i32(i32 %b)
   %2 = or i32 %a, %1
@@ -647,10 +639,9 @@ define i32 @bs_or_rhs_bs32(i32 %a, i32 %b) #0 {
 
 define i32 @bs_xor_rhs_bs32(i32 %a, i32 %b) #0 {
 ; CHECK-LABEL: @bs_xor_rhs_bs32(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i32 @llvm.bswap.i32(i32 [[TMP2]])
-; CHECK-NEXT:    ret i32 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.bswap.i32(i32 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
   %1 = tail call i32 @llvm.bswap.i32(i32 %b)
   %2 = xor i32 %a, %1
@@ -660,10 +651,9 @@ define i32 @bs_xor_rhs_bs32(i32 %a, i32 %b) #0 {
 
 define i64 @bs_and_rhs_bs64(i64 %a, i64 %b) #0 {
 ; CHECK-LABEL: @bs_and_rhs_bs64(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[TMP2]])
-; CHECK-NEXT:    ret i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.bswap.i64(i64 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i64 [[TMP2]]
 ;
   %1 = tail call i64 @llvm.bswap.i64(i64 %b)
   %2 = and i64 %a, %1
@@ -673,10 +663,9 @@ define i64 @bs_and_rhs_bs64(i64 %a, i64 %b) #0 {
 
 define i64 @bs_or_rhs_bs64(i64 %a, i64 %b) #0 {
 ; CHECK-LABEL: @bs_or_rhs_bs64(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[TMP2]])
-; CHECK-NEXT:    ret i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.bswap.i64(i64 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = or i64 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i64 [[TMP2]]
 ;
   %1 = tail call i64 @llvm.bswap.i64(i64 %b)
   %2 = or i64 %a, %1
@@ -686,10 +675,9 @@ define i64 @bs_or_rhs_bs64(i64 %a, i64 %b) #0 {
 
 define i64 @bs_xor_rhs_bs64(i64 %a, i64 %b) #0 {
 ; CHECK-LABEL: @bs_xor_rhs_bs64(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[TMP2]])
-; CHECK-NEXT:    ret i64 [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.bswap.i64(i64 [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret i64 [[TMP2]]
 ;
   %1 = tail call i64 @llvm.bswap.i64(i64 %b)
   %2 = xor i64 %a, %1
@@ -699,10 +687,9 @@ define i64 @bs_xor_rhs_bs64(i64 %a, i64 %b) #0 {
 
 define <2 x i32> @bs_and_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 {
 ; CHECK-LABEL: @bs_and_rhs_i32vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP2]])
-; CHECK-NEXT:    ret <2 x i32> [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
 ;
   %1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
   %2 = and <2 x i32> %a, %1
@@ -712,10 +699,9 @@ define <2 x i32> @bs_and_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 {
 
 define <2 x i32> @bs_or_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 {
 ; CHECK-LABEL: @bs_or_rhs_i32vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = or <2 x i32> [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP2]])
-; CHECK-NEXT:    ret <2 x i32> [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = or <2 x i32> [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
 ;
   %1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
   %2 = or <2 x i32> %a, %1
@@ -725,10 +711,9 @@ define <2 x i32> @bs_or_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 {
 
 define <2 x i32> @bs_xor_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 {
 ; CHECK-LABEL: @bs_xor_rhs_i32vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[B:%.*]])
-; CHECK-NEXT:    [[TMP2:%.*]] = xor <2 x i32> [[TMP1]], [[A:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP2]])
-; CHECK-NEXT:    ret <2 x i32> [[TMP3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[A:%.*]])
+; CHECK-NEXT:    [[TMP2:%.*]] = xor <2 x i32> [[TMP1]], [[B:%.*]]
+; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
 ;
   %1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
   %2 = xor <2 x i32> %a, %1
@@ -782,11 +767,10 @@ define i64 @bs_all_operand64_multiuse_both(i64 %a, i64 %b) #0 {
 ; CHECK-LABEL: @bs_all_operand64_multiuse_both(
 ; CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[A:%.*]])
 ; CHECK-NEXT:    [[TMP2:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
-; CHECK-NEXT:    [[TMP3:%.*]] = and i64 [[TMP1]], [[TMP2]]
-; CHECK-NEXT:    [[TMP4:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[TMP3]])
+; CHECK-NEXT:    [[TMP3:%.*]] = and i64 [[A]], [[B]]
 ; CHECK-NEXT:    call void @use.i64(i64 [[TMP1]])
 ; CHECK-NEXT:    call void @use.i64(i64 [[TMP2]])
-; CHECK-NEXT:    ret i64 [[TMP4]]
+; CHECK-NEXT:    ret i64 [[TMP3]]
 ;
   %1 = tail call i64 @llvm.bswap.i64(i64 %a)
   %2 = tail call i64 @llvm.bswap.i64(i64 %b)
@@ -798,6 +782,36 @@ define i64 @bs_all_operand64_multiuse_both(i64 %a, i64 %b) #0 {
   ret i64 %4
 }
 
+@gp = external global [0 x i8]
+
+define void @bs_and_constexpr(ptr %out, i64 %a) {
+; CHECK-LABEL: @bs_and_constexpr(
+; CHECK-NEXT:    [[RES:%.*]] = call i64 @llvm.bswap.i64(i64 and (i64 ptrtoint (ptr @gp to i64), i64 4095))
+; CHECK-NEXT:    store i64 [[RES]], ptr [[OUT:%.*]], align 8
+; CHECK-NEXT:    ret void
+;
+  %gpi = ptrtoint ptr @gp to i64
+  %exp = and i64 %gpi, 4095
+  %res = call i64 @llvm.bswap.i64(i64 %exp)
+  store i64 %res, ptr %out, align 8
+  ret void
+}
+
+
+define void @bs_and_bs_constexpr(ptr %out, i64 %a) {
+; CHECK-LABEL: @bs_and_bs_constexpr(
+; CHECK-NEXT:    store i64 and (i64 ptrtoint (ptr @gp to i64), i64 -67835469387268096), ptr [[OUT:%.*]], align 8
+; CHECK-NEXT:    ret void
+;
+  %gpi = ptrtoint ptr @gp to i64
+  %bs_gpi = call i64 @llvm.bswap.i64(i64 %gpi)
+  %exp = and i64 %bs_gpi, 4095
+  %res = call i64 @llvm.bswap.i64(i64 %exp)
+  store i64 %res, ptr %out, align 8
+  ret void
+}
+
+
 define i64 @bs_active_high8(i64 %0) {
 ; CHECK-LABEL: @bs_active_high8(
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i64 [[TMP0:%.*]], 255

