[llvm] r223349 - [InstCombine] Minor optimization for bswap with binary ops
Simon Pilgrim
llvm-dev at redking.me.uk
Thu Dec 4 01:44:01 PST 2014
Author: rksimon
Date: Thu Dec 4 03:44:01 2014
New Revision: 223349
URL: http://llvm.org/viewvc/llvm-project?rev=223349&view=rev
Log:
[InstCombine] Minor optimization for bswap with binary ops
Added instcombine optimizations for BSWAP with AND/OR/XOR ops:
OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
Since it's just a one-liner, I've also added BSWAP to the DAGCombiner equivalent:
fold (OP (bswap x), (bswap y)) -> (bswap (OP x, y))
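To illustrate (the two functions below are not part of the patch, just a sketch of the new folds): running opt -instcombine over IR like this should leave a single bswap of the combined value, as the updated bswap-fold.ll tests below verify. In the constant case the constant itself is byte-swapped at compile time via APInt::byteSwap, so 10001 (0x2711) becomes 4391 (0x1127).

define i32 @or_of_bswaps(i32 %x, i32 %y) {
  %sx = call i32 @llvm.bswap.i32(i32 %x)
  %sy = call i32 @llvm.bswap.i32(i32 %y)
  %r = or i32 %sx, %sy      ; folds to: or i32 %x, %y, followed by one llvm.bswap.i32
  ret i32 %r
}

define i16 @and_bswap_const(i16 %x) {
  %sx = call i16 @llvm.bswap.i16(i16 %x)
  %r = and i16 %sx, 10001   ; folds to: and i16 %x, 4391, followed by one llvm.bswap.i16
  ret i16 %r
}

declare i32 @llvm.bswap.i32(i32)
declare i16 @llvm.bswap.i16(i16)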
Refactored bswap-fold tests to use FileCheck instead of just checking that the bswaps had gone.
Differential Revision: http://reviews.llvm.org/D6407
Modified:
llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/trunk/lib/Transforms/InstCombine/InstCombine.h
llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
llvm/trunk/test/Transforms/InstCombine/bswap-fold.ll
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=223349&r1=223348&r2=223349&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Thu Dec 4 03:44:01 2014
@@ -2526,6 +2526,7 @@ SDValue DAGCombiner::SimplifyBinOpWithSa
// fold (OP (zext x), (zext y)) -> (zext (OP x, y))
// fold (OP (sext x), (sext y)) -> (sext (OP x, y))
// fold (OP (aext x), (aext y)) -> (aext (OP x, y))
+ // fold (OP (bswap x), (bswap y)) -> (bswap (OP x, y))
// fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free)
//
// do not sink logical op inside of a vector extend, since it may combine
@@ -2533,6 +2534,7 @@ SDValue DAGCombiner::SimplifyBinOpWithSa
EVT Op0VT = N0.getOperand(0).getValueType();
if ((N0.getOpcode() == ISD::ZERO_EXTEND ||
N0.getOpcode() == ISD::SIGN_EXTEND ||
+ N0.getOpcode() == ISD::BSWAP ||
// Avoid infinite looping with PromoteIntBinOp.
(N0.getOpcode() == ISD::ANY_EXTEND &&
(!LegalTypes || TLI.isTypeDesirableForOp(N->getOpcode(), Op0VT))) ||
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombine.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombine.h?rev=223349&r1=223348&r2=223349&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombine.h (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombine.h Thu Dec 4 03:44:01 2014
@@ -402,6 +402,7 @@ private:
APInt &UndefElts, unsigned Depth = 0);
Value *SimplifyVectorOp(BinaryOperator &Inst);
+ Value *SimplifyBSwap(BinaryOperator &Inst);
// FoldOpIntoPhi - Given a binary operator, cast instruction, or select
// which has a PHI node as operand #0, see if we can fold the instruction
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp?rev=223349&r1=223348&r2=223349&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp Thu Dec 4 03:44:01 2014
@@ -117,6 +117,61 @@ static Value *getFCmpValue(bool isordere
return Builder->CreateFCmp(Pred, LHS, RHS);
}
+/// \brief Transform BITWISE_OP(BSWAP(A),BSWAP(B)) to BSWAP(BITWISE_OP(A, B))
+/// \param I Binary operator to transform.
+/// \return Pointer to node that must replace the original binary operator, or
+/// null pointer if no transformation was made.
+Value *InstCombiner::SimplifyBSwap(BinaryOperator &I) {
+ IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
+
+ // Can't do vectors.
+ if (I.getType()->isVectorTy()) return nullptr;
+
+ // Can only do bitwise ops.
+ unsigned Op = I.getOpcode();
+ if (Op != Instruction::And && Op != Instruction::Or &&
+ Op != Instruction::Xor)
+ return nullptr;
+
+ Value *OldLHS = I.getOperand(0);
+ Value *OldRHS = I.getOperand(1);
+ ConstantInt *ConstLHS = dyn_cast<ConstantInt>(OldLHS);
+ ConstantInt *ConstRHS = dyn_cast<ConstantInt>(OldRHS);
+ IntrinsicInst *IntrLHS = dyn_cast<IntrinsicInst>(OldLHS);
+ IntrinsicInst *IntrRHS = dyn_cast<IntrinsicInst>(OldRHS);
+ bool IsBswapLHS = (IntrLHS && IntrLHS->getIntrinsicID() == Intrinsic::bswap);
+ bool IsBswapRHS = (IntrRHS && IntrRHS->getIntrinsicID() == Intrinsic::bswap);
+
+ if (!IsBswapLHS && !IsBswapRHS)
+ return nullptr;
+
+ if (!IsBswapLHS && !ConstLHS)
+ return nullptr;
+
+ if (!IsBswapRHS && !ConstRHS)
+ return nullptr;
+
+ /// OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
+ /// OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
+ Value *NewLHS = IsBswapLHS ? IntrLHS->getOperand(0) :
+ Builder->getInt(ConstLHS->getValue().byteSwap());
+
+ Value *NewRHS = IsBswapRHS ? IntrRHS->getOperand(0) :
+ Builder->getInt(ConstRHS->getValue().byteSwap());
+
+ Value *BinOp = nullptr;
+ if (Op == Instruction::And)
+ BinOp = Builder->CreateAnd(NewLHS, NewRHS);
+ else if (Op == Instruction::Or)
+ BinOp = Builder->CreateOr(NewLHS, NewRHS);
+ else //if (Op == Instruction::Xor)
+ BinOp = Builder->CreateXor(NewLHS, NewRHS);
+
+ Module *M = I.getParent()->getParent()->getParent();
+ Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, ITy);
+ return Builder->CreateCall(F, BinOp);
+}
+
// OptAndOp - This handles expressions of the form ((val OP C1) & C2). Where
// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
// guaranteed to be a binary operator.
@@ -1185,6 +1240,9 @@ Instruction *InstCombiner::visitAnd(Bina
if (SimplifyDemandedInstructionBits(I))
return &I;
+ if (Value *V = SimplifyBSwap(I))
+ return ReplaceInstUsesWith(I, V);
+
if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
const APInt &AndRHSMask = AndRHS->getValue();
@@ -2118,6 +2176,9 @@ Instruction *InstCombiner::visitOr(Binar
if (SimplifyDemandedInstructionBits(I))
return &I;
+ if (Value *V = SimplifyBSwap(I))
+ return ReplaceInstUsesWith(I, V);
+
if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
ConstantInt *C1 = nullptr; Value *X = nullptr;
// (X & C1) | C2 --> (X | C2) & (C1|C2)
@@ -2502,6 +2563,9 @@ Instruction *InstCombiner::visitXor(Bina
if (SimplifyDemandedInstructionBits(I))
return &I;
+ if (Value *V = SimplifyBSwap(I))
+ return ReplaceInstUsesWith(I, V);
+
// Is this a ~ operation?
if (Value *NotOp = dyn_castNotVal(&I)) {
if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
Modified: llvm/trunk/test/Transforms/InstCombine/bswap-fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/bswap-fold.ll?rev=223349&r1=223348&r2=223349&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/bswap-fold.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/bswap-fold.ll Thu Dec 4 03:44:01 2014
@@ -1,63 +1,79 @@
-; RUN: opt < %s -instcombine -S | not grep call.*bswap
+; RUN: opt < %s -instcombine -S | FileCheck %s
define i1 @test1(i16 %tmp2) {
+; CHECK-LABEL: @test1
+; CHECK-NEXT: %tmp = icmp eq i16 %tmp2, 256
+; CHECK-NEXT: ret i1 %tmp
%tmp10 = call i16 @llvm.bswap.i16( i16 %tmp2 )
%tmp = icmp eq i16 %tmp10, 1
ret i1 %tmp
}
define i1 @test2(i32 %tmp) {
+; CHECK-LABEL: @test2
+; CHECK-NEXT: %tmp.upgrd.1 = icmp eq i32 %tmp, 16777216
+; CHECK-NEXT: ret i1 %tmp.upgrd.1
%tmp34 = tail call i32 @llvm.bswap.i32( i32 %tmp )
%tmp.upgrd.1 = icmp eq i32 %tmp34, 1
ret i1 %tmp.upgrd.1
}
-declare i32 @llvm.bswap.i32(i32)
-
define i1 @test3(i64 %tmp) {
+; CHECK-LABEL: @test3
+; CHECK-NEXT: %tmp.upgrd.2 = icmp eq i64 %tmp, 72057594037927936
+; CHECK-NEXT: ret i1 %tmp.upgrd.2
%tmp34 = tail call i64 @llvm.bswap.i64( i64 %tmp )
%tmp.upgrd.2 = icmp eq i64 %tmp34, 1
ret i1 %tmp.upgrd.2
}
-declare i64 @llvm.bswap.i64(i64)
-
-declare i16 @llvm.bswap.i16(i16)
-
; rdar://5992453
; A & 255
define i32 @test4(i32 %a) nounwind {
-entry:
- %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
+; CHECK-LABEL: @test4
+; CHECK-NEXT: %tmp2 = and i32 %a, 255
+; CHECK-NEXT: ret i32 %tmp2
+ %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
%tmp4 = lshr i32 %tmp2, 24
ret i32 %tmp4
}
; A
-define i32 @test5(i32 %a) nounwind {
-entry:
+define i32 @test5(i32 %a) nounwind {
+; CHECK-LABEL: @test5
+; CHECK-NEXT: ret i32 %a
%tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
%tmp4 = tail call i32 @llvm.bswap.i32( i32 %tmp2 )
ret i32 %tmp4
}
; a >> 24
-define i32 @test6(i32 %a) nounwind {
-entry:
- %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
+define i32 @test6(i32 %a) nounwind {
+; CHECK-LABEL: @test6
+; CHECK-NEXT: %tmp2 = lshr i32 %a, 24
+; CHECK-NEXT: ret i32 %tmp2
+ %tmp2 = tail call i32 @llvm.bswap.i32( i32 %a )
%tmp4 = and i32 %tmp2, 255
ret i32 %tmp4
}
; PR5284
define i16 @test7(i32 %A) {
- %B = tail call i32 @llvm.bswap.i32(i32 %A) nounwind
+; CHECK-LABEL: @test7
+; CHECK-NEXT: %1 = lshr i32 %A, 16
+; CHECK-NEXT: %D = trunc i32 %1 to i16
+; CHECK-NEXT: ret i16 %D
+ %B = tail call i32 @llvm.bswap.i32(i32 %A) nounwind
%C = trunc i32 %B to i16
%D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
ret i16 %D
}
define i16 @test8(i64 %A) {
+; CHECK-LABEL: @test8
+; CHECK-NEXT: %1 = lshr i64 %A, 48
+; CHECK-NEXT: %D = trunc i64 %1 to i16
+; CHECK-NEXT: ret i16 %D
%B = tail call i64 @llvm.bswap.i64(i64 %A) nounwind
%C = trunc i64 %B to i16
%D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
@@ -66,6 +82,144 @@ define i16 @test8(i64 %A) {
; Misc: Fold bswap(undef) to undef.
define i64 @foo() {
+; CHECK-LABEL: @foo
+; CHECK-NEXT: ret i64 undef
%a = call i64 @llvm.bswap.i64(i64 undef)
ret i64 %a
}
+
+; PR15782
+; Fold: OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
+; Fold: OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
+define i16 @bs_and16i(i16 %a, i16 %b) #0 {
+; CHECK-LABEL: @bs_and16i
+; CHECK-NEXT: %1 = and i16 %a, 4391
+; CHECK-NEXT: %2 = call i16 @llvm.bswap.i16(i16 %1)
+; CHECK-NEXT: ret i16 %2
+ %1 = tail call i16 @llvm.bswap.i16(i16 %a)
+ %2 = and i16 %1, 10001
+ ret i16 %2
+}
+
+define i16 @bs_and16(i16 %a, i16 %b) #0 {
+; CHECK-LABEL: @bs_and16
+; CHECK-NEXT: %1 = and i16 %a, %b
+; CHECK-NEXT: %2 = call i16 @llvm.bswap.i16(i16 %1)
+; CHECK-NEXT: ret i16 %2
+ %tmp1 = tail call i16 @llvm.bswap.i16(i16 %a)
+ %tmp2 = tail call i16 @llvm.bswap.i16(i16 %b)
+ %tmp3 = and i16 %tmp1, %tmp2
+ ret i16 %tmp3
+}
+
+define i16 @bs_or16(i16 %a, i16 %b) #0 {
+; CHECK-LABEL: @bs_or16
+; CHECK-NEXT: %1 = or i16 %a, %b
+; CHECK-NEXT: %2 = call i16 @llvm.bswap.i16(i16 %1)
+; CHECK-NEXT: ret i16 %2
+ %tmp1 = tail call i16 @llvm.bswap.i16(i16 %a)
+ %tmp2 = tail call i16 @llvm.bswap.i16(i16 %b)
+ %tmp3 = or i16 %tmp1, %tmp2
+ ret i16 %tmp3
+}
+
+define i16 @bs_xor16(i16 %a, i16 %b) #0 {
+; CHECK-LABEL: @bs_xor16
+; CHECK-NEXT: %1 = xor i16 %a, %b
+; CHECK-NEXT: %2 = call i16 @llvm.bswap.i16(i16 %1)
+; CHECK-NEXT: ret i16 %2
+ %tmp1 = tail call i16 @llvm.bswap.i16(i16 %a)
+ %tmp2 = tail call i16 @llvm.bswap.i16(i16 %b)
+ %tmp3 = xor i16 %tmp1, %tmp2
+ ret i16 %tmp3
+}
+
+define i32 @bs_and32i(i32 %a, i32 %b) #0 {
+; CHECK-LABEL: @bs_and32i
+; CHECK-NEXT: %1 = and i32 %a, -1585053440
+; CHECK-NEXT: %2 = call i32 @llvm.bswap.i32(i32 %1)
+; CHECK-NEXT: ret i32 %2
+ %tmp1 = tail call i32 @llvm.bswap.i32(i32 %a)
+ %tmp2 = and i32 %tmp1, 100001
+ ret i32 %tmp2
+}
+
+define i32 @bs_and32(i32 %a, i32 %b) #0 {
+; CHECK-LABEL: @bs_and32
+; CHECK-NEXT: %1 = and i32 %a, %b
+; CHECK-NEXT: %2 = call i32 @llvm.bswap.i32(i32 %1)
+; CHECK-NEXT: ret i32 %2
+ %tmp1 = tail call i32 @llvm.bswap.i32(i32 %a)
+ %tmp2 = tail call i32 @llvm.bswap.i32(i32 %b)
+ %tmp3 = and i32 %tmp1, %tmp2
+ ret i32 %tmp3
+}
+
+define i32 @bs_or32(i32 %a, i32 %b) #0 {
+; CHECK-LABEL: @bs_or32
+; CHECK-NEXT: %1 = or i32 %a, %b
+; CHECK-NEXT: %2 = call i32 @llvm.bswap.i32(i32 %1)
+; CHECK-NEXT: ret i32 %2
+ %tmp1 = tail call i32 @llvm.bswap.i32(i32 %a)
+ %tmp2 = tail call i32 @llvm.bswap.i32(i32 %b)
+ %tmp3 = or i32 %tmp1, %tmp2
+ ret i32 %tmp3
+}
+
+define i32 @bs_xor32(i32 %a, i32 %b) #0 {
+; CHECK-LABEL: @bs_xor32
+; CHECK-NEXT: %1 = xor i32 %a, %b
+; CHECK-NEXT: %2 = call i32 @llvm.bswap.i32(i32 %1)
+; CHECK-NEXT: ret i32 %2
+ %tmp1 = tail call i32 @llvm.bswap.i32(i32 %a)
+ %tmp2 = tail call i32 @llvm.bswap.i32(i32 %b)
+ %tmp3 = xor i32 %tmp1, %tmp2
+ ret i32 %tmp3
+}
+
+define i64 @bs_and64i(i64 %a, i64 %b) #0 {
+; CHECK-LABEL: @bs_and64i
+; CHECK-NEXT: %1 = and i64 %a, 129085117527228416
+; CHECK-NEXT: %2 = call i64 @llvm.bswap.i64(i64 %1)
+; CHECK-NEXT: ret i64 %2
+ %tmp1 = tail call i64 @llvm.bswap.i64(i64 %a)
+ %tmp2 = and i64 %tmp1, 1000000001
+ ret i64 %tmp2
+}
+
+define i64 @bs_and64(i64 %a, i64 %b) #0 {
+; CHECK-LABEL: @bs_and64
+; CHECK-NEXT: %1 = and i64 %a, %b
+; CHECK-NEXT: %2 = call i64 @llvm.bswap.i64(i64 %1)
+; CHECK-NEXT: ret i64 %2
+ %tmp1 = tail call i64 @llvm.bswap.i64(i64 %a)
+ %tmp2 = tail call i64 @llvm.bswap.i64(i64 %b)
+ %tmp3 = and i64 %tmp1, %tmp2
+ ret i64 %tmp3
+}
+
+define i64 @bs_or64(i64 %a, i64 %b) #0 {
+; CHECK-LABEL: @bs_or64
+; CHECK-NEXT: %1 = or i64 %a, %b
+; CHECK-NEXT: %2 = call i64 @llvm.bswap.i64(i64 %1)
+; CHECK-NEXT: ret i64 %2
+ %tmp1 = tail call i64 @llvm.bswap.i64(i64 %a)
+ %tmp2 = tail call i64 @llvm.bswap.i64(i64 %b)
+ %tmp3 = or i64 %tmp1, %tmp2
+ ret i64 %tmp3
+}
+
+define i64 @bs_xor64(i64 %a, i64 %b) #0 {
+; CHECK-LABEL: @bs_xor64
+; CHECK-NEXT: %1 = xor i64 %a, %b
+; CHECK-NEXT: %2 = call i64 @llvm.bswap.i64(i64 %1)
+; CHECK-NEXT: ret i64 %2
+ %tmp1 = tail call i64 @llvm.bswap.i64(i64 %a)
+ %tmp2 = tail call i64 @llvm.bswap.i64(i64 %b)
+ %tmp3 = xor i64 %tmp1, %tmp2
+ ret i64 %tmp3
+}
+
+declare i16 @llvm.bswap.i16(i16)
+declare i32 @llvm.bswap.i32(i32)
+declare i64 @llvm.bswap.i64(i64)