[llvm] r203315 - [DAGCombiner] Recognize another rotation idiom

Adam Nemet anemet at apple.com
Fri Mar 7 15:56:28 PST 2014


Author: anemet
Date: Fri Mar  7 17:56:28 2014
New Revision: 203315

URL: http://llvm.org/viewvc/llvm-project?rev=203315&view=rev
Log:
[DAGCombiner] Recognize another rotation idiom

This is the new idiom:

  x<<(y&31) | x>>((0-y)&31)

which is recognized as:

  x ROTL (y&31)

The change refines matchRotateSub.  In the equality
Neg & (OpSize - 1) == (OpSize - Pos) & (OpSize - 1), if Pos has the form
Pos' & (OpSize - 1), we can simply use Pos' instead of Pos, because the
extra truncation is redundant under the outer mask.
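
To see concretely why the idiom is a rotate, here is a minimal standalone
C++ sketch (the rotl32 helper is defined only for this check) that
brute-forces the equivalence for 32-bit values:

  #include <cassert>
  #include <cstdint>

  // Plain rotate-left by r, with r already reduced to the range 0..31.
  static uint32_t rotl32(uint32_t x, unsigned r) {
    return r == 0 ? x : (x << r) | (x >> (32 - r));
  }

  int main() {
    const uint32_t x = 0x12345678;
    for (unsigned y = 0; y < 256; ++y) {
      // The idiom the combiner now matches ...
      uint32_t idiom = (x << (y & 31)) | (x >> ((0 - y) & 31));
      // ... is the same as rotating left by y & 31.
      assert(idiom == rotl32(x, y & 31));
    }
    return 0;
  }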

Added:
    llvm/trunk/test/CodeGen/X86/rotate4.ll
Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=203315&r1=203314&r2=203315&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Fri Mar  7 17:56:28 2014
@@ -3466,6 +3466,14 @@ static bool matchRotateSub(SDValue Pos,
     return 0;
   SDValue NegOp1 = Neg.getOperand(1);
 
+  // On the RHS of [A], if Pos is Pos' & (OpSize - 1), just replace Pos with
+  // Pos'.  The truncation is redundant for the purpose of the equality.
+  if (MaskLoBits &&
+      Pos.getOpcode() == ISD::AND &&
+      Pos.getOperand(1).getOpcode() == ISD::Constant &&
+      cast<ConstantSDNode>(Pos.getOperand(1))->getAPIntValue() == OpSize - 1)
+    Pos = Pos.getOperand(0);
+
   // The condition we need is now:
   //
   //     (NegC - NegOp1) & Mask == (OpSize - Pos) & Mask

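The truncation can be ignored because, with OpSize a power of two, masking
Pos' by OpSize - 1 before the subtraction cannot change the low bits of the
result once the outer & (OpSize - 1) is applied.  A minimal standalone C++
check of that arithmetic fact, assuming OpSize == 32:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t OpSize = 32;                // power of two, as required
    for (uint32_t PosPrime = 0; PosPrime < 1024; ++PosPrime) {
      uint32_t Pos = PosPrime & (OpSize - 1);  // Pos = Pos' & (OpSize - 1)
      // Under the outer mask, subtracting Pos and subtracting Pos' agree,
      // so the equality can be checked against Pos' directly.
      assert(((OpSize - Pos) & (OpSize - 1)) ==
             ((OpSize - PosPrime) & (OpSize - 1)));
    }
    return 0;
  }
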
Added: llvm/trunk/test/CodeGen/X86/rotate4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/rotate4.ll?rev=203315&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/rotate4.ll (added)
+++ llvm/trunk/test/CodeGen/X86/rotate4.ll Fri Mar  7 17:56:28 2014
@@ -0,0 +1,126 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=generic | FileCheck %s
+
+; Check that we recognize this idiom for rotation too:
+;    a << (b & 31) | a >> ((0 - b) & 31)
+
+define i32 @rotate_left_32(i32 %a, i32 %b) {
+; CHECK-LABEL: rotate_left_32:
+; CHECK: roll
+entry:
+  %and = and i32 %b, 31
+  %shl = shl i32 %a, %and
+  %0 = sub i32 0, %b
+  %and3 = and i32 %0, 31
+  %shr = lshr i32 %a, %and3
+  %or = or i32 %shl, %shr
+  ret i32 %or
+}
+
+define i32 @rotate_right_32(i32 %a, i32 %b) {
+; CHECK-LABEL: rotate_right_32:
+; CHECK: rorl
+entry:
+  %and = and i32 %b, 31
+  %shl = lshr i32 %a, %and
+  %0 = sub i32 0, %b
+  %and3 = and i32 %0, 31
+  %shr = shl i32 %a, %and3
+  %or = or i32 %shl, %shr
+  ret i32 %or
+}
+
+define i64 @rotate_left_64(i64 %a, i64 %b) {
+; CHECK-LABEL: rotate_left_64:
+; CHECK: rolq
+entry:
+  %and = and i64 %b, 63
+  %shl = shl i64 %a, %and
+  %0 = sub i64 0, %b
+  %and3 = and i64 %0, 63
+  %shr = lshr i64 %a, %and3
+  %or = or i64 %shl, %shr
+  ret i64 %or
+}
+
+define i64 @rotate_right_64(i64 %a, i64 %b) {
+; CHECK-LABEL: rotate_right_64:
+; CHECK: rorq
+entry:
+  %and = and i64 %b, 63
+  %shl = lshr i64 %a, %and
+  %0 = sub i64 0, %b
+  %and3 = and i64 %0, 63
+  %shr = shl i64 %a, %and3
+  %or = or i64 %shl, %shr
+  ret i64 %or
+}
+
+; Also check mem operand.
+
+define void @rotate_left_m32(i32 *%pa, i32 %b) {
+; CHECK-LABEL: rotate_left_m32:
+; CHECK: roll
+; no store:
+; CHECK-NOT: mov
+entry:
+  %a = load i32* %pa, align 16
+  %and = and i32 %b, 31
+  %shl = shl i32 %a, %and
+  %0 = sub i32 0, %b
+  %and3 = and i32 %0, 31
+  %shr = lshr i32 %a, %and3
+  %or = or i32 %shl, %shr
+  store i32 %or, i32* %pa, align 32
+  ret void
+}
+
+define void @rotate_right_m32(i32 *%pa, i32 %b) {
+; CHECK-LABEL: rotate_right_m32:
+; CHECK: rorl
+; no store:
+; CHECK-NOT: mov
+entry:
+  %a = load i32* %pa, align 16
+  %and = and i32 %b, 31
+  %shl = lshr i32 %a, %and
+  %0 = sub i32 0, %b
+  %and3 = and i32 %0, 31
+  %shr = shl i32 %a, %and3
+  %or = or i32 %shl, %shr
+  store i32 %or, i32* %pa, align 32
+  ret void
+}
+
+define void @rotate_left_m64(i64 *%pa, i64 %b) {
+; CHECK-LABEL: rotate_left_m64:
+; CHECK: rolq
+; no store:
+; CHECK-NOT: mov
+entry:
+  %a = load i64* %pa, align 16
+  %and = and i64 %b, 63
+  %shl = shl i64 %a, %and
+  %0 = sub i64 0, %b
+  %and3 = and i64 %0, 63
+  %shr = lshr i64 %a, %and3
+  %or = or i64 %shl, %shr
+  store i64 %or, i64* %pa, align 64
+  ret void
+}
+
+define void @rotate_right_m64(i64 *%pa, i64 %b) {
+; CHECK-LABEL: rotate_right_m64:
+; CHECK: rorq
+; no store:
+; CHECK-NOT: mov
+entry:
+  %a = load i64* %pa, align 16
+  %and = and i64 %b, 63
+  %shl = lshr i64 %a, %and
+  %0 = sub i64 0, %b
+  %and3 = and i64 %0, 63
+  %shr = shl i64 %a, %and3
+  %or = or i64 %shl, %shr
+  store i64 %or, i64* %pa, align 64
+  ret void
+}