[llvm-commits] [llvm] r44970 - in /llvm/trunk: lib/Target/X86/X86ISelDAGToDAG.cpp lib/Target/X86/X86InstrInfo.td test/CodeGen/X86/fold-and-shift.ll

Evan Cheng evan.cheng at apple.com
Wed Dec 12 16:43:27 PST 2007


Author: evancheng
Date: Wed Dec 12 18:43:27 2007
New Revision: 44970

URL: http://llvm.org/viewvc/llvm-project?rev=44970&view=rev
Log:
Fold some and + shift combinations into the x86 addressing mode.
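
The transform relies on a simple identity: in "(x << C1) & C2" the low C1
bits of the shifted value are already zero, so masking with C2 gives the
same result as masking the unshifted value with C2 >> C1 and shifting
afterwards. That trailing shift can then be absorbed into the scale of a
scaled-index address. A minimal standalone C++ sketch (not part of the
patch; the helper names are just illustrative) that checks the identity
for the constants used in @t1 of the new test:

  #include <cassert>
  #include <cstdint>

  // Original form: shift first, then mask.
  uint32_t shiftThenMask(uint32_t x, unsigned c1, uint32_t c2) {
    return (x << c1) & c2;
  }

  // Rewritten form: mask with c2 >> c1, then shift. The trailing shift is
  // what the addressing mode can absorb as a scale of 2, 4 or 8.
  uint32_t maskThenShift(uint32_t x, unsigned c1, uint32_t c2) {
    return (x & (c2 >> c1)) << c1;
  }

  int main() {
    // Constants from @t1: shift amount 2, mask 1020 (i.e. 255 << 2).
    for (uint32_t x = 0; x < (1u << 16); ++x)
      assert(shiftThenMask(x, 2, 1020) == maskThenShift(x, 2, 1020));
    return 0;
  }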

Added:
    llvm/trunk/test/CodeGen/X86/fold-and-shift.ll
Modified:
    llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
    llvm/trunk/lib/Target/X86/X86InstrInfo.td

Modified: llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=44970&r1=44969&r2=44970&view=diff

==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp Wed Dec 12 18:43:27 2007
@@ -582,7 +582,7 @@
   }
 
   int id = N.Val->getNodeId();
-  bool Available = isSelected(id);
+  bool AlreadySelected = isSelected(id); // Already selected, not yet replaced.
 
   switch (N.getOpcode()) {
   default: break;
@@ -605,7 +605,7 @@
     // If the value is already available in a register and both the base and
     // index components have been picked, we can't also fold it into the
     // addressing mode. Duplicate GlobalAddress or ConstantPool as displacement.
-    if (!Available || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
+    if (!AlreadySelected || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
       bool isStatic = TM.getRelocationModel() == Reloc::Static;
       SDOperand N0 = N.getOperand(0);
       // The lower 4G of the address space is not available on Mac OS X x86-64.
@@ -653,7 +653,7 @@
     break;
 
   case ISD::SHL:
-    if (Available || AM.IndexReg.Val != 0 || AM.Scale != 1)
+    if (AlreadySelected || AM.IndexReg.Val != 0 || AM.Scale != 1)
       break;
       
     if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
@@ -690,7 +690,7 @@
     // FALL THROUGH
   case ISD::MUL:
     // X*[3,5,9] -> X+X*[2,4,8]
-    if (!Available &&
+    if (!AlreadySelected &&
         AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base.Reg.Val == 0 &&
         AM.IndexReg.Val == 0) {
@@ -725,7 +725,7 @@
     break;
 
   case ISD::ADD:
-    if (!Available) {
+    if (!AlreadySelected) {
       X86ISelAddressMode Backup = AM;
       if (!MatchAddress(N.Val->getOperand(0), AM, false, Depth+1) &&
           !MatchAddress(N.Val->getOperand(1), AM, false, Depth+1))
@@ -740,7 +740,7 @@
 
   case ISD::OR:
     // Handle "X | C" as "X + C" iff X is known to have C bits clear.
-    if (Available) break;
+    if (AlreadySelected) break;
       
     if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
       X86ISelAddressMode Backup = AM;
@@ -758,6 +758,44 @@
       AM = Backup;
     }
     break;
+      
+  case ISD::AND: {
+    // Handle "(X << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
+    // allows us to fold the shift into this addressing mode.
+    if (AlreadySelected) break;
+    SDOperand Shift = N.getOperand(0);
+    if (Shift.getOpcode() != ISD::SHL) break;
+    
+    // Scale must not be used already.
+    if (AM.IndexReg.Val != 0 || AM.Scale != 1) break;
+      
+    ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
+    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
+    if (!C1 || !C2) break;
+
+    // Not likely to be profitable if either the AND or SHIFT node has more
+    // than one use (unless all uses are for address computation). Besides,
+    // the isel mechanism requires their node ids to be reused.
+    if (!N.hasOneUse() || !Shift.hasOneUse())
+      break;
+    
+    // Verify that the shift amount is something we can fold.
+    unsigned ShiftCst = C1->getValue();
+    if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
+      break;
+    
+    // Get the new AND mask, this folds to a constant.
+    SDOperand NewANDMask = CurDAG->getNode(ISD::SRL, N.getValueType(),
+                                           SDOperand(C2, 0), SDOperand(C1, 0));
+    SDOperand NewAND = CurDAG->getNode(ISD::AND, N.getValueType(),
+                                       Shift.getOperand(0), NewANDMask);
+    NewANDMask.Val->setNodeId(Shift.Val->getNodeId());
+    NewAND.Val->setNodeId(N.Val->getNodeId());
+    
+    AM.Scale = 1 << ShiftCst;
+    AM.IndexReg = NewAND;
+    return false;
+  }
   }
 
   return MatchAddressBase(N, AM, isRoot, Depth);
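
A note on the new ISD::AND case above, with a hypothetical standalone
helper (not part of the patch) that mirrors its computation: the SIB byte
only encodes index scales of 1, 2, 4 and 8, so only shifts by 1, 2 and 3
map onto a non-trivial scale, and the AND mask is pre-shifted right by the
same amount so that it applies to the unshifted value, which then becomes
AM.IndexReg.

  #include <cstdint>

  struct AndShiftFold {
    bool Folded;       // true if the pattern fits a scaled-index address
    uint32_t NewMask;  // mask to apply to the unshifted index value
    unsigned Scale;    // resulting addressing-mode scale: 2, 4 or 8
  };

  // Mirrors the checks in the new ISD::AND case: only shift amounts of 1,
  // 2 and 3 map onto legal x86 scales; the mask shrinks by the same shift.
  AndShiftFold foldAndOfShift(unsigned ShiftCst, uint32_t AndMask) {
    if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
      return AndShiftFold{false, AndMask, 1};
    return AndShiftFold{true, AndMask >> ShiftCst, 1u << ShiftCst};
  }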

Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.td?rev=44970&r1=44969&r2=44970&view=diff

==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.td Wed Dec 12 18:43:27 2007
@@ -2613,6 +2613,10 @@
 def : Pat<(i32 (anyext (loadi8  addr:$src))), (MOVZX32rm8  addr:$src)>;
 def : Pat<(i32 (anyext (loadi16 addr:$src))), (MOVZX32rm16 addr:$src)>;
 
+// (and (i32 load), 255) -> (zextload i8)
+def : Pat<(i32 (and (loadi32 addr:$src), (i32 255))), (MOVZX32rm8 addr:$src)>;
+def : Pat<(i32 (and (loadi32 addr:$src), (i32 65535))),(MOVZX32rm16 addr:$src)>;
+
 //===----------------------------------------------------------------------===//
 // Some peepholes
 //===----------------------------------------------------------------------===//
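
The two new patterns narrow an i32 load whose result is immediately masked
to its low 8 or 16 bits into a zero-extending byte or halfword load. That
is only sound because x86 is little-endian, so the low byte/halfword of the
i32 sits at the lowest address. A small standalone check of that assumption
(not part of the patch; it only holds on a little-endian host such as x86):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  int main() {
    uint32_t Word = 0x12345678;
    uint8_t LowByte;
    uint16_t LowHalf;
    // Re-read the first byte/halfword at the same address, as the narrowed
    // loads would.
    std::memcpy(&LowByte, &Word, 1);
    std::memcpy(&LowHalf, &Word, 2);
    assert((Word & 255u) == LowByte);    // 0x78 on little-endian x86
    assert((Word & 65535u) == LowHalf);  // 0x5678 on little-endian x86
    return 0;
  }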

Added: llvm/trunk/test/CodeGen/X86/fold-and-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-and-shift.ll?rev=44970&view=auto

==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-and-shift.ll (added)
+++ llvm/trunk/test/CodeGen/X86/fold-and-shift.ll Wed Dec 12 18:43:27 2007
@@ -0,0 +1,21 @@
+; RUN: llvm-as < %s | llc -march=x86 | not grep and
+
+define i32 @t1(i8* %X, i32 %i) {
+entry:
+	%tmp2 = shl i32 %i, 2		; <i32> [#uses=1]
+	%tmp4 = and i32 %tmp2, 1020		; <i32> [#uses=1]
+	%tmp7 = getelementptr i8* %X, i32 %tmp4		; <i8*> [#uses=1]
+	%tmp78 = bitcast i8* %tmp7 to i32*		; <i32*> [#uses=1]
+	%tmp9 = load i32* %tmp78, align 4		; <i32> [#uses=1]
+	ret i32 %tmp9
+}
+
+define i32 @t2(i16* %X, i32 %i) {
+entry:
+	%tmp2 = shl i32 %i, 1		; <i32> [#uses=1]
+	%tmp4 = and i32 %tmp2, 131070		; <i32> [#uses=1]
+	%tmp7 = getelementptr i16* %X, i32 %tmp4		; <i16*> [#uses=1]
+	%tmp78 = bitcast i16* %tmp7 to i32*		; <i32*> [#uses=1]
+	%tmp9 = load i32* %tmp78, align 4		; <i32> [#uses=1]
+	ret i32 %tmp9
+}
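
Both test functions index a buffer with a value that is shifted and then
masked; the RUN line only checks that no and instruction survives in the
generated code, i.e. that the mask is selected as a zero-extending move and
the shift is folded into the load's addressing mode rather than emitted as
separate instructions.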




