[llvm] r357846 - [X86] Use a signed mask in foldMaskedShiftToScaledMask to enable a shorter immediate encoding.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Sat Apr 6 11:00:50 PDT 2019


Author: ctopper
Date: Sat Apr  6 11:00:50 2019
New Revision: 357846

URL: http://llvm.org/viewvc/llvm-project?rev=357846&view=rev
Log:
[X86] Use a signed mask in foldMaskedShiftToScaledMask to enable a shorter immediate encoding.

This function reorders an AND and a SHL so that the SHL can be folded into an
LEA as a scale. The upper bits of the new AND mask will be shifted back out by
the SHL, so it doesn't matter what value we use for those bits. By filling them
with sign bits from the original mask, the new mask may fit in a sign-extended
8-bit or 32-bit immediate, enabling a shorter encoding.
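A minimal standalone sketch of the idea (not the LLVM code itself; the
0xFFFFFFC8 mask and shift amount of 2 are illustrative values chosen to match
the t5 test below). A logical shift of the mask yields 0x3FFFFFF2, which needs
a full 32-bit immediate, while an arithmetic shift yields the equivalent mask
-14 (0xFFFFFFF2), which fits in a sign-extended 8-bit immediate:

  #include <cassert>
  #include <cstdint>

  int main() {
    const unsigned ShiftAmt = 2;
    const uint32_t Mask = 0xFFFFFFC8; // illustrative original mask

    // Logical shift: the new mask needs a full 32-bit immediate.
    uint32_t UnsignedMask = Mask >> ShiftAmt;       // 0x3FFFFFF2
    // Arithmetic shift: sign bits fill the upper bits, giving -14
    // (0xFFFFFFF2), which encodes as a sign-extended 8-bit immediate.
    int32_t SignedMask = (int32_t)Mask >> ShiftAmt; // -14

    // The top ShiftAmt bits are shifted back out by the SHL, so both
    // masks give the same result in (shl (and X, NewMask), ShiftAmt).
    assert((UnsignedMask << ShiftAmt) ==
           ((uint32_t)SignedMask << ShiftAmt));
    return 0;
  }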

Modified:
    llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
    llvm/trunk/test/CodeGen/X86/fold-and-shift-x86_64.ll
    llvm/trunk/test/CodeGen/X86/fold-and-shift.ll

Modified: llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp?rev=357846&r1=357845&r2=357846&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelDAGToDAG.cpp Sat Apr  6 11:00:50 2019
@@ -1378,7 +1378,6 @@ static bool foldMaskAndShiftToExtract(Se
 // allows us to fold the shift into this addressing mode. Returns false if the
 // transform succeeded.
 static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
-                                        uint64_t Mask,
                                         SDValue Shift, SDValue X,
                                         X86ISelAddressMode &AM) {
   if (Shift.getOpcode() != ISD::SHL ||
@@ -1396,6 +1395,11 @@ static bool foldMaskedShiftToScaledMask(
   if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
     return true;
 
+  // Use a signed mask so that shifting right will insert sign bits. These
+  // bits will be removed when we shift the result left so it doesn't matter
+  // what we use. This might allow a smaller immediate encoding.
+  int64_t Mask = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue();
+
   MVT VT = N.getSimpleValueType();
   SDLoc DL(N);
   SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
@@ -1863,7 +1867,7 @@ bool X86DAGToDAGISel::matchAddressRecurs
 
     // Try to swap the mask and shift to place shifts which can be done as
     // a scale on the outside of the mask.
-    if (!foldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
+    if (!foldMaskedShiftToScaledMask(*CurDAG, N, Shift, X, AM))
       return false;
 
     // Try to fold the mask and shift into BEXTR and scale.

Modified: llvm/trunk/test/CodeGen/X86/fold-and-shift-x86_64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-and-shift-x86_64.ll?rev=357846&r1=357845&r2=357846&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-and-shift-x86_64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-and-shift-x86_64.ll Sat Apr  6 11:00:50 2019
@@ -4,9 +4,8 @@
 define i8 @t1(i8* %X, i64 %i) {
 ; CHECK-LABEL: t1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movabsq $4611686018427387649, %rax # imm = 0x3FFFFFFFFFFFFF01
-; CHECK-NEXT:    andq %rsi, %rax
-; CHECK-NEXT:    movb (%rdi,%rax,4), %al
+; CHECK-NEXT:    andq $-255, %rsi
+; CHECK-NEXT:    movb (%rdi,%rsi,4), %al
 ; CHECK-NEXT:    retq
 
 entry:
@@ -20,9 +19,8 @@ entry:
 define i8 @t2(i8* %X, i64 %i) {
 ; CHECK-LABEL: t2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    movabsq $4611686018427387890, %rax # imm = 0x3FFFFFFFFFFFFFF2
-; CHECK-NEXT:    andq %rsi, %rax
-; CHECK-NEXT:    movb (%rdi,%rax,4), %al
+; CHECK-NEXT:    andq $-14, %rsi
+; CHECK-NEXT:    movb (%rdi,%rsi,4), %al
 ; CHECK-NEXT:    retq
 
 entry:

Modified: llvm/trunk/test/CodeGen/X86/fold-and-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/fold-and-shift.ll?rev=357846&r1=357845&r2=357846&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/fold-and-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/fold-and-shift.ll Sat Apr  6 11:00:50 2019
@@ -95,7 +95,7 @@ define i8 @t5(i8* %X, i32 %i) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    andl $1073741810, %ecx # imm = 0x3FFFFFF2
+; CHECK-NEXT:    andl $-14, %ecx
 ; CHECK-NEXT:    movb (%eax,%ecx,4), %al
 ; CHECK-NEXT:    retl
 
@@ -112,7 +112,7 @@ define i8 @t6(i8* %X, i32 %i) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    andl $1073741569, %ecx # imm = 0x3FFFFF01
+; CHECK-NEXT:    andl $-255, %ecx
 ; CHECK-NEXT:    movb (%eax,%ecx,4), %al
 ; CHECK-NEXT:    retl
 
