[llvm] r305691 - [GlobalISel][X86] Fold FI/G_GEP into LDR/STR instruction addressing mode.

Igor Breger via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 19 06:12:58 PDT 2017


Author: ibreger
Date: Mon Jun 19 08:12:57 2017
New Revision: 305691

URL: http://llvm.org/viewvc/llvm-project?rev=305691&view=rev
Log:
[GlobalISel][X86] Fold FI/G_GEP into LDR/STR instruction addressing mode.

Summary: Implement some of the simplest addressing modes. This should help with testing the ABI.

Reviewers: zvi, guyblank

Reviewed By: guyblank

Subscribers: rovka, llvm-commits, kristof.beyls

Differential Revision: https://reviews.llvm.org/D33888

Modified:
    llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp
    llvm/trunk/test/CodeGen/X86/GlobalISel/add-scalar.ll
    llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll
    llvm/trunk/test/CodeGen/X86/GlobalISel/ext.ll
    llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
    llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar.ll
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
    llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir

Modified: llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp?rev=305691&r1=305690&r2=305691&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86InstructionSelector.cpp Mon Jun 19 08:12:57 2017
@@ -326,6 +326,34 @@ unsigned X86InstructionSelector::getLoad
   return Opc;
 }
 
+// Fill in an address from the given instruction.
+void X86SelectAddress(const MachineInstr &I, const MachineRegisterInfo &MRI,
+                      X86AddressMode &AM) {
+
+  assert(I.getOperand(0).isReg() && "unsupported operand.");
+  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
+         "unsupported type.");
+
+  if (I.getOpcode() == TargetOpcode::G_GEP) {
+    if (auto COff = getConstantVRegVal(I.getOperand(2).getReg(), MRI)) {
+      int64_t Imm = *COff;
+      if (isInt<32>(Imm)) { // Check for displacement overflow.
+        AM.Disp = static_cast<int32_t>(Imm);
+        AM.Base.Reg = I.getOperand(1).getReg();
+        return;
+      }
+    }
+  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
+    AM.Base.FrameIndex = I.getOperand(1).getIndex();
+    AM.BaseType = X86AddressMode::FrameIndexBase;
+    return;
+  }
+
+  // Default behavior.
+  AM.Base.Reg = I.getOperand(0).getReg();
+  return;
+}
+
 bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                                MachineRegisterInfo &MRI,
                                                MachineFunction &MF) const {
@@ -340,18 +368,28 @@ bool X86InstructionSelector::selectLoadS
   const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);
 
   auto &MemOp = **I.memoperands_begin();
+  if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
+    DEBUG(dbgs() << "Atomic load/store not supported yet\n");
+    return false;
+  }
+
   unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
   if (NewOpc == Opc)
     return false;
 
+  X86AddressMode AM;
+  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);
+
   I.setDesc(TII.get(NewOpc));
   MachineInstrBuilder MIB(MF, I);
-  if (Opc == TargetOpcode::G_LOAD)
-    addOffset(MIB, 0);
-  else {
+  if (Opc == TargetOpcode::G_LOAD) {
+    I.RemoveOperand(1);
+    addFullAddress(MIB, AM);
+  } else {
     // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
+    I.RemoveOperand(1);
     I.RemoveOperand(0);
-    addOffset(MIB, 0).addUse(DefReg);
+    addFullAddress(MIB, AM).addUse(DefReg);
   }
   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
 }
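
For readers following the patch, the decision the new X86SelectAddress helper makes boils down to three cases: a G_GEP whose offset is a constant that fits in a signed 32-bit displacement folds into base register + displacement, a G_FRAME_INDEX folds into a frame-index base, and everything else falls back to using the pointer vreg itself as the base register. The snippet below is a minimal standalone sketch of that classification, not the LLVM code: the names AddrKind, Addr and selectAddress are invented for illustration, while the real helper works on MachineInstr and X86AddressMode.

  // Simplified, compilable model (C++17) of the address classification above.
  #include <cstdint>
  #include <iostream>
  #include <optional>

  enum class AddrKind { RegBase, RegBaseDisp, FrameIndexBase };

  struct Addr {
    AddrKind Kind;
    int BaseReg = 0;     // stand-in for a virtual register number
    int FrameIndex = 0;
    int32_t Disp = 0;
  };

  // Equivalent to llvm::isInt<32>(Imm): Imm is representable as int32_t.
  static bool fitsInInt32(int64_t Imm) {
    return Imm >= INT32_MIN && Imm <= INT32_MAX;
  }

  // Classify the instruction that defines the pointer operand.
  Addr selectAddress(bool IsGep, bool IsFrameIndex, int PtrReg, int GepBaseReg,
                     std::optional<int64_t> GepOff, int FI) {
    if (IsGep && GepOff && fitsInInt32(*GepOff))
      return {AddrKind::RegBaseDisp, GepBaseReg, 0,
              static_cast<int32_t>(*GepOff)};   // fold constant offset
    if (IsFrameIndex)
      return {AddrKind::FrameIndexBase, 0, FI, 0};
    return {AddrKind::RegBase, PtrReg, 0, 0};   // default: pointer vreg as base
  }

  int main() {
    // Constant offset 20, as in the test_gep_folding test below: folds.
    Addr A = selectAddress(/*IsGep=*/true, /*IsFrameIndex=*/false,
                           /*PtrReg=*/3, /*GepBaseReg=*/0,
                           /*GepOff=*/int64_t{20}, /*FI=*/0);
    std::cout << "folded displacement: " << A.Disp << "\n";

    // Frame index 1: selected as a frame-index base.
    Addr B = selectAddress(false, true, 0, 0, std::nullopt, /*FI=*/1);
    std::cout << "frame index: " << B.FrameIndex << "\n";
    return 0;
  }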

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/add-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/add-scalar.ll?rev=305691&r1=305690&r2=305691&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/add-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/add-scalar.ll Mon Jun 19 08:12:57 2017
@@ -18,18 +18,10 @@ define i64 @test_add_i64(i64 %arg1, i64
 ; X32-NEXT:    movl %esp, %ebp
 ; X32-NEXT:  .Lcfi2:
 ; X32-NEXT:    .cfi_def_cfa_register %ebp
-; X32-NEXT:    pushl %esi
-; X32-NEXT:  .Lcfi3:
-; X32-NEXT:    .cfi_offset %esi, -12
-; X32-NEXT:    leal 8(%ebp), %ecx
-; X32-NEXT:    leal 12(%ebp), %esi
-; X32-NEXT:    leal 16(%ebp), %eax
-; X32-NEXT:    movl (%eax), %eax
-; X32-NEXT:    leal 20(%ebp), %edx
-; X32-NEXT:    movl (%edx), %edx
-; X32-NEXT:    addl (%ecx), %eax
-; X32-NEXT:    adcl (%esi), %edx
-; X32-NEXT:    popl %esi
+; X32-NEXT:    movl 16(%ebp), %eax
+; X32-NEXT:    movl 20(%ebp), %edx
+; X32-NEXT:    addl 8(%ebp), %eax
+; X32-NEXT:    adcl 12(%ebp), %edx
 ; X32-NEXT:    popl %ebp
 ; X32-NEXT:    retl
   %ret = add i64 %arg1, %arg2
@@ -46,10 +38,8 @@ define i32 @test_add_i32(i32 %arg1, i32
 ;
 ; X32-LABEL: test_add_i32:
 ; X32:       # BB#0:
-; X32-NEXT:    leal 4(%esp), %ecx
-; X32-NEXT:    leal 8(%esp), %eax
-; X32-NEXT:    movl (%eax), %eax
-; X32-NEXT:    addl (%ecx), %eax
+; X32-NEXT:    movl 8(%esp), %eax
+; X32-NEXT:    addl 4(%esp), %eax
 ; X32-NEXT:    retl
   %ret = add i32 %arg1, %arg2
   ret i32 %ret
@@ -66,10 +56,8 @@ define i16 @test_add_i16(i16 %arg1, i16
 ;
 ; X32-LABEL: test_add_i16:
 ; X32:       # BB#0:
-; X32-NEXT:    leal 4(%esp), %ecx
-; X32-NEXT:    leal 8(%esp), %eax
-; X32-NEXT:    movzwl (%eax), %eax
-; X32-NEXT:    addw (%ecx), %ax
+; X32-NEXT:    movzwl 8(%esp), %eax
+; X32-NEXT:    addw 4(%esp), %ax
 ; X32-NEXT:    retl
   %ret = add i16 %arg1, %arg2
   ret i16 %ret
@@ -84,10 +72,8 @@ define i8 @test_add_i8(i8 %arg1, i8 %arg
 ;
 ; X32-LABEL: test_add_i8:
 ; X32:       # BB#0:
-; X32-NEXT:    leal 4(%esp), %ecx
-; X32-NEXT:    leal 8(%esp), %eax
-; X32-NEXT:    movb (%eax), %al
-; X32-NEXT:    addb (%ecx), %al
+; X32-NEXT:    movb 8(%esp), %al
+; X32-NEXT:    addb 4(%esp), %al
 ; X32-NEXT:    retl
   %ret = add i8 %arg1, %arg2
   ret i8 %ret

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll?rev=305691&r1=305690&r2=305691&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/callingconv.ll Mon Jun 19 08:12:57 2017
@@ -38,16 +38,10 @@ define i64 @test_ret_i64() {
 }
 
 define i8 @test_arg_i8(i8 %a) {
-; X32_GISEL-LABEL: test_arg_i8:
-; X32_GISEL:       # BB#0:
-; X32_GISEL-NEXT:    leal 4(%esp), %eax
-; X32_GISEL-NEXT:    movb (%eax), %al
-; X32_GISEL-NEXT:    retl
-;
-; X32_ISEL-LABEL: test_arg_i8:
-; X32_ISEL:       # BB#0:
-; X32_ISEL-NEXT:    movb 4(%esp), %al
-; X32_ISEL-NEXT:    retl
+; X32-LABEL: test_arg_i8:
+; X32:       # BB#0:
+; X32-NEXT:    movb 4(%esp), %al
+; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_arg_i8:
 ; X64:       # BB#0:
@@ -57,16 +51,10 @@ define i8 @test_arg_i8(i8 %a) {
 }
 
 define i16 @test_arg_i16(i16 %a) {
-; X32_GISEL-LABEL: test_arg_i16:
-; X32_GISEL:       # BB#0:
-; X32_GISEL-NEXT:    leal 4(%esp), %eax
-; X32_GISEL-NEXT:    movzwl (%eax), %eax
-; X32_GISEL-NEXT:    retl
-;
-; X32_ISEL-LABEL: test_arg_i16:
-; X32_ISEL:       # BB#0:
-; X32_ISEL-NEXT:    movzwl 4(%esp), %eax
-; X32_ISEL-NEXT:    retl
+; X32-LABEL: test_arg_i16:
+; X32:       # BB#0:
+; X32-NEXT:    movzwl 4(%esp), %eax
+; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_arg_i16:
 ; X64:       # BB#0:
@@ -76,16 +64,10 @@ define i16 @test_arg_i16(i16 %a) {
 }
 
 define i32 @test_arg_i32(i32 %a) {
-; X32_GISEL-LABEL: test_arg_i32:
-; X32_GISEL:       # BB#0:
-; X32_GISEL-NEXT:    leal 4(%esp), %eax
-; X32_GISEL-NEXT:    movl (%eax), %eax
-; X32_GISEL-NEXT:    retl
-;
-; X32_ISEL-LABEL: test_arg_i32:
-; X32_ISEL:       # BB#0:
-; X32_ISEL-NEXT:    movl 4(%esp), %eax
-; X32_ISEL-NEXT:    retl
+; X32-LABEL: test_arg_i32:
+; X32:       # BB#0:
+; X32-NEXT:    movl 4(%esp), %eax
+; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_arg_i32:
 ; X64:       # BB#0:
@@ -95,19 +77,11 @@ define i32 @test_arg_i32(i32 %a) {
 }
 
 define i64 @test_arg_i64(i64 %a) {
-; X32_GISEL-LABEL: test_arg_i64:
-; X32_GISEL:       # BB#0:
-; X32_GISEL-NEXT:    leal 4(%esp), %eax
-; X32_GISEL-NEXT:    movl (%eax), %eax
-; X32_GISEL-NEXT:    leal 8(%esp), %ecx
-; X32_GISEL-NEXT:    movl (%ecx), %edx
-; X32_GISEL-NEXT:    retl
-;
-; X32_ISEL-LABEL: test_arg_i64:
-; X32_ISEL:       # BB#0:
-; X32_ISEL-NEXT:    movl 4(%esp), %eax
-; X32_ISEL-NEXT:    movl 8(%esp), %edx
-; X32_ISEL-NEXT:    retl
+; X32-LABEL: test_arg_i64:
+; X32:       # BB#0:
+; X32-NEXT:    movl 4(%esp), %eax
+; X32-NEXT:    movl 8(%esp), %edx
+; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_arg_i64:
 ; X64:       # BB#0:
@@ -117,30 +91,16 @@ define i64 @test_arg_i64(i64 %a) {
 }
 
 define i64 @test_i64_args_8(i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %arg5, i64 %arg6, i64 %arg7, i64 %arg8) {
-; X32_GISEL-LABEL: test_i64_args_8:
-; X32_GISEL:       # BB#0:
-; X32_GISEL-NEXT:    leal 60(%esp), %eax
-; X32_GISEL-NEXT:    movl (%eax), %eax
-; X32_GISEL-NEXT:    leal 64(%esp), %ecx
-; X32_GISEL-NEXT:    movl (%ecx), %edx
-; X32_GISEL-NEXT:    retl
+; X32-LABEL: test_i64_args_8:
+; X32:       # BB#0:
+; X32-NEXT:    movl 60(%esp), %eax
+; X32-NEXT:    movl 64(%esp), %edx
+; X32-NEXT:    retl
 ;
-; X32_ISEL-LABEL: test_i64_args_8:
-; X32_ISEL:       # BB#0:
-; X32_ISEL-NEXT:    movl 60(%esp), %eax
-; X32_ISEL-NEXT:    movl 64(%esp), %edx
-; X32_ISEL-NEXT:    retl
-;
-; X64_GISEL-LABEL: test_i64_args_8:
-; X64_GISEL:       # BB#0:
-; X64_GISEL-NEXT:    leaq 16(%rsp), %rax
-; X64_GISEL-NEXT:    movq (%rax), %rax
-; X64_GISEL-NEXT:    retq
-;
-; X64_ISEL-LABEL: test_i64_args_8:
-; X64_ISEL:       # BB#0:
-; X64_ISEL-NEXT:    movq 16(%rsp), %rax
-; X64_ISEL-NEXT:    retq
+; X64-LABEL: test_i64_args_8:
+; X64:       # BB#0:
+; X64-NEXT:    movq 16(%rsp), %rax
+; X64-NEXT:    retq
 
   ret i64 %arg8
 }

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/ext.ll?rev=305691&r1=305690&r2=305691&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/ext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/ext.ll Mon Jun 19 08:12:57 2017
@@ -11,8 +11,7 @@ define i32 @test_zext_i1(i32 %a) {
 ;
 ; X32-LABEL: test_zext_i1:
 ; X32:       # BB#0:
-; X32-NEXT:    leal 4(%esp), %eax
-; X32-NEXT:    movl (%eax), %eax
+; X32-NEXT:    movl 4(%esp), %eax
 ; X32-NEXT:    andl $1, %eax
 ; X32-NEXT:    retl
   %val = trunc i32 %a to i1
@@ -28,8 +27,7 @@ define i32 @test_zext_i8(i8 %val) {
 ;
 ; X32-LABEL: test_zext_i8:
 ; X32:       # BB#0:
-; X32-NEXT:    leal 4(%esp), %eax
-; X32-NEXT:    movzbl (%eax), %eax
+; X32-NEXT:    movzbl 4(%esp), %eax
 ; X32-NEXT:    retl
   %r = zext i8 %val to i32
   ret i32 %r
@@ -43,8 +41,7 @@ define i32 @test_zext_i16(i16 %val) {
 ;
 ; X32-LABEL: test_zext_i16:
 ; X32:       # BB#0:
-; X32-NEXT:    leal 4(%esp), %eax
-; X32-NEXT:    movzwl (%eax), %eax
+; X32-NEXT:    movzwl 4(%esp), %eax
 ; X32-NEXT:    retl
   %r = zext i16 %val to i32
   ret i32 %r
@@ -58,8 +55,7 @@ define i32 @test_sext_i8(i8 %val) {
 ;
 ; X32-LABEL: test_sext_i8:
 ; X32:       # BB#0:
-; X32-NEXT:    leal 4(%esp), %eax
-; X32-NEXT:    movsbl (%eax), %eax
+; X32-NEXT:    movsbl 4(%esp), %eax
 ; X32-NEXT:    retl
   %r = sext i8 %val to i32
   ret i32 %r
@@ -73,8 +69,7 @@ define i32 @test_sext_i16(i16 %val) {
 ;
 ; X32-LABEL: test_sext_i16:
 ; X32:       # BB#0:
-; X32-NEXT:    leal 4(%esp), %eax
-; X32-NEXT:    movswl (%eax), %eax
+; X32-NEXT:    movswl 4(%esp), %eax
 ; X32-NEXT:    retl
   %r = sext i16 %val to i32
   ret i32 %r

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll?rev=305691&r1=305690&r2=305691&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll Mon Jun 19 08:12:57 2017
@@ -7,8 +7,7 @@
 define i8 @test_load_i8(i8 * %p1) {
 ; ALL-LABEL: test_load_i8:
 ; ALL:       # BB#0:
-; ALL-NEXT:    leal 4(%esp), %eax
-; ALL-NEXT:    movl (%eax), %eax
+; ALL-NEXT:    movl 4(%esp), %eax
 ; ALL-NEXT:    movb (%eax), %al
 ; ALL-NEXT:    retl
   %r = load i8, i8* %p1
@@ -18,8 +17,7 @@ define i8 @test_load_i8(i8 * %p1) {
 define i16 @test_load_i16(i16 * %p1) {
 ; ALL-LABEL: test_load_i16:
 ; ALL:       # BB#0:
-; ALL-NEXT:    leal 4(%esp), %eax
-; ALL-NEXT:    movl (%eax), %eax
+; ALL-NEXT:    movl 4(%esp), %eax
 ; ALL-NEXT:    movzwl (%eax), %eax
 ; ALL-NEXT:    retl
   %r = load i16, i16* %p1
@@ -29,8 +27,7 @@ define i16 @test_load_i16(i16 * %p1) {
 define i32 @test_load_i32(i32 * %p1) {
 ; ALL-LABEL: test_load_i32:
 ; ALL:       # BB#0:
-; ALL-NEXT:    leal 4(%esp), %eax
-; ALL-NEXT:    movl (%eax), %eax
+; ALL-NEXT:    movl 4(%esp), %eax
 ; ALL-NEXT:    movl (%eax), %eax
 ; ALL-NEXT:    retl
   %r = load i32, i32* %p1
@@ -40,10 +37,8 @@ define i32 @test_load_i32(i32 * %p1) {
 define i8 * @test_store_i8(i8 %val, i8 * %p1) {
 ; ALL-LABEL: test_store_i8:
 ; ALL:       # BB#0:
-; ALL-NEXT:    leal 4(%esp), %eax
-; ALL-NEXT:    movb (%eax), %cl
-; ALL-NEXT:    leal 8(%esp), %eax
-; ALL-NEXT:    movl (%eax), %eax
+; ALL-NEXT:    movb 4(%esp), %cl
+; ALL-NEXT:    movl 8(%esp), %eax
 ; ALL-NEXT:    movb %cl, (%eax)
 ; ALL-NEXT:    retl
   store i8 %val, i8* %p1
@@ -53,10 +48,8 @@ define i8 * @test_store_i8(i8 %val, i8 *
 define i16 * @test_store_i16(i16 %val, i16 * %p1) {
 ; ALL-LABEL: test_store_i16:
 ; ALL:       # BB#0:
-; ALL-NEXT:    leal 4(%esp), %eax
-; ALL-NEXT:    movzwl (%eax), %ecx
-; ALL-NEXT:    leal 8(%esp), %eax
-; ALL-NEXT:    movl (%eax), %eax
+; ALL-NEXT:    movzwl 4(%esp), %ecx
+; ALL-NEXT:    movl 8(%esp), %eax
 ; ALL-NEXT:    movw %cx, (%eax)
 ; ALL-NEXT:    retl
   store i16 %val, i16* %p1
@@ -66,10 +59,8 @@ define i16 * @test_store_i16(i16 %val, i
 define i32 * @test_store_i32(i32 %val, i32 * %p1) {
 ; ALL-LABEL: test_store_i32:
 ; ALL:       # BB#0:
-; ALL-NEXT:    leal 4(%esp), %eax
-; ALL-NEXT:    movl (%eax), %ecx
-; ALL-NEXT:    leal 8(%esp), %eax
-; ALL-NEXT:    movl (%eax), %eax
+; ALL-NEXT:    movl 4(%esp), %ecx
+; ALL-NEXT:    movl 8(%esp), %eax
 ; ALL-NEXT:    movl %ecx, (%eax)
 ; ALL-NEXT:    retl
   store i32 %val, i32* %p1
@@ -79,8 +70,7 @@ define i32 * @test_store_i32(i32 %val, i
 define i32* @test_load_ptr(i32** %ptr1) {
 ; ALL-LABEL: test_load_ptr:
 ; ALL:       # BB#0:
-; ALL-NEXT:    leal 4(%esp), %eax
-; ALL-NEXT:    movl (%eax), %eax
+; ALL-NEXT:    movl 4(%esp), %eax
 ; ALL-NEXT:    movl (%eax), %eax
 ; ALL-NEXT:    retl
   %p = load i32*, i32** %ptr1
@@ -90,10 +80,8 @@ define i32* @test_load_ptr(i32** %ptr1)
 define void @test_store_ptr(i32** %ptr1, i32* %a) {
 ; ALL-LABEL: test_store_ptr:
 ; ALL:       # BB#0:
-; ALL-NEXT:    leal 4(%esp), %eax
-; ALL-NEXT:    movl (%eax), %eax
-; ALL-NEXT:    leal 8(%esp), %ecx
-; ALL-NEXT:    movl (%ecx), %ecx
+; ALL-NEXT:    movl 4(%esp), %eax
+; ALL-NEXT:    movl 8(%esp), %ecx
 ; ALL-NEXT:    movl %ecx, (%eax)
 ; ALL-NEXT:    retl
   store i32* %a, i32** %ptr1

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar.ll?rev=305691&r1=305690&r2=305691&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/memop-scalar.ll Mon Jun 19 08:12:57 2017
@@ -45,11 +45,11 @@ define float @test_load_float(float * %p
 ; SSE-NEXT:    movd %eax, %xmm0
 ; SSE-NEXT:    retq
 ;
-; ALL_AVX-LABEL: test_load_float:
-; ALL_AVX:       # BB#0:
-; ALL_AVX-NEXT:    movl (%rdi), %eax
-; ALL_AVX-NEXT:    vmovd %eax, %xmm0
-; ALL_AVX-NEXT:    retq
+; ALL-LABEL: test_load_float:
+; ALL:       # BB#0:
+; ALL-NEXT:    movl (%rdi), %eax
+; ALL-NEXT:    movd %eax, %xmm0
+; ALL-NEXT:    retq
   %r = load float, float* %p1
   ret float %r
 }
@@ -61,11 +61,11 @@ define double @test_load_double(double *
 ; SSE-NEXT:    movq %rax, %xmm0
 ; SSE-NEXT:    retq
 ;
-; ALL_AVX-LABEL: test_load_double:
-; ALL_AVX:       # BB#0:
-; ALL_AVX-NEXT:    movq (%rdi), %rax
-; ALL_AVX-NEXT:    vmovq %rax, %xmm0
-; ALL_AVX-NEXT:    retq
+; ALL-LABEL: test_load_double:
+; ALL:       # BB#0:
+; ALL-NEXT:    movq (%rdi), %rax
+; ALL-NEXT:    movq %rax, %xmm0
+; ALL-NEXT:    retq
   %r = load double, double* %p1
   ret double %r
 }
@@ -122,7 +122,6 @@ define double * @test_store_double(doubl
 ; SSE_GREEDY-NEXT:    movsd %xmm0, (%rdi)
 ; SSE_GREEDY-NEXT:    movq %rdi, %rax
 ; SSE_GREEDY-NEXT:    retq
-;
   store double %val, double* %p1
   ret double * %p1;
 }
@@ -144,3 +143,30 @@ define void @test_store_ptr(i32** %ptr1,
   store i32* %a, i32** %ptr1
   ret void
 }
+
+define i32 @test_gep_folding(i32* %arr, i32 %val) {
+; ALL-LABEL: test_gep_folding:
+; ALL:       # BB#0:
+; ALL-NEXT:    movl %esi, 20(%rdi)
+; ALL-NEXT:    movl 20(%rdi), %eax
+; ALL-NEXT:    retq
+  %arrayidx = getelementptr i32, i32* %arr, i32 5
+  store i32 %val, i32* %arrayidx
+  %r = load i32, i32* %arrayidx
+  ret i32 %r
+}
+
+; check that a large gep index is not folded into the memory operand
+define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) {
+; ALL-LABEL: test_gep_folding_largeGepIndex:
+; ALL:       # BB#0:
+; ALL-NEXT:    movabsq $228719476720, %rax # imm = 0x3540BE3FF0
+; ALL-NEXT:    leaq (%rdi,%rax), %rax
+; ALL-NEXT:    movl %esi, (%rax)
+; ALL-NEXT:    movl (%rax), %eax
+; ALL-NEXT:    retq
+  %arrayidx = getelementptr i32, i32* %arr, i64 57179869180
+  store i32 %val, i32* %arrayidx
+  %r = load i32, i32* %arrayidx
+  ret i32 %r
+}
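
As a quick sanity check on why this second test cannot fold: the byte displacement for GEP index 57179869180 into an i32 array is 57179869180 * 4 = 228719476720 (0x3540BE3FF0), which is larger than INT32_MAX, so the isInt<32> guard in X86SelectAddress rejects it and the address is instead materialized with a movabsq + leaq sequence, as the checked assembly above shows. The standalone snippet below (plain arithmetic, not LLVM code) verifies that bound.

  #include <cstdint>
  #include <iostream>

  int main() {
    const int64_t Index = 57179869180;   // GEP index from the test
    const int64_t Disp = Index * 4;      // i32 elements are 4 bytes each
    // Same check as llvm::isInt<32>(Disp).
    const bool Fits = Disp >= INT32_MIN && Disp <= INT32_MAX;
    std::cout << Disp << (Fits ? " fits" : " does not fit")
              << " in a signed 32-bit displacement\n";
    return 0;
  }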

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir?rev=305691&r1=305690&r2=305691&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir Mon Jun 19 08:12:57 2017
@@ -50,7 +50,7 @@ legalized:       true
 regBankSelected: true
 # ALL:      registers:
 # ALL-NEXT:   - { id: 0, class: gr32, preferred-register: '' }
-# ALL-NEXT:   - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT:   - { id: 1, class: gpr, preferred-register: '' }
 # ALL-NEXT:   - { id: 2, class: gr8, preferred-register: '' }
 registers:
   - { id: 0, class: gpr }
@@ -58,8 +58,7 @@ registers:
   - { id: 2, class: gpr }
 fixedStack:
   - { id: 0, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
-# ALL:          %1 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT:     %0 = MOV32rm %1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
+# ALL:          %0 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
 # ALL-NEXT:     %2 = MOV8rm %0, 1, _, 0, _ :: (load 1 from %ir.p1)
 # ALL-NEXT:     %al = COPY %2
 # ALL-NEXT:     RET 0, implicit %al
@@ -80,7 +79,7 @@ legalized:       true
 regBankSelected: true
 # ALL:      registers:
 # ALL-NEXT:   - { id: 0, class: gr32, preferred-register: '' }
-# ALL-NEXT:   - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT:   - { id: 1, class: gpr, preferred-register: '' }
 # ALL-NEXT:   - { id: 2, class: gr16, preferred-register: '' }
 registers:
   - { id: 0, class: gpr }
@@ -88,8 +87,7 @@ registers:
   - { id: 2, class: gpr }
 fixedStack:
   - { id: 0, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
-# ALL:          %1 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT:     %0 = MOV32rm %1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
+# ALL:          %0 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
 # ALL-NEXT:     %2 = MOV16rm %0, 1, _, 0, _ :: (load 2 from %ir.p1)
 # ALL-NEXT:     %ax = COPY %2
 # ALL-NEXT:     RET 0, implicit %ax
@@ -110,7 +108,7 @@ legalized:       true
 regBankSelected: true
 # ALL:      registers:
 # ALL-NEXT:   - { id: 0, class: gr32, preferred-register: '' }
-# ALL-NEXT:   - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT:   - { id: 1, class: gpr, preferred-register: '' }
 # ALL-NEXT:   - { id: 2, class: gr32, preferred-register: '' }
 registers:
   - { id: 0, class: gpr }
@@ -118,8 +116,7 @@ registers:
   - { id: 2, class: gpr }
 fixedStack:
   - { id: 0, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
-# ALL:          %1 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT:     %0 = MOV32rm %1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
+# ALL:          %0 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
 # ALL-NEXT:     %2 = MOV32rm %0, 1, _, 0, _ :: (load 4 from %ir.p1)
 # ALL-NEXT:     %eax = COPY %2
 # ALL-NEXT:     RET 0, implicit %eax
@@ -141,8 +138,8 @@ regBankSelected: true
 # ALL:      registers:
 # ALL-NEXT:   - { id: 0, class: gr8, preferred-register: '' }
 # ALL-NEXT:   - { id: 1, class: gr32, preferred-register: '' }
-# ALL-NEXT:   - { id: 2, class: gr32, preferred-register: '' }
-# ALL-NEXT:   - { id: 3, class: gr32, preferred-register: '' }
+# ALL-NEXT:   - { id: 2, class: gpr, preferred-register: '' }
+# ALL-NEXT:   - { id: 3, class: gpr, preferred-register: '' }
 registers:
   - { id: 0, class: gpr }
   - { id: 1, class: gpr }
@@ -151,10 +148,8 @@ registers:
 fixedStack:
   - { id: 0, offset: 4, size: 4, alignment: 4, isImmutable: true, isAliased: false }
   - { id: 1, offset: 0, size: 1, alignment: 16, isImmutable: true, isAliased: false }
-# ALL:          %2 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT:     %0 = MOV8rm %2, 1, _, 0, _ :: (invariant load 1 from %fixed-stack.0, align 0)
-# ALL-NEXT:     %3 = LEA32r %fixed-stack.1, 1, _, 0, _
-# ALL-NEXT:     %1 = MOV32rm %3, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
+# ALL:          %0 = MOV8rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 1 from %fixed-stack.0, align 0)
+# ALL-NEXT:     %1 = MOV32rm %fixed-stack.1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
 # ALL-NEXT:     MOV8mr %1, 1, _, 0, _, %0 :: (store 1 into %ir.p1)
 # ALL-NEXT:     %eax = COPY %1
 # ALL-NEXT:     RET 0, implicit %eax
@@ -178,8 +173,8 @@ regBankSelected: true
 # ALL:      registers:
 # ALL-NEXT:   - { id: 0, class: gr16, preferred-register: '' }
 # ALL-NEXT:   - { id: 1, class: gr32, preferred-register: '' }
-# ALL-NEXT:   - { id: 2, class: gr32, preferred-register: '' }
-# ALL-NEXT:   - { id: 3, class: gr32, preferred-register: '' }
+# ALL-NEXT:   - { id: 2, class: gpr, preferred-register: '' }
+# ALL-NEXT:   - { id: 3, class: gpr, preferred-register: '' }
 registers:
   - { id: 0, class: gpr }
   - { id: 1, class: gpr }
@@ -188,10 +183,8 @@ registers:
 fixedStack:
   - { id: 0, offset: 4, size: 4, alignment: 4, isImmutable: true, isAliased: false }
   - { id: 1, offset: 0, size: 2, alignment: 16, isImmutable: true, isAliased: false }
-# ALL:          %2 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT:     %0 = MOV16rm %2, 1, _, 0, _ :: (invariant load 2 from %fixed-stack.0, align 0)
-# ALL-NEXT:     %3 = LEA32r %fixed-stack.1, 1, _, 0, _
-# ALL-NEXT:     %1 = MOV32rm %3, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
+# ALL:          %0 = MOV16rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 2 from %fixed-stack.0, align 0)
+# ALL-NEXT:     %1 = MOV32rm %fixed-stack.1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
 # ALL-NEXT:     MOV16mr %1, 1, _, 0, _, %0 :: (store 2 into %ir.p1)
 # ALL-NEXT:     %eax = COPY %1
 # ALL-NEXT:     RET 0, implicit %eax
@@ -215,8 +208,8 @@ regBankSelected: true
 # ALL:      registers:
 # ALL-NEXT:   - { id: 0, class: gr32, preferred-register: '' }
 # ALL-NEXT:   - { id: 1, class: gr32, preferred-register: '' }
-# ALL-NEXT:   - { id: 2, class: gr32, preferred-register: '' }
-# ALL-NEXT:   - { id: 3, class: gr32, preferred-register: '' }
+# ALL-NEXT:   - { id: 2, class: gpr, preferred-register: '' }
+# ALL-NEXT:   - { id: 3, class: gpr, preferred-register: '' }
 registers:
   - { id: 0, class: gpr }
   - { id: 1, class: gpr }
@@ -225,10 +218,8 @@ registers:
 fixedStack:
   - { id: 0, offset: 4, size: 4, alignment: 4, isImmutable: true, isAliased: false }
   - { id: 1, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
-# ALL:          %2 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT:     %0 = MOV32rm %2, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
-# ALL-NEXT:     %3 = LEA32r %fixed-stack.1, 1, _, 0, _
-# ALL-NEXT:     %1 = MOV32rm %3, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
+# ALL:          %0 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
+# ALL-NEXT:     %1 = MOV32rm %fixed-stack.1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
 # ALL-NEXT:     MOV32mr %1, 1, _, 0, _, %0 :: (store 4 into %ir.p1)
 # ALL-NEXT:     %eax = COPY %1
 # ALL-NEXT:     RET 0, implicit %eax
@@ -251,7 +242,7 @@ legalized:       true
 regBankSelected: true
 # ALL:      registers:
 # ALL-NEXT:   - { id: 0, class: gr32, preferred-register: '' }
-# ALL-NEXT:   - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT:   - { id: 1, class: gpr, preferred-register: '' }
 # ALL-NEXT:   - { id: 2, class: gr32, preferred-register: '' }
 registers:
   - { id: 0, class: gpr }
@@ -259,8 +250,7 @@ registers:
   - { id: 2, class: gpr }
 fixedStack:
   - { id: 0, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
-# ALL:          %1 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT:     %0 = MOV32rm %1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
+# ALL:          %0 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
 # ALL-NEXT:     %2 = MOV32rm %0, 1, _, 0, _ :: (load 4 from %ir.ptr1)
 # ALL-NEXT:     %eax = COPY %2
 # ALL-NEXT:     RET 0, implicit %eax
@@ -282,8 +272,8 @@ regBankSelected: true
 # ALL:      registers:
 # ALL-NEXT:   - { id: 0, class: gr32, preferred-register: '' }
 # ALL-NEXT:   - { id: 1, class: gr32, preferred-register: '' }
-# ALL-NEXT:   - { id: 2, class: gr32, preferred-register: '' }
-# ALL-NEXT:   - { id: 3, class: gr32, preferred-register: '' }
+# ALL-NEXT:   - { id: 2, class: gpr, preferred-register: '' }
+# ALL-NEXT:   - { id: 3, class: gpr, preferred-register: '' }
 registers:
   - { id: 0, class: gpr }
   - { id: 1, class: gpr }
@@ -292,10 +282,8 @@ registers:
 fixedStack:
   - { id: 0, offset: 4, size: 4, alignment: 4, isImmutable: true, isAliased: false }
   - { id: 1, offset: 0, size: 4, alignment: 16, isImmutable: true, isAliased: false }
-# ALL:          %2 = LEA32r %fixed-stack.0, 1, _, 0, _
-# ALL-NEXT:     %0 = MOV32rm %2, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
-# ALL-NEXT:     %3 = LEA32r %fixed-stack.1, 1, _, 0, _
-# ALL-NEXT:     %1 = MOV32rm %3, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
+# ALL:          %0 = MOV32rm %fixed-stack.0, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.0, align 0)
+# ALL-NEXT:     %1 = MOV32rm %fixed-stack.1, 1, _, 0, _ :: (invariant load 4 from %fixed-stack.1, align 0)
 # ALL-NEXT:     MOV32mr %0, 1, _, 0, _, %1 :: (store 4 into %ir.ptr1)
 # ALL-NEXT:     RET 0
 body:             |

Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir?rev=305691&r1=305690&r2=305691&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir Mon Jun 19 08:12:57 2017
@@ -83,6 +83,20 @@
     store i32* %a, i32** %ptr1
     ret void
   }
+
+  define i32 @test_gep_folding(i32* %arr, i32 %val) {
+    %arrayidx = getelementptr i32, i32* %arr, i32 5
+    store i32 %val, i32* %arrayidx
+    %r = load i32, i32* %arrayidx
+    ret i32 %r
+  }
+
+  define i32 @test_gep_folding_largeGepIndex(i32* %arr, i32 %val) #0 {
+    %arrayidx = getelementptr i32, i32* %arr, i64 57179869180
+    store i32 %val, i32* %arrayidx
+    %r = load i32, i32* %arrayidx
+    ret i32 %r
+  }
 ...
 ---
 # ALL-LABEL: name:            test_load_i8
@@ -498,3 +512,81 @@ body:             |
     RET 0
 
 ...
+---
+name:            test_gep_folding
+# ALL-LABEL: name:  test_gep_folding
+alignment:       4
+legalized:       true
+regBankSelected: true
+# ALL:              registers:
+# ALL-NEXT:           - { id: 0, class: gr64, preferred-register: '' }
+# ALL-NEXT:           - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT:           - { id: 2, class: gpr, preferred-register: '' }
+# ALL-NEXT:           - { id: 3, class: gpr, preferred-register: '' }
+# ALL-NEXT:           - { id: 4, class: gr32, preferred-register: '' }
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+  - { id: 2, class: gpr }
+  - { id: 3, class: gpr }
+  - { id: 4, class: gpr }
+# ALL:                  %0 = COPY %rdi
+# ALL-NEXT:             %1 = COPY %esi
+# ALL-NEXT:             MOV32mr %0, 1, _, 20, _, %1 :: (store 4 into %ir.arrayidx)
+# ALL-NEXT:             %4 = MOV32rm %0, 1, _, 20, _ :: (load 4 from %ir.arrayidx)
+# ALL-NEXT:             %eax = COPY %4
+# ALL-NEXT:             RET 0, implicit %eax
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: %esi, %rdi
+
+    %0(p0) = COPY %rdi
+    %1(s32) = COPY %esi
+    %2(s64) = G_CONSTANT i64 20
+    %3(p0) = G_GEP %0, %2(s64)
+    G_STORE %1(s32), %3(p0) :: (store 4 into %ir.arrayidx)
+    %4(s32) = G_LOAD %3(p0) :: (load 4 from %ir.arrayidx)
+    %eax = COPY %4(s32)
+    RET 0, implicit %eax
+
+...
+---
+name:            test_gep_folding_largeGepIndex
+# ALL-LABEL: name:  test_gep_folding_largeGepIndex
+alignment:       4
+legalized:       true
+regBankSelected: true
+# ALL:              registers:
+# ALL-NEXT:           - { id: 0, class: gr64, preferred-register: '' }
+# ALL-NEXT:           - { id: 1, class: gr32, preferred-register: '' }
+# ALL-NEXT:           - { id: 2, class: gr64_nosp, preferred-register: '' }
+# ALL-NEXT:           - { id: 3, class: gr64, preferred-register: '' }
+# ALL-NEXT:           - { id: 4, class: gr32, preferred-register: '' }
+registers:
+  - { id: 0, class: gpr }
+  - { id: 1, class: gpr }
+  - { id: 2, class: gpr }
+  - { id: 3, class: gpr }
+  - { id: 4, class: gpr }
+# ALL:                  %0 = COPY %rdi
+# ALL-NEXT:             %1 = COPY %esi
+# ALL-NEXT:             %2 = MOV64ri 228719476720
+# ALL-NEXT:             %3 = LEA64r %0, 1, %2, 0, _
+# ALL-NEXT:             MOV32mr %3, 1, _, 0, _, %1 :: (store 4 into %ir.arrayidx)
+# ALL-NEXT:             %4 = MOV32rm %3, 1, _, 0, _ :: (load 4 from %ir.arrayidx)
+# ALL-NEXT:             %eax = COPY %4
+# ALL-NEXT:             RET 0, implicit %eax
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: %esi, %rdi
+
+    %0(p0) = COPY %rdi
+    %1(s32) = COPY %esi
+    %2(s64) = G_CONSTANT i64 228719476720
+    %3(p0) = G_GEP %0, %2(s64)
+    G_STORE %1(s32), %3(p0) :: (store 4 into %ir.arrayidx)
+    %4(s32) = G_LOAD %3(p0) :: (load 4 from %ir.arrayidx)
+    %eax = COPY %4(s32)
+    RET 0, implicit %eax
+
+...
