[llvm] r296651 - [DAGCombiner] Support {a|s}ext, {a|z|s}ext load nodes in load combine

Artur Pilipenko via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 1 10:12:29 PST 2017


Author: apilipenko
Date: Wed Mar  1 12:12:29 2017
New Revision: 296651

URL: http://llvm.org/viewvc/llvm-project?rev=296651&view=rev
Log:
[DAGCombiner] Support {a|s}ext, {a|z|s}ext load nodes in load combine

Resubmit r295336 after the bug with non-zero offset patterns on BE targets is fixed (r296336).

Support {a|s}ext, {a|z|s}ext load nodes as a part of load combine patterns.

Reviewed By: filcab

Differential Revision: https://reviews.llvm.org/D29591

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/trunk/test/CodeGen/AArch64/load-combine-big-endian.ll
    llvm/trunk/test/CodeGen/AArch64/load-combine.ll
    llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll
    llvm/trunk/test/CodeGen/ARM/fp16-promote.ll
    llvm/trunk/test/CodeGen/ARM/load-combine-big-endian.ll
    llvm/trunk/test/CodeGen/ARM/load-combine.ll
    llvm/trunk/test/CodeGen/PowerPC/ppc64le-aggregates.ll
    llvm/trunk/test/CodeGen/X86/load-combine.ll

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=296651&r1=296650&r2=296651&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Wed Mar  1 12:12:29 2017
@@ -4506,6 +4506,8 @@ const Optional<ByteProvider> calculateBy
                : calculateByteProvider(Op->getOperand(0), Index - ByteShift,
                                        Depth + 1);
   }
+  case ISD::ANY_EXTEND:
+  case ISD::SIGN_EXTEND:
   case ISD::ZERO_EXTEND: {
     SDValue NarrowOp = Op->getOperand(0);
     unsigned NarrowBitWidth = NarrowOp.getScalarValueSizeInBits();
@@ -4513,22 +4515,32 @@ const Optional<ByteProvider> calculateBy
       return None;
     uint64_t NarrowByteWidth = NarrowBitWidth / 8;
 
-    return Index >= NarrowByteWidth
-               ? ByteProvider::getConstantZero()
-               : calculateByteProvider(NarrowOp, Index, Depth + 1);
+    if (Index >= NarrowByteWidth)
+      return Op.getOpcode() == ISD::ZERO_EXTEND
+                 ? Optional<ByteProvider>(ByteProvider::getConstantZero())
+                 : None;
+    else
+      return calculateByteProvider(NarrowOp, Index, Depth + 1);
   }
   case ISD::BSWAP:
     return calculateByteProvider(Op->getOperand(0), ByteWidth - Index - 1,
                                  Depth + 1);
   case ISD::LOAD: {
     auto L = cast<LoadSDNode>(Op.getNode());
+    if (L->isVolatile() || L->isIndexed())
+      return None;
 
-    // TODO: support ext loads
-    if (L->isVolatile() || L->isIndexed() ||
-        L->getExtensionType() != ISD::NON_EXTLOAD)
+    unsigned NarrowBitWidth = L->getMemoryVT().getSizeInBits();
+    if (NarrowBitWidth % 8 != 0)
       return None;
+    uint64_t NarrowByteWidth = NarrowBitWidth / 8;
 
-    return ByteProvider::getMemory(L, Index);
+    if (Index >= NarrowByteWidth)
+      return L->getExtensionType() == ISD::ZEXTLOAD
+                 ? Optional<ByteProvider>(ByteProvider::getConstantZero())
+                 : None;
+    else
+      return ByteProvider::getMemory(L, Index);
   }
   }
 
@@ -4617,7 +4629,6 @@ SDValue DAGCombiner::MatchLoadCombine(SD
 
     LoadSDNode *L = P->Load;
     assert(L->hasNUsesOfValue(1, 0) && !L->isVolatile() && !L->isIndexed() &&
-           (L->getExtensionType() == ISD::NON_EXTLOAD) &&
            "Must be enforced by calculateByteProvider");
     assert(L->getOffset().isUndef() && "Unindexed load must have undef offset");
 

Modified: llvm/trunk/test/CodeGen/AArch64/load-combine-big-endian.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/load-combine-big-endian.ll?rev=296651&r1=296650&r2=296651&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/load-combine-big-endian.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/load-combine-big-endian.ll Wed Mar  1 12:12:29 2017
@@ -336,11 +336,8 @@ define i32 @load_i32_by_bswap_i16(i32* %
 ; (i32) p[1] | (sext(p[0] << 16) to i32)
 define i32 @load_i32_by_sext_i16(i32* %arg) {
 ; CHECK-LABEL: load_i32_by_sext_i16:
-; CHECK: ldrh    w8, [x0]
-; CHECK-NEXT: ldrh  w0, [x0, #2]
-; CHECK-NEXT: bfi w0, w8, #16, #16
+; CHECK: ldr   w0, [x0]
 ; CHECK-NEXT: ret
-
   %tmp = bitcast i32* %arg to i16*
   %tmp1 = load i16, i16* %tmp, align 4
   %tmp2 = sext i16 %tmp1 to i32
@@ -399,7 +396,6 @@ define i32 @load_i32_by_i8_base_offset_i
 ; CHECK-NEXT: ldur  w8, [x8, #13]
 ; CHECK-NEXT: rev w0, w8
 ; CHECK-NEXT: ret
-
   %tmp = add nuw nsw i32 %i, 4
   %tmp2 = add nuw nsw i32 %i, 3
   %tmp3 = add nuw nsw i32 %i, 2

Modified: llvm/trunk/test/CodeGen/AArch64/load-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/load-combine.ll?rev=296651&r1=296650&r2=296651&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/load-combine.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/load-combine.ll Wed Mar  1 12:12:29 2017
@@ -324,12 +324,8 @@ define i32 @load_i32_by_bswap_i16(i32* %
 ; (i32) p[0] | (sext(p[1] << 16) to i32)
 define i32 @load_i32_by_sext_i16(i32* %arg) {
 ; CHECK-LABEL: load_i32_by_sext_i16:
-; CHECK: ldrh    w8, [x0]
-; CHECK-NEXT: ldrh  w9, [x0, #2]
-; CHECK-NEXT: bfi w8, w9, #16, #16
-; CHECK-NEXT: mov  w0, w8
+; CHECK: ldr   w0, [x0]
 ; CHECK-NEXT: ret
-
   %tmp = bitcast i32* %arg to i16*
   %tmp1 = load i16, i16* %tmp, align 4
   %tmp2 = zext i16 %tmp1 to i32
@@ -386,7 +382,6 @@ define i32 @load_i32_by_i8_base_offset_i
 ; CHECK: add x8, x0, w1, uxtw
 ; CHECK-NEXT: ldur  w0, [x8, #13]
 ; CHECK-NEXT: ret
-
   %tmp = add nuw nsw i32 %i, 4
   %tmp2 = add nuw nsw i32 %i, 3
   %tmp3 = add nuw nsw i32 %i, 2

Modified: llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll?rev=296651&r1=296650&r2=296651&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll Wed Mar  1 12:12:29 2017
@@ -1,5 +1,5 @@
-; RUN: llc -verify-machineinstrs -march=amdgcn -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=GCN-NO-TONGA %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=GCN-TONGA %s
 
 ; FIXME: Broken on evergreen
 ; FIXME: For some reason the 8 and 16 vectors are being stored as
@@ -219,10 +219,7 @@ define void @dynamic_insertelement_v3i16
 
 ; GCN: s_waitcnt
 
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
-; GCN: buffer_load_ushort
+; GCN: buffer_load_dwordx2
 
 ; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off
 define void @dynamic_insertelement_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, i32 %b) nounwind {
@@ -240,8 +237,9 @@ define void @dynamic_insertelement_v4i16
 
 ; GCN: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
 
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-TONGA: buffer_load_ushort
 
 ; GCN: buffer_store_short v{{[0-9]+}}, off
 define void @dynamic_insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> %a, i32 %b) nounwind {
@@ -259,9 +257,11 @@ define void @dynamic_insertelement_v2i8(
 ; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:5
 ; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offset:6
 
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-TONGA: buffer_load_ushort
+; GCN-TONGA: buffer_load_ubyte
 
 ; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off
 ; GCN-DAG: buffer_store_short v{{[0-9]+}}, off
@@ -284,10 +284,11 @@ define void @dynamic_insertelement_v3i8(
 
 ; GCN: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
 
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
-; GCN: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-NO-TONGA: buffer_load_ubyte
+; GCN-TONGA: buffer_load_dword
 
 ; GCN: buffer_store_dword v{{[0-9]+}}, off
 define void @dynamic_insertelement_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, i32 %b) nounwind {

Modified: llvm/trunk/test/CodeGen/ARM/fp16-promote.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fp16-promote.ll?rev=296651&r1=296650&r2=296651&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fp16-promote.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/fp16-promote.ll Wed Mar  1 12:12:29 2017
@@ -847,21 +847,15 @@ define void @test_insertelement(half* %p
 }
 
 ; CHECK-ALL-LABEL: test_extractelement:
+; CHECK-VFP: push {{{.*}}, lr}
 ; CHECK-VFP: sub sp, sp, #8
-; CHECK-VFP: ldrh
-; CHECK-VFP: ldrh
-; CHECK-VFP: orr
-; CHECK-VFP: str
-; CHECK-VFP: ldrh
-; CHECK-VFP: ldrh
-; CHECK-VFP: orr
-; CHECK-VFP: str
+; CHECK-VFP: ldrd
 ; CHECK-VFP: mov
 ; CHECK-VFP: orr
 ; CHECK-VFP: ldrh
 ; CHECK-VFP: strh
 ; CHECK-VFP: add sp, sp, #8
-; CHECK-VFP: bx lr
+; CHECK-VFP: pop {{{.*}}, pc}
 ; CHECK-NOVFP: ldrh
 ; CHECK-NOVFP: strh
 ; CHECK-NOVFP: ldrh

Modified: llvm/trunk/test/CodeGen/ARM/load-combine-big-endian.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/load-combine-big-endian.ll?rev=296651&r1=296650&r2=296651&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/load-combine-big-endian.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/load-combine-big-endian.ll Wed Mar  1 12:12:29 2017
@@ -456,17 +456,12 @@ define i32 @load_i32_by_bswap_i16(i32* %
 ; (i32) p[1] | (sext(p[0] << 16) to i32)
 define i32 @load_i32_by_sext_i16(i32* %arg) {
 ; CHECK-LABEL: load_i32_by_sext_i16:
-; CHECK: ldrh  r1, [r0]
-; CHECK-NEXT: ldrh  r0, [r0, #2]
-; CHECK-NEXT: orr r0, r0, r1, lsl #16
+; CHECK: ldr  r0, [r0]
 ; CHECK-NEXT: mov pc, lr
-
+;
 ; CHECK-ARMv6-LABEL: load_i32_by_sext_i16:
-; CHECK-ARMv6: ldrh  r1, [r0]
-; CHECK-ARMv6-NEXT: ldrh  r0, [r0, #2]
-; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #16
+; CHECK-ARMv6: ldr r0, [r0]
 ; CHECK-ARMv6-NEXT: bx  lr
-  
   %tmp = bitcast i32* %arg to i16*
   %tmp1 = load i16, i16* %tmp, align 4
   %tmp2 = sext i16 %tmp1 to i32

Modified: llvm/trunk/test/CodeGen/ARM/load-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/load-combine.ll?rev=296651&r1=296650&r2=296651&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/load-combine.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/load-combine.ll Wed Mar  1 12:12:29 2017
@@ -414,17 +414,12 @@ define i32 @load_i32_by_bswap_i16(i32* %
 ; (i32) p[0] | (sext(p[1] << 16) to i32)
 define i32 @load_i32_by_sext_i16(i32* %arg) {
 ; CHECK-LABEL: load_i32_by_sext_i16:
-; CHECK: ldrh  r1, [r0, #2]
-; CHECK-NEXT: ldrh  r0, [r0]
-; CHECK-NEXT: orr r0, r0, r1, lsl #16
+; CHECK: ldr  r0, [r0]
 ; CHECK-NEXT: mov pc, lr
 ;
 ; CHECK-ARMv6-LABEL: load_i32_by_sext_i16:
-; CHECK-ARMv6: ldrh  r1, [r0, #2]
-; CHECK-ARMv6-NEXT: ldrh  r0, [r0]
-; CHECK-ARMv6-NEXT: orr r0, r0, r1, lsl #16
-; CHECK-ARMv6-NEXT: bx  lr
-  
+; CHECK-ARMv6: ldr  r0, [r0]
+; CHECK-ARMv6-NEXT: bx lr
   %tmp = bitcast i32* %arg to i16*
   %tmp1 = load i16, i16* %tmp, align 4
   %tmp2 = zext i16 %tmp1 to i32
@@ -492,7 +487,6 @@ define i32 @load_i32_by_i8_base_offset_i
 ; CHECK-ARMv6: add r0, r0, r1
 ; CHECK-ARMv6-NEXT: ldr r0, [r0, #13]
 ; CHECK-ARMv6-NEXT: bx  lr
-
   %tmp = add nuw nsw i32 %i, 4
   %tmp2 = add nuw nsw i32 %i, 3
   %tmp3 = add nuw nsw i32 %i, 2

Modified: llvm/trunk/test/CodeGen/PowerPC/ppc64le-aggregates.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/ppc64le-aggregates.ll?rev=296651&r1=296650&r2=296651&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/ppc64le-aggregates.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/ppc64le-aggregates.ll Wed Mar  1 12:12:29 2017
@@ -284,10 +284,7 @@ entry:
 ; CHECK-DAG: lfs 12, 12({{[0-9]+}})
 ; CHECK-DAG: lfs 13, 16({{[0-9]+}})
 
-; CHECK-DAG: lwz [[REG0:[0-9]+]], 0({{[0-9]+}})
-; CHECK-DAG: lwz [[REG1:[0-9]+]], 4({{[0-9]+}})
-; CHECK-DAG: sldi [[REG2:[0-9]+]], [[REG1]], 32
-; CHECK-DAG: or 10, [[REG0]], [[REG2]]
+; CHECK-DAG: ld 10, 0({{[0-9]+}})
 ; CHECK: bl test2
 
 declare void @test2([8 x float], [5 x float], [2 x float])

Modified: llvm/trunk/test/CodeGen/X86/load-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/load-combine.ll?rev=296651&r1=296650&r2=296651&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/load-combine.ll (original)
+++ llvm/trunk/test/CodeGen/X86/load-combine.ll Wed Mar  1 12:12:29 2017
@@ -772,20 +772,25 @@ define i32 @load_i32_by_i8_bswap_base_in
 ; BSWAP-NEXT:    bswapl %eax
 ; BSWAP-NEXT:    retl
 ;
-; CHECK64-LABEL: load_i32_by_i8_bswap_base_index_offset:
-; CHECK64:       # BB#0:
-; CHECK64-NEXT:    movslq %esi, %rax
-; CHECK64-NEXT:    movzbl (%rdi,%rax), %ecx
-; CHECK64-NEXT:    shll $24, %ecx
-; CHECK64-NEXT:    movzbl 1(%rdi,%rax), %edx
-; CHECK64-NEXT:    shll $16, %edx
-; CHECK64-NEXT:    orl %ecx, %edx
-; CHECK64-NEXT:    movzbl 2(%rdi,%rax), %ecx
-; CHECK64-NEXT:    shll $8, %ecx
-; CHECK64-NEXT:    orl %edx, %ecx
-; CHECK64-NEXT:    movzbl 3(%rdi,%rax), %eax
-; CHECK64-NEXT:    orl %ecx, %eax
-; CHECK64-NEXT:    retq
+; MOVBE-LABEL: load_i32_by_i8_bswap_base_index_offset:
+; MOVBE:       # BB#0:
+; MOVBE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; MOVBE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; MOVBE-NEXT:    movbel (%ecx,%eax), %eax
+; MOVBE-NEXT:    retl
+;
+; BSWAP64-LABEL: load_i32_by_i8_bswap_base_index_offset:
+; BSWAP64:       # BB#0:
+; BSWAP64-NEXT:    movslq %esi, %rax
+; BSWAP64-NEXT:    movl (%rdi,%rax), %eax
+; BSWAP64-NEXT:    bswapl %eax
+; BSWAP64-NEXT:    retq
+;
+; MOVBE64-LABEL: load_i32_by_i8_bswap_base_index_offset:
+; MOVBE64:       # BB#0:
+; MOVBE64-NEXT:    movslq %esi, %rax
+; MOVBE64-NEXT:    movbel (%rdi,%rax), %eax
+; MOVBE64-NEXT:    retq
   %tmp = bitcast i32* %arg to i8*
   %tmp2 = getelementptr inbounds i8, i8* %tmp, i32 %arg1
   %tmp3 = load i8, i8* %tmp2, align 1
@@ -886,18 +891,12 @@ define i32 @load_i32_by_sext_i16(i32* %a
 ; CHECK-LABEL: load_i32_by_sext_i16:
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT:    movzwl (%eax), %ecx
-; CHECK-NEXT:    movzwl 2(%eax), %eax
-; CHECK-NEXT:    shll $16, %eax
-; CHECK-NEXT:    orl %ecx, %eax
+; CHECK-NEXT:    movl (%eax), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_sext_i16:
 ; CHECK64:       # BB#0:
-; CHECK64-NEXT:    movzwl (%rdi), %ecx
-; CHECK64-NEXT:    movzwl 2(%rdi), %eax
-; CHECK64-NEXT:    shll $16, %eax
-; CHECK64-NEXT:    orl %ecx, %eax
+; CHECK64-NEXT:    movl (%rdi), %eax
 ; CHECK64-NEXT:    retq
   %tmp = bitcast i32* %arg to i16*
   %tmp1 = load i16, i16* %tmp, align 1
@@ -916,24 +915,9 @@ define i32 @load_i32_by_sext_i16(i32* %a
 define i32 @load_i32_by_i8_base_offset_index(i8* %arg, i32 %i) {
 ; CHECK-LABEL: load_i32_by_i8_base_offset_index:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    pushl %esi
-; CHECK-NEXT:  .Lcfi4:
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:  .Lcfi5:
-; CHECK-NEXT:    .cfi_offset %esi, -8
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    movzbl 12(%eax,%ecx), %edx
-; CHECK-NEXT:    movzbl 13(%eax,%ecx), %esi
-; CHECK-NEXT:    shll $8, %esi
-; CHECK-NEXT:    orl %edx, %esi
-; CHECK-NEXT:    movzbl 14(%eax,%ecx), %edx
-; CHECK-NEXT:    shll $16, %edx
-; CHECK-NEXT:    orl %esi, %edx
-; CHECK-NEXT:    movzbl 15(%eax,%ecx), %eax
-; CHECK-NEXT:    shll $24, %eax
-; CHECK-NEXT:    orl %edx, %eax
-; CHECK-NEXT:    popl %esi
+; CHECK-NEXT:    movl 12(%eax,%ecx), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_base_offset_index:
@@ -976,24 +960,9 @@ define i32 @load_i32_by_i8_base_offset_i
 define i32 @load_i32_by_i8_base_offset_index_2(i8* %arg, i32 %i) {
 ; CHECK-LABEL: load_i32_by_i8_base_offset_index_2:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    pushl %esi
-; CHECK-NEXT:  .Lcfi6:
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:  .Lcfi7:
-; CHECK-NEXT:    .cfi_offset %esi, -8
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    movzbl 13(%eax,%ecx), %edx
-; CHECK-NEXT:    movzbl 14(%eax,%ecx), %esi
-; CHECK-NEXT:    shll $8, %esi
-; CHECK-NEXT:    orl %edx, %esi
-; CHECK-NEXT:    movzbl 15(%eax,%ecx), %edx
-; CHECK-NEXT:    shll $16, %edx
-; CHECK-NEXT:    orl %esi, %edx
-; CHECK-NEXT:    movzbl 16(%eax,%ecx), %eax
-; CHECK-NEXT:    shll $24, %eax
-; CHECK-NEXT:    orl %edx, %eax
-; CHECK-NEXT:    popl %esi
+; CHECK-NEXT:    movl 13(%eax,%ecx), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_base_offset_index_2:
@@ -1047,39 +1016,15 @@ define i32 @load_i32_by_i8_base_offset_i
 define i32 @load_i32_by_i8_zaext_loads(i8* %arg, i32 %arg1) {
 ; CHECK-LABEL: load_i32_by_i8_zaext_loads:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    pushl %esi
-; CHECK-NEXT:  .Lcfi8:
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:  .Lcfi9:
-; CHECK-NEXT:    .cfi_offset %esi, -8
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    movzbl 12(%eax,%ecx), %edx
-; CHECK-NEXT:    movzbl 13(%eax,%ecx), %esi
-; CHECK-NEXT:    shll $8, %esi
-; CHECK-NEXT:    orl %edx, %esi
-; CHECK-NEXT:    movzbl 14(%eax,%ecx), %edx
-; CHECK-NEXT:    shll $16, %edx
-; CHECK-NEXT:    orl %esi, %edx
-; CHECK-NEXT:    movzbl 15(%eax,%ecx), %eax
-; CHECK-NEXT:    shll $24, %eax
-; CHECK-NEXT:    orl %edx, %eax
-; CHECK-NEXT:    popl %esi
+; CHECK-NEXT:    movl 12(%eax,%ecx), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_zaext_loads:
 ; CHECK64:       # BB#0:
 ; CHECK64-NEXT:    movl %esi, %eax
-; CHECK64-NEXT:    movzbl 12(%rdi,%rax), %ecx
-; CHECK64-NEXT:    movzbl 13(%rdi,%rax), %edx
-; CHECK64-NEXT:    shll $8, %edx
-; CHECK64-NEXT:    orl %ecx, %edx
-; CHECK64-NEXT:    movzbl 14(%rdi,%rax), %ecx
-; CHECK64-NEXT:    shll $16, %ecx
-; CHECK64-NEXT:    orl %edx, %ecx
-; CHECK64-NEXT:    movzbl 15(%rdi,%rax), %eax
-; CHECK64-NEXT:    shll $24, %eax
-; CHECK64-NEXT:    orl %ecx, %eax
+; CHECK64-NEXT:    movl 12(%rdi,%rax), %eax
 ; CHECK64-NEXT:    retq
   %tmp = add nuw nsw i32 %arg1, 3
   %tmp2 = add nuw nsw i32 %arg1, 2
@@ -1127,39 +1072,15 @@ define i32 @load_i32_by_i8_zaext_loads(i
 define i32 @load_i32_by_i8_zsext_loads(i8* %arg, i32 %arg1) {
 ; CHECK-LABEL: load_i32_by_i8_zsext_loads:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    pushl %esi
-; CHECK-NEXT:  .Lcfi10:
-; CHECK-NEXT:    .cfi_def_cfa_offset 8
-; CHECK-NEXT:  .Lcfi11:
-; CHECK-NEXT:    .cfi_offset %esi, -8
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; CHECK-NEXT:    movzbl 12(%eax,%ecx), %edx
-; CHECK-NEXT:    movzbl 13(%eax,%ecx), %esi
-; CHECK-NEXT:    shll $8, %esi
-; CHECK-NEXT:    orl %edx, %esi
-; CHECK-NEXT:    movzbl 14(%eax,%ecx), %edx
-; CHECK-NEXT:    shll $16, %edx
-; CHECK-NEXT:    orl %esi, %edx
-; CHECK-NEXT:    movsbl 15(%eax,%ecx), %eax
-; CHECK-NEXT:    shll $24, %eax
-; CHECK-NEXT:    orl %edx, %eax
-; CHECK-NEXT:    popl %esi
+; CHECK-NEXT:    movl 12(%eax,%ecx), %eax
 ; CHECK-NEXT:    retl
 ;
 ; CHECK64-LABEL: load_i32_by_i8_zsext_loads:
 ; CHECK64:       # BB#0:
 ; CHECK64-NEXT:    movl %esi, %eax
-; CHECK64-NEXT:    movzbl 12(%rdi,%rax), %ecx
-; CHECK64-NEXT:    movzbl 13(%rdi,%rax), %edx
-; CHECK64-NEXT:    shll $8, %edx
-; CHECK64-NEXT:    orl %ecx, %edx
-; CHECK64-NEXT:    movzbl 14(%rdi,%rax), %ecx
-; CHECK64-NEXT:    shll $16, %ecx
-; CHECK64-NEXT:    orl %edx, %ecx
-; CHECK64-NEXT:    movsbl 15(%rdi,%rax), %eax
-; CHECK64-NEXT:    shll $24, %eax
-; CHECK64-NEXT:    orl %ecx, %eax
+; CHECK64-NEXT:    movl 12(%rdi,%rax), %eax
 ; CHECK64-NEXT:    retq
   %tmp = add nuw nsw i32 %arg1, 3
   %tmp2 = add nuw nsw i32 %arg1, 2




More information about the llvm-commits mailing list