[llvm] 9bb2b4f - [AMDGPU] Add alignment check for v3 to v4 load type promotion

Christudasan Devadasan via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 1 00:33:54 PDT 2020


Author: Christudasan Devadasan
Date: 2020-11-01T12:05:34+05:30
New Revision: 9bb2b4f0aacd14b2a9678f7ccae67ebe3020321d

URL: https://github.com/llvm/llvm-project/commit/9bb2b4f0aacd14b2a9678f7ccae67ebe3020321d
DIFF: https://github.com/llvm/llvm-project/commit/9bb2b4f0aacd14b2a9678f7ccae67ebe3020321d.diff

LOG: [AMDGPU] Add alignment check for v3 to v4 load type promotion

It should be enabled only when the load alignment is at least 8 bytes.
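
A minimal IR sketch, mirroring the new promote-vect3-load.ll test added below: with only
4-byte alignment, the v3 load is no longer widened to a 16-byte (dwordx4) access but split
instead. Per the test's gfx900 checks, this case is now selected as s_load_dwordx2 +
s_load_dword rather than a single s_load_dwordx4.

    define amdgpu_kernel void @load_v3i32_align4(<3 x i32> addrspace(1)* %arg) {
      ; align 4 (< 8): split into s_load_dwordx2 + s_load_dword, not widened to s_load_dwordx4
      %vec = load <3 x i32>, <3 x i32> addrspace(1)* %arg, align 4
      store <3 x i32> %vec, <3 x i32> addrspace(1)* undef, align 4
      ret void
    }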

Fixes: SWDEV-256824

Reviewed By: foad

Differential Revision: https://reviews.llvm.org/D90404

Added: 
    llvm/test/CodeGen/AMDGPU/promote-vect3-load.ll

Modified: 
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/test/CodeGen/AMDGPU/bfi_int.ll
    llvm/test/CodeGen/AMDGPU/fshl.ll
    llvm/test/CodeGen/AMDGPU/fshr.ll
    llvm/test/CodeGen/AMDGPU/merge-stores.ll
    llvm/test/CodeGen/AMDGPU/sign_extend.ll
    llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 82940443ebc8..32de18c07c42 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -7986,6 +7986,7 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   LoadSDNode *Load = cast<LoadSDNode>(Op);
   ISD::LoadExtType ExtType = Load->getExtensionType();
   EVT MemVT = Load->getMemoryVT();
+  MachineMemOperand *MMO = Load->getMemOperand();
 
   if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
     if (MemVT == MVT::i16 && isTypeLegal(MVT::i16))
@@ -7996,7 +7997,6 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
 
     SDValue Chain = Load->getChain();
     SDValue BasePtr = Load->getBasePtr();
-    MachineMemOperand *MMO = Load->getMemOperand();
 
     EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
 
@@ -8052,13 +8052,15 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
          AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
 
   unsigned NumElements = MemVT.getVectorNumElements();
+  bool Is16ByteKnownDereferenceable = MMO->getPointerInfo().isDereferenceable(
+      16, *DAG.getContext(), DAG.getDataLayout());
 
   if (AS == AMDGPUAS::CONSTANT_ADDRESS ||
       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
     if (!Op->isDivergent() && Alignment >= 4 && NumElements < 32) {
       if (MemVT.isPow2VectorType())
         return SDValue();
-      if (NumElements == 3)
+      if (NumElements == 3 && (Alignment >= 8 || Is16ByteKnownDereferenceable))
         return WidenVectorLoad(Op, DAG);
       return SplitVectorLoad(Op, DAG);
     }
@@ -8076,7 +8078,7 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
         Alignment >= 4 && NumElements < 32) {
       if (MemVT.isPow2VectorType())
         return SDValue();
-      if (NumElements == 3)
+      if (NumElements == 3 && (Alignment >= 8 || Is16ByteKnownDereferenceable))
         return WidenVectorLoad(Op, DAG);
       return SplitVectorLoad(Op, DAG);
     }
@@ -8092,8 +8094,11 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
     if (NumElements > 4)
       return SplitVectorLoad(Op, DAG);
     // v3 loads not supported on SI.
-    if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
-      return WidenVectorLoad(Op, DAG);
+    if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) {
+      if (Alignment >= 8 || Is16ByteKnownDereferenceable)
+        return WidenVectorLoad(Op, DAG);
+      return SplitVectorLoad(Op, DAG);
+    }
     // v3 and v4 loads are supported for private and global memory.
     return SDValue();
   }
@@ -8116,8 +8121,11 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
       if (NumElements > 4)
         return SplitVectorLoad(Op, DAG);
       // v3 loads not supported on SI.
-      if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores())
-        return WidenVectorLoad(Op, DAG);
+      if (NumElements == 3 && !Subtarget->hasDwordx3LoadStores()) {
+        if (Alignment >= 8 || Is16ByteKnownDereferenceable)
+          return WidenVectorLoad(Op, DAG);
+        return SplitVectorLoad(Op, DAG);
+      }
       return SDValue();
     default:
       llvm_unreachable("unsupported private_element_size");

diff --git a/llvm/test/CodeGen/AMDGPU/bfi_int.ll b/llvm/test/CodeGen/AMDGPU/bfi_int.ll
index bde5c55299fa..93f1572d70f8 100644
--- a/llvm/test/CodeGen/AMDGPU/bfi_int.ll
+++ b/llvm/test/CodeGen/AMDGPU/bfi_int.ll
@@ -8,8 +8,8 @@
 ; FUNC-LABEL: {{^}}bfi_def:
 ; R600: BFI_INT
 
-; GCN:   s_andn2_b32
-; GCN:   s_and_b32
+; GCN-DAG:   s_andn2_b32
+; GCN-DAG:   s_and_b32
 ; GCN:   s_or_b32
 define amdgpu_kernel void @bfi_def(i32 addrspace(1)* %out, i32 %x, i32 %y, i32 %z) {
 entry:

diff --git a/llvm/test/CodeGen/AMDGPU/fshl.ll b/llvm/test/CodeGen/AMDGPU/fshl.ll
index 6de55da7f90c..123c40a7c2d0 100644
--- a/llvm/test/CodeGen/AMDGPU/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshl.ll
@@ -12,14 +12,15 @@ define amdgpu_kernel void @fshl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ; SI-LABEL: fshl_i32:
 ; SI:       ; %bb.0: ; %entry
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
+; SI-NEXT:    s_load_dword s0, s[0:1], 0xd
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s1
-; SI-NEXT:    s_lshr_b32 s1, s0, 1
-; SI-NEXT:    v_alignbit_b32 v0, s0, v0, 1
-; SI-NEXT:    s_not_b32 s0, s2
+; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    s_not_b32 s0, s0
+; SI-NEXT:    v_alignbit_b32 v0, s2, v0, 1
+; SI-NEXT:    s_lshr_b32 s1, s2, 1
 ; SI-NEXT:    v_mov_b32_e32 v1, s0
 ; SI-NEXT:    v_alignbit_b32 v0, s1, v0, v1
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
@@ -27,33 +28,35 @@ define amdgpu_kernel void @fshl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ;
 ; VI-LABEL: fshl_i32:
 ; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x2c
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x34
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s1
-; VI-NEXT:    s_not_b32 s2, s2
-; VI-NEXT:    s_lshr_b32 s1, s0, 1
-; VI-NEXT:    v_alignbit_b32 v0, s0, v0, 1
-; VI-NEXT:    v_mov_b32_e32 v1, s2
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    s_not_b32 s0, s0
+; VI-NEXT:    s_lshr_b32 s1, s4, 1
+; VI-NEXT:    v_alignbit_b32 v0, s4, v0, 1
+; VI-NEXT:    v_mov_b32_e32 v1, s0
 ; VI-NEXT:    v_alignbit_b32 v2, s1, v0, v1
-; VI-NEXT:    v_mov_b32_e32 v0, s4
-; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
 ; VI-NEXT:    flat_store_dword v[0:1], v2
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: fshl_i32:
 ; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dword s0, s[0:1], 0x34
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s1
-; GFX9-NEXT:    s_not_b32 s2, s2
-; GFX9-NEXT:    s_lshr_b32 s1, s0, 1
-; GFX9-NEXT:    v_alignbit_b32 v0, s0, v0, 1
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
+; GFX9-NEXT:    v_mov_b32_e32 v0, s5
+; GFX9-NEXT:    s_not_b32 s0, s0
+; GFX9-NEXT:    s_lshr_b32 s1, s4, 1
+; GFX9-NEXT:    v_alignbit_b32 v0, s4, v0, 1
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
 ; GFX9-NEXT:    v_alignbit_b32 v2, s1, v0, v1
-; GFX9-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX9-NEXT:    global_store_dword v[0:1], v2, off
 ; GFX9-NEXT:    s_endpgm
 ;

diff --git a/llvm/test/CodeGen/AMDGPU/fshr.ll b/llvm/test/CodeGen/AMDGPU/fshr.ll
index 96b609436da7..0874b094a2c0 100644
--- a/llvm/test/CodeGen/AMDGPU/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshr.ll
@@ -21,39 +21,42 @@ define amdgpu_kernel void @fshr_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ; SI-LABEL: fshr_i32:
 ; SI:       ; %bb.0: ; %entry
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
+; SI-NEXT:    s_load_dword s0, s[0:1], 0xd
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s1
-; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    v_alignbit_b32 v0, s0, v0, v1
+; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    v_mov_b32_e32 v1, s0
+; SI-NEXT:    v_alignbit_b32 v0, s2, v0, v1
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: fshr_i32:
 ; VI:       ; %bb.0: ; %entry
-; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x2c
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x34
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s1
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    v_alignbit_b32 v2, s0, v0, v1
-; VI-NEXT:    v_mov_b32_e32 v0, s4
-; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    v_mov_b32_e32 v0, s5
+; VI-NEXT:    v_mov_b32_e32 v1, s0
+; VI-NEXT:    v_alignbit_b32 v2, s4, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_mov_b32_e32 v1, s3
 ; VI-NEXT:    flat_store_dword v[0:1], v2
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: fshr_i32:
 ; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x2c
+; GFX9-NEXT:    s_load_dword s0, s[0:1], 0x34
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s1
-; GFX9-NEXT:    v_mov_b32_e32 v1, s2
-; GFX9-NEXT:    v_alignbit_b32 v2, s0, v0, v1
-; GFX9-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NEXT:    v_mov_b32_e32 v0, s5
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
+; GFX9-NEXT:    v_alignbit_b32 v2, s4, v0, v1
+; GFX9-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX9-NEXT:    global_store_dword v[0:1], v2, off
 ; GFX9-NEXT:    s_endpgm
 ;

diff --git a/llvm/test/CodeGen/AMDGPU/merge-stores.ll b/llvm/test/CodeGen/AMDGPU/merge-stores.ll
index 8d3b401c5788..18f46a8b8cf1 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-stores.ll
+++ b/llvm/test/CodeGen/AMDGPU/merge-stores.ll
@@ -275,7 +275,8 @@ define amdgpu_kernel void @merge_global_store_4_adjacent_loads_i32(i32 addrspace
 }
 
 ; GCN-LABEL: {{^}}merge_global_store_3_adjacent_loads_i32:
-; SI-DAG: buffer_load_dwordx4
+; SI-DAG: buffer_load_dwordx2
+; SI-DAG: buffer_load_dword
 ; CI-DAG: buffer_load_dwordx3
 ; GCN: s_waitcnt
 ; SI-DAG: buffer_store_dwordx2
@@ -613,7 +614,8 @@ define amdgpu_kernel void @merge_global_store_8_constants_i32(i32 addrspace(1)*
 
 ; GCN-LABEL: {{^}}copy_v3i32_align4:
 ; GCN-NOT: SCRATCH_RSRC_DWORD
-; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
 ; CI-DAG: buffer_load_dwordx3 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
 ; GCN-NOT: offen
 ; GCN: s_waitcnt vmcnt
@@ -647,7 +649,8 @@ define amdgpu_kernel void @copy_v3i64_align4(<3 x i64> addrspace(1)* noalias %ou
 
 ; GCN-LABEL: {{^}}copy_v3f32_align4:
 ; GCN-NOT: SCRATCH_RSRC_DWORD
-; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; SI-DAG: buffer_load_dword v{{[0-9]+}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:8
 ; CI-DAG: buffer_load_dwordx3 v{{\[[0-9]+:[0-9]+\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
 ; GCN-NOT: offen
 ; GCN: s_waitcnt vmcnt

diff --git a/llvm/test/CodeGen/AMDGPU/promote-vect3-load.ll b/llvm/test/CodeGen/AMDGPU/promote-vect3-load.ll
new file mode 100644
index 000000000000..96ada95a0821
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/promote-vect3-load.ll
@@ -0,0 +1,71 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN %s
+
+; The type promotion for the vector loads v3i32/v3f32 into v4i32/v4f32 is enabled
+; only when the alignment is 8-byte or higher.
+; Otherwise, split the load into two separate loads (dwordx2 + dword).
+; This type promotion on smaller aligned loads can cause a page fault error
+; while accessing one extra dword beyond the buffer.
+
+define protected amdgpu_kernel void @load_v3i32_align4(<3 x i32> addrspace(1)* %arg) #0 {
+; GCN-LABEL: load_v3i32_align4:
+; GCN:       ; %bb.0:
+; GCN:         s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx2 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x0
+; GCN-NEXT:    s_load_dword s{{[0-9]+}}, s[0:1], 0x8
+  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %arg, align 4
+  store <3 x i32> %vec, <3 x i32> addrspace(1)* undef, align 4
+  ret void
+}
+
+define protected amdgpu_kernel void @load_v3i32_align8(<3 x i32> addrspace(1)* %arg) #0 {
+; GCN-LABEL: load_v3i32_align8:
+; GCN:       ; %bb.0:
+; GCN:         s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x0
+  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %arg, align 8
+  store <3 x i32> %vec, <3 x i32> addrspace(1)* undef, align 8
+  ret void
+}
+
+define protected amdgpu_kernel void @load_v3i32_align16(<3 x i32> addrspace(1)* %arg) #0 {
+; GCN-LABEL: load_v3i32_align16:
+; GCN:       ; %bb.0:
+; GCN:         s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x0
+  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %arg, align 16
+  store <3 x i32> %vec, <3 x i32> addrspace(1)* undef, align 16
+  ret void
+}
+
+define protected amdgpu_kernel void @load_v3f32_align4(<3 x float> addrspace(1)* %arg) #0 {
+; GCN-LABEL: load_v3f32_align4:
+; GCN:       ; %bb.0:
+; GCN:         s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx2 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x0
+; GCN-NEXT:    s_load_dword s{{[0-9]+}}, s[0:1], 0x8
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %arg, align 4
+  store <3 x float> %vec, <3 x float> addrspace(1)* undef, align 4
+  ret void
+}
+
+define protected amdgpu_kernel void @load_v3f32_align8(<3 x float> addrspace(1)* %arg) #0 {
+; GCN-LABEL: load_v3f32_align8:
+; GCN:       ; %bb.0:
+; GCN:         s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x0
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %arg, align 8
+  store <3 x float> %vec, <3 x float> addrspace(1)* undef, align 8
+  ret void
+}
+
+define protected amdgpu_kernel void @load_v3f32_align16(<3 x float> addrspace(1)* %arg) #0 {
+; GCN-LABEL: load_v3f32_align16:
+; GCN:       ; %bb.0:
+; GCN:         s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dwordx4 s{{\[[0-9]:[0-9]+\]}}, s[0:1], 0x0
+  %vec = load <3 x float>, <3 x float> addrspace(1)* %arg, align 16
+  store <3 x float> %vec, <3 x float> addrspace(1)* undef, align 16
+  ret void
+}
+
+attributes #0 = { nounwind noinline }

diff --git a/llvm/test/CodeGen/AMDGPU/sign_extend.ll b/llvm/test/CodeGen/AMDGPU/sign_extend.ll
index d263be2dcf82..4b7b96d7cea6 100644
--- a/llvm/test/CodeGen/AMDGPU/sign_extend.ll
+++ b/llvm/test/CodeGen/AMDGPU/sign_extend.ll
@@ -38,30 +38,32 @@ define amdgpu_kernel void @test_s_sext_i32_to_i64(i64 addrspace(1)* %out, i32 %a
 ; SI-LABEL: test_s_sext_i32_to_i64:
 ; SI:       ; %bb.0: ; %entry
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
+; SI-NEXT:    s_load_dword s0, s[0:1], 0xd
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mul_i32 s0, s0, s1
-; SI-NEXT:    s_add_i32 s0, s0, s2
-; SI-NEXT:    s_ashr_i32 s1, s0, 31
-; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    s_mul_i32 s1, s2, s3
+; SI-NEXT:    s_add_i32 s1, s1, s0
+; SI-NEXT:    s_ashr_i32 s0, s1, 31
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    v_mov_b32_e32 v1, s0
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: test_s_sext_i32_to_i64:
 ; VI:       ; %bb.0: ; %entry
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x34
 ; VI-NEXT:    s_mov_b32 s7, 0xf000
 ; VI-NEXT:    s_mov_b32 s6, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_mul_i32 s0, s0, s1
-; VI-NEXT:    s_add_i32 s0, s0, s2
-; VI-NEXT:    s_ashr_i32 s1, s0, 31
-; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_mul_i32 s1, s2, s3
+; VI-NEXT:    s_add_i32 s1, s1, s0
+; VI-NEXT:    s_ashr_i32 s0, s1, 31
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_mov_b32_e32 v1, s0
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
 entry:
@@ -290,13 +292,14 @@ define amdgpu_kernel void @v_sext_i1_to_i16_with_and(i16 addrspace(1)* %out, i32
 ; SI-LABEL: v_sext_i1_to_i16_with_and:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0xb
+; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
+; SI-NEXT:    s_load_dword s0, s[0:1], 0xd
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
-; SI-NEXT:    v_mov_b32_e32 v0, s2
-; SI-NEXT:    v_cmp_eq_u32_e64 s[0:1], s1, v0
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, s2, v0
+; SI-NEXT:    v_mov_b32_e32 v0, s0
+; SI-NEXT:    v_cmp_eq_u32_e64 s[0:1], s3, v0
 ; SI-NEXT:    s_and_b64 s[0:1], vcc, s[0:1]
 ; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; SI-NEXT:    buffer_store_short v0, off, s[4:7], 0
@@ -305,13 +308,14 @@ define amdgpu_kernel void @v_sext_i1_to_i16_with_and(i16 addrspace(1)* %out, i32
 ; VI-LABEL: v_sext_i1_to_i16_with_and:
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
-; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x2c
+; VI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x2c
+; VI-NEXT:    s_load_dword s0, s[0:1], 0x34
 ; VI-NEXT:    s_mov_b32 s7, 0xf000
 ; VI-NEXT:    s_mov_b32 s6, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
-; VI-NEXT:    v_mov_b32_e32 v0, s2
-; VI-NEXT:    v_cmp_eq_u32_e64 s[0:1], s1, v0
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s2, v0
+; VI-NEXT:    v_mov_b32_e32 v0, s0
+; VI-NEXT:    v_cmp_eq_u32_e64 s[0:1], s3, v0
 ; VI-NEXT:    s_and_b64 s[0:1], vcc, s[0:1]
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; VI-NEXT:    buffer_store_short v0, off, s[4:7], 0

diff --git a/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll b/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll
index 831bc8f79653..74607f6fa3dd 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll
@@ -34,35 +34,37 @@ define amdgpu_kernel void @extract_insert_same_dynelt_v4i32(i32 addrspace(1)* %o
 define amdgpu_kernel void @extract_insert_different_dynelt_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %val, i32 %idx0, i32 %idx1) #1 {
 ; GCN-LABEL: extract_insert_different_dynelt_v4i32:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0xd
+; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
+; GCN-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
 ; GCN-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-NEXT:    s_mov_b32 s2, 0
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b64 s[0:1], s[6:7]
+; GCN-NEXT:    s_mov_b64 s[4:5], s[10:11]
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 4, v0
-; GCN-NEXT:    buffer_load_dwordx4 v[1:4], v[4:5], s[0:3], 0 addr64
+; GCN-NEXT:    buffer_load_dwordx4 v[1:4], v[4:5], s[4:7], 0 addr64
+; GCN-NEXT:    s_load_dword s0, s[0:1], 0xf
 ; GCN-NEXT:    v_lshlrev_b32_e32 v6, 2, v0
-; GCN-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 3
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 3
 ; GCN-NEXT:    v_mov_b32_e32 v7, v5
-; GCN-NEXT:    s_mov_b64 s[6:7], s[2:3]
+; GCN-NEXT:    s_mov_b64 s[10:11], s[6:7]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v0, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 2
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 2
 ; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 1
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 1
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v0, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s10, 1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 1
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s10, 2
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 2
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s10, 3
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 3
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
-; GCN-NEXT:    buffer_store_dword v0, v[6:7], s[4:7], 0 addr64
+; GCN-NEXT:    buffer_store_dword v0, v[6:7], s[8:11], 0 addr64
 ; GCN-NEXT:    s_endpgm
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %id.ext = sext i32 %id to i64


        

