[llvm] r327153 - [AMDGPU] Supported ds_read_b128 generation; Widened vector length for local address-space.

Farhana Aleen via llvm-commits llvm-commits at lists.llvm.org
Fri Mar 9 09:41:39 PST 2018


Author: faaleen
Date: Fri Mar  9 09:41:39 2018
New Revision: 327153

URL: http://llvm.org/viewvc/llvm-project?rev=327153&view=rev
Log:
[AMDGPU] Supported ds_read_b128 generation; Widened vector length for local address-space.

Summary: Starting from the 2nd generation of GCN, the ISA supports ds_read_b128 in addition to ds_read_b64.
         This patch adds the ds_read_b128 instruction pattern and enables generation of this instruction.
         In the vectorizer, this patch also widens the vector length so that the vectorizer generates
         128-bit loads for the local address space, which then get translated to ds_read_b128.
         Since the performance benefit is not yet clear, the compiler only generates ds_read_b128 under
         the -amdgpu-ds128 option.
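
         As an illustration (not part of the commit; the kernel name and RUN line below are hypothetical,
         modeled on the tests added by this patch), a 16-byte-aligned 128-bit load from the local address
         space is now left whole and selected to ds_read_b128 when -amdgpu-ds128 is passed on a CI or
         newer target:

           ; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-ds128 < %s | FileCheck %s
           ; CHECK-LABEL: {{^}}ds128_example:
           ; CHECK: ds_read_b128
           define amdgpu_kernel void @ds128_example(<4 x i32> addrspace(3)* %out, <4 x i32> addrspace(3)* %in) {
             %ld = load <4 x i32>, <4 x i32> addrspace(3)* %in, align 16
             store <4 x i32> %ld, <4 x i32> addrspace(3)* %out, align 16
             ret void
           }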

Author: FarhanaAleen

Reviewed By: rampitec, arsenm

Subscribers: llvm-commits, AMDGPU

Differential Revision: https://reviews.llvm.org/D44210

Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td
    llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h
    llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
    llvm/trunk/lib/Target/AMDGPU/DSInstructions.td
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
    llvm/trunk/test/CodeGen/AMDGPU/load-local-f32.ll
    llvm/trunk/test/CodeGen/AMDGPU/load-local-f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/load-local-i16.ll
    llvm/trunk/test/CodeGen/AMDGPU/load-local-i32.ll
    llvm/trunk/test/CodeGen/AMDGPU/load-local-i64.ll
    llvm/trunk/test/CodeGen/AMDGPU/load-local-i8.ll
    llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
    llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td?rev=327153&r1=327152&r2=327153&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUInstructions.td Fri Mar  9 09:41:39 2018
@@ -248,6 +248,10 @@ class Aligned8Bytes <dag ops, dag frag>
   return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
 }]>;
 
+class Aligned16Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
+  return cast<MemSDNode>(N)->getAlignment() >= 16;
+}]>;
+
 class LoadFrag <SDPatternOperator op> : PatFrag<(ops node:$ptr), (op node:$ptr)>;
 
 class StoreFrag<SDPatternOperator op> : PatFrag <
@@ -371,6 +375,10 @@ def load_align8_local : Aligned8Bytes <
   (ops node:$ptr), (load_local node:$ptr)
 >;
 
+def load_align16_local : Aligned16Bytes <
+  (ops node:$ptr), (load_local node:$ptr)
+>;
+
 def store_align8_local : Aligned8Bytes <
   (ops node:$val, node:$ptr), (store_local node:$val, node:$ptr)
 >;

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h?rev=327153&r1=327152&r2=327153&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUSubtarget.h Fri Mar  9 09:41:39 2018
@@ -414,6 +414,12 @@ public:
     return FlatForGlobal;
   }
 
+  /// \returns True if the target supports ds_read/write_b128 and the user has
+  /// enabled generation of ds_read/write_b128.
+  bool useDS128(bool UserEnable) const {
+    return CIInsts && UserEnable;
+  }
+
   /// \returns If MUBUF instructions always perform range checking, even for
   /// buffer resources used for private memory access.
   bool privateMemoryResourceIsRangeChecked() const {

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp?rev=327153&r1=327152&r2=327153&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp Fri Mar  9 09:41:39 2018
@@ -265,11 +265,11 @@ unsigned AMDGPUTTIImpl::getLoadStoreVecR
     return 512;
   }
 
-  if (AddrSpace == AS.FLAT_ADDRESS)
-    return 128;
-  if (AddrSpace == AS.LOCAL_ADDRESS ||
+  if (AddrSpace == AS.FLAT_ADDRESS ||
+      AddrSpace == AS.LOCAL_ADDRESS ||
       AddrSpace == AS.REGION_ADDRESS)
-    return 64;
+    return 128;
+
   if (AddrSpace == AS.PRIVATE_ADDRESS)
     return 8 * ST->getMaxPrivateElementSize();
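
(Illustrative sketch, not part of the commit; the function name is hypothetical.) With 128 bits now
reported for the local and region address spaces, the LoadStoreVectorizer may merge four adjacent
32-bit accesses in addrspace(3) into a single <4 x i32> access, which is what the updated
merge-stores.ll check below verifies:

  ; opt -load-store-vectorizer can now form one <4 x i32> store here
  ; instead of two <2 x i32> stores.
  define amdgpu_kernel void @merge_four_local_i32(i32 addrspace(3)* %out) {
    %p1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
    %p2 = getelementptr i32, i32 addrspace(3)* %out, i32 2
    %p3 = getelementptr i32, i32 addrspace(3)* %out, i32 3
    store i32 1, i32 addrspace(3)* %out
    store i32 2, i32 addrspace(3)* %p1
    store i32 3, i32 addrspace(3)* %p2
    store i32 4, i32 addrspace(3)* %p3
    ret void
  }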
 

Modified: llvm/trunk/lib/Target/AMDGPU/DSInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/DSInstructions.td?rev=327153&r1=327152&r2=327153&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/DSInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/DSInstructions.td Fri Mar  9 09:41:39 2018
@@ -649,6 +649,7 @@ defm : DSReadPat_mc <DS_READ_B32, i32, "
 let AddedComplexity = 100 in {
 
 defm : DSReadPat_mc <DS_READ_B64, v2i32, "load_align8_local">;
+defm : DSReadPat_mc <DS_READ_B128, v4i32, "load_align16_local">;
 
 } // End AddedComplexity = 100
 

Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=327153&r1=327152&r2=327153&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Fri Mar  9 09:41:39 2018
@@ -94,6 +94,11 @@ static cl::opt<bool> EnableVGPRIndexMode
   cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
   cl::init(false));
 
+static cl::opt<bool> EnableDS128(
+  "amdgpu-ds128",
+  cl::desc("Use DS_read/write_b128"),
+  cl::init(false));
+
 static cl::opt<unsigned> AssumeFrameIndexHighZeroBits(
   "amdgpu-frame-index-zero-bits",
   cl::desc("High bits of frame index assumed to be zero"),
@@ -5425,14 +5430,13 @@ SDValue SITargetLowering::LowerLOAD(SDVa
       llvm_unreachable("unsupported private_element_size");
     }
   } else if (AS == AMDGPUASI.LOCAL_ADDRESS) {
-    if (NumElements > 2)
-      return SplitVectorLoad(Op, DAG);
-
-    if (NumElements == 2)
+    // Use ds_read_b128 if possible.
+    if (Subtarget->useDS128(EnableDS128) && Load->getAlignment() >= 16 &&
+        MemVT.getStoreSize() == 16)
       return SDValue();
 
-    // If properly aligned, if we split we might be able to use ds_read_b64.
-    return SplitVectorLoad(Op, DAG);
+    if (NumElements > 2)
+      return SplitVectorLoad(Op, DAG);
   }
   return SDValue();
 }
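
(Illustrative sketch, not part of the commit; the function name is hypothetical.) When the alignment is
below 16 bytes, or -amdgpu-ds128 is not given, the hunk above still splits a >2-element local vector
load via SplitVectorLoad, and the 8-byte-aligned halves can then match the existing load_align8_local
pattern (ds_read_b64) instead:

  ; Only 8-byte alignment: the load is split and no ds_read_b128 is formed.
  define amdgpu_kernel void @no_ds128_under_align8(<4 x i32> addrspace(3)* %out, <4 x i32> addrspace(3)* %in) {
    %ld = load <4 x i32>, <4 x i32> addrspace(3)* %in, align 8
    store <4 x i32> %ld, <4 x i32> addrspace(3)* %out, align 8
    ret void
  }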

Modified: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td?rev=327153&r1=327152&r2=327153&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td Fri Mar  9 09:41:39 2018
@@ -410,6 +410,9 @@ def sextloadi16_glue : PatFrag<(ops node
 def load_glue_align8 : Aligned8Bytes <
   (ops node:$ptr), (load_glue node:$ptr)
 >;
+def load_glue_align16 : Aligned16Bytes <
+  (ops node:$ptr), (load_glue node:$ptr)
+>;
 
 
 def load_local_m0 : LoadFrag<load_glue>, LocalAddress;
@@ -418,6 +421,7 @@ def sextloadi16_local_m0 : LoadFrag<sext
 def az_extloadi8_local_m0 : LoadFrag<az_extloadi8_glue>, LocalAddress;
 def az_extloadi16_local_m0 : LoadFrag<az_extloadi16_glue>, LocalAddress;
 def load_align8_local_m0 : LoadFrag <load_glue_align8>, LocalAddress;
+def load_align16_local_m0 : LoadFrag <load_glue_align16>, LocalAddress;
 
 
 def AMDGPUst_glue : SDNode <"ISD::STORE", SDTStore,

Modified: llvm/trunk/test/CodeGen/AMDGPU/load-local-f32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load-local-f32.ll?rev=327153&r1=327152&r2=327153&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/load-local-f32.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/load-local-f32.ll Fri Mar  9 09:41:39 2018
@@ -2,6 +2,11 @@
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SICIVI,FUNC %s
 ; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=EG,FUNC %s
 
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tahiti -amdgpu-ds128 < %s | FileCheck -check-prefixes=SI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
 ; FUNC-LABEL: {{^}}load_f32_local:
 ; SICIVI: s_mov_b32 m0
 ; GFX9-NOT: m0
@@ -122,4 +127,18 @@ entry:
   ret void
 }
 
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_v4f32_to_128:
+; SI-NOT: ds_read_b128
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_v4f32_to_128(<4 x float> addrspace(3)* %out, <4 x float> addrspace(3)* %in) {
+  %ld = load <4 x float>, <4 x float> addrspace(3)* %in, align 16
+  store <4 x float> %ld, <4 x float> addrspace(3)* %out
+  ret void
+}
+
 attributes #0 = { nounwind }

Modified: llvm/trunk/test/CodeGen/AMDGPU/load-local-f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load-local-f64.ll?rev=327153&r1=327152&r2=327153&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/load-local-f64.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/load-local-f64.ll Fri Mar  9 09:41:39 2018
@@ -4,6 +4,10 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
 ; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=EG,FUNC %s
 
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
 ; FUNC-LABEL: {{^}}local_load_f64:
 ; SICIV: s_mov_b32 m0
 ; GFX9-NOT: m0
@@ -170,4 +174,18 @@ entry:
   ret void
 }
 
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_load_v2f64_to_128:
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_load_v2f64_to_128(<2 x double> addrspace(3)* %out, <2 x double> addrspace(3)* %in) {
+entry:
+  %ld = load <2 x double>, <2 x double> addrspace(3)* %in, align 16
+  store <2 x double> %ld, <2 x double> addrspace(3)* %out
+  ret void
+}
+
 attributes #0 = { nounwind }

Modified: llvm/trunk/test/CodeGen/AMDGPU/load-local-i16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load-local-i16.ll?rev=327153&r1=327152&r2=327153&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/load-local-i16.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/load-local-i16.ll Fri Mar  9 09:41:39 2018
@@ -3,6 +3,10 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,GFX89,FUNC %s
 ; RUN: llc -march=r600 -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
 
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
 ; FUNC-LABEL: {{^}}local_load_i16:
 ; GFX9-NOT: m0
 ; SICIVI: s_mov_b32 m0
@@ -935,4 +939,18 @@ define amdgpu_kernel void @local_sextloa
 ;   ret void
 ; }
 
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_v8i16_to_128:
+; SI-NOT: ds_read_b128
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_v8i16_to_128(<8 x i16> addrspace(3)* %out, <8 x i16> addrspace(3)* %in) {
+  %ld = load <8 x i16>, <8 x i16> addrspace(3)* %in, align 16
+  store <8 x i16> %ld, <8 x i16> addrspace(3)* %out
+  ret void
+}
+
 attributes #0 = { nounwind }

Modified: llvm/trunk/test/CodeGen/AMDGPU/load-local-i32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load-local-i32.ll?rev=327153&r1=327152&r2=327153&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/load-local-i32.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/load-local-i32.ll Fri Mar  9 09:41:39 2018
@@ -3,6 +3,11 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
 ; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
 
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tahiti -amdgpu-ds128 < %s | FileCheck -check-prefixes=SI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
 ; FUNC-LABEL: {{^}}local_load_i32:
 ; GCN-NOT: s_wqm_b64
 ; SICIVI: s_mov_b32 m0, -1
@@ -175,6 +180,20 @@ define amdgpu_kernel void @local_sextloa
   ret void
 }
 
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_v4i32_to_128:
+; SI-NOT: ds_read_b128
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_v4i32_to_128(<4 x i32> addrspace(3)* %out, <4 x i32> addrspace(3)* %in) {
+  %ld = load <4 x i32>, <4 x i32> addrspace(3)* %in, align 16
+  store <4 x i32> %ld, <4 x i32> addrspace(3)* %out
+  ret void
+}
+
 ; FUNC-LABEL: {{^}}local_zextload_v8i32_to_v8i64:
 ; SICIVI: s_mov_b32 m0, -1
 ; GFX9-NOT: m0

Modified: llvm/trunk/test/CodeGen/AMDGPU/load-local-i64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load-local-i64.ll?rev=327153&r1=327152&r2=327153&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/load-local-i64.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/load-local-i64.ll Fri Mar  9 09:41:39 2018
@@ -4,6 +4,10 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
 ; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=EG,FUNC %s
 
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+
 ; FUNC-LABEL: {{^}}local_load_i64:
 ; SICIVI: s_mov_b32 m0
 ; GFX9-NOT: m0
@@ -33,6 +37,16 @@ define amdgpu_kernel void @local_load_v2
 entry:
   %ld = load <2 x i64>, <2 x i64> addrspace(3)* %in
   store <2 x i64> %ld, <2 x i64> addrspace(3)* %out
+  ret void
+}
+
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_load_v2i64_to_128:
+; CIVI: ds_read_b128
+define amdgpu_kernel void @local_load_v2i64_to_128(<2 x i64> addrspace(3)* %out, <2 x i64> addrspace(3)* %in) {
+entry:
+  %ld = load <2 x i64>, <2 x i64> addrspace(3)* %in
+  store <2 x i64> %ld, <2 x i64> addrspace(3)* %out
   ret void
 }
 

Modified: llvm/trunk/test/CodeGen/AMDGPU/load-local-i8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/load-local-i8.ll?rev=327153&r1=327152&r2=327153&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/load-local-i8.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/load-local-i8.ll Fri Mar  9 09:41:39 2018
@@ -3,6 +3,9 @@
 ; RUN: llc -march=amdgcn -mtriple=amdgcn---amdgiz -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX9,FUNC %s
 ; RUN: llc -march=r600 -mtriple=r600---amdgiz -mcpu=redwood -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
 
+; Testing for ds_read_b128
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-ds128 < %s | FileCheck -check-prefixes=CIVI,FUNC %s
 
 ; FUNC-LABEL: {{^}}local_load_i8:
 ; GCN-NOT: s_wqm_b64
@@ -1021,4 +1024,18 @@ define amdgpu_kernel void @local_sextloa
 ;   ret void
 ; }
 
+; Tests if ds_read_b128 gets generated for the 16 byte aligned load.
+; FUNC-LABEL: {{^}}local_v16i8_to_128:
+; SI-NOT: ds_read_b128
+; CIVI: ds_read_b128
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+; EG: LDS_READ_RET
+define amdgpu_kernel void @local_v16i8_to_128(<16 x i8> addrspace(3)* %out, <16 x i8> addrspace(3)* %in) {
+  %ld = load <16 x i8>, <16 x i8> addrspace(3)* %in, align 16
+  store <16 x i8> %ld, <16 x i8> addrspace(3)* %out
+  ret void
+}
+
 attributes #0 = { nounwind }

Modified: llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll?rev=327153&r1=327152&r2=327153&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll (original)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/merge-stores.ll Fri Mar  9 09:41:39 2018
@@ -504,8 +504,7 @@ define amdgpu_kernel void @merge_local_s
 }
 
 ; CHECK-LABEL: @merge_local_store_4_constants_i32
-; CHECK: store <2 x i32> <i32 456, i32 333>, <2 x i32> addrspace(3)*
-; CHECK: store <2 x i32> <i32 1234, i32 123>, <2 x i32> addrspace(3)*
+; CHECK: store <4 x i32> <i32 1234, i32 123, i32 456, i32 333>, <4 x i32> addrspace(3)*
 define amdgpu_kernel void @merge_local_store_4_constants_i32(i32 addrspace(3)* %out) #0 {
   %out.gep.1 = getelementptr i32, i32 addrspace(3)* %out, i32 1
   %out.gep.2 = getelementptr i32, i32 addrspace(3)* %out, i32 2

Modified: llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll?rev=327153&r1=327152&r2=327153&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll (original)
+++ llvm/trunk/test/Transforms/LoadStoreVectorizer/AMDGPU/multiple_tails.ll Fri Mar  9 09:41:39 2018
@@ -29,11 +29,10 @@ define amdgpu_kernel void @no_crash(i32
 ; longest chain vectorized
 
 ; CHECK-LABEL: @interleave_get_longest
-; CHECK: load <2 x i32>
+; CHECK: load <4 x i32>
 ; CHECK: load i32
 ; CHECK: store <2 x i32> zeroinitializer
 ; CHECK: load i32
-; CHECK: load <2 x i32>
 ; CHECK: load i32
 ; CHECK: load i32
 



