[llvm] 263bc7f - [LoadStoreVectorizer] Only upgrade align for alloca

Bjorn Pettersson via llvm-commits llvm-commits@lists.llvm.org
Fri Jun 9 06:34:17 PDT 2023


Author: Bjorn Pettersson
Date: 2023-06-09T15:33:35+02:00
New Revision: 263bc7f905c7f354cb80403c0b2754cf8880d1b7

URL: https://github.com/llvm/llvm-project/commit/263bc7f905c7f354cb80403c0b2754cf8880d1b7
DIFF: https://github.com/llvm/llvm-project/commit/263bc7f905c7f354cb80403c0b2754cf8880d1b7.diff

LOG: [LoadStoreVectorizer] Only upgrade align for alloca

In commit 2be0abb7fe72ed453 (D149893) the load/store vectorizer was
reimplemented. One thing that can happen with the new LSV is that it
can increase the alignment of alloca and global objects. However, the
code comments indicate that the intention was only to increase the
alignment of allocas.

Now we use stripPointerCasts to determine whether the load/store
really accesses an alloca (the same approach that
getOrEnforceKnownAlignment uses), and we only attempt to change the
alignment if we find an alloca instruction. This way the code matches
the code comments better, and we no longer change the alignment of
non-stack variables to the "StackAdjustedAlignment".

Differential Revision: https://reviews.llvm.org/D152386

Added: 
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/dont-adjust-globalobj-alignment.ll

Modified: 
    llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
    llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
index 6e1f157a23f31..4ee2dc56f5c83 100644
--- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -801,12 +801,14 @@ std::vector<Chain> Vectorizer::splitChainByAlignment(Chain &C) {
       //
       // FIXME: We will upgrade the alignment of the alloca even if it turns out
       // we can't vectorize for some other reason.
+      Value *PtrOperand = getLoadStorePointerOperand(C[CBegin].Inst);
+      bool IsAllocaAccess = isa<AllocaInst>(PtrOperand->stripPointerCasts());
       Align Alignment = getLoadStoreAlignment(C[CBegin].Inst);
-      if (AS == DL.getAllocaAddrSpace() && Alignment.value() % SizeBytes != 0 &&
-          IsAllowedAndFast(Align(StackAdjustedAlignment))) {
+      Align PrefAlign = Align(StackAdjustedAlignment);
+      if (IsAllocaAccess && AS == DL.getAllocaAddrSpace() &&
+          Alignment.value() % SizeBytes != 0 && IsAllowedAndFast(PrefAlign)) {
         Align NewAlign = getOrEnforceKnownAlignment(
-            getLoadStorePointerOperand(C[CBegin].Inst),
-            Align(StackAdjustedAlignment), DL, C[CBegin].Inst, nullptr, &DT);
+            PtrOperand, PrefAlign, DL, C[CBegin].Inst, nullptr, &DT);
         if (NewAlign >= Alignment) {
           LLVM_DEBUG(dbgs()
                      << "LSV: splitByChain upgrading alloca alignment from "

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
index a94ff78ac18cb..f44e199afe45e 100644
--- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll
@@ -21,9 +21,9 @@ define amdgpu_kernel void @load_unknown_offset_align1_i8(ptr addrspace(1) noalia
 ; UNALIGNED-LABEL: @load_unknown_offset_align1_i8(
 ; UNALIGNED-NEXT:    [[ALLOCA:%.*]] = alloca [128 x i8], align 1, addrspace(5)
 ; UNALIGNED-NEXT:    [[PTR0:%.*]] = getelementptr inbounds [128 x i8], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[OFFSET:%.*]]
-; UNALIGNED-NEXT:    [[TMP2:%.*]] = load <2 x i8>, ptr addrspace(5) [[PTR0]], align 1
-; UNALIGNED-NEXT:    [[VAL01:%.*]] = extractelement <2 x i8> [[TMP2]], i32 0
-; UNALIGNED-NEXT:    [[VAL12:%.*]] = extractelement <2 x i8> [[TMP2]], i32 1
+; UNALIGNED-NEXT:    [[TMP1:%.*]] = load <2 x i8>, ptr addrspace(5) [[PTR0]], align 1
+; UNALIGNED-NEXT:    [[VAL01:%.*]] = extractelement <2 x i8> [[TMP1]], i32 0
+; UNALIGNED-NEXT:    [[VAL12:%.*]] = extractelement <2 x i8> [[TMP1]], i32 1
 ; UNALIGNED-NEXT:    [[ADD:%.*]] = add i8 [[VAL01]], [[VAL12]]
 ; UNALIGNED-NEXT:    store i8 [[ADD]], ptr addrspace(1) [[OUT:%.*]], align 1
 ; UNALIGNED-NEXT:    ret void
@@ -52,9 +52,9 @@ define amdgpu_kernel void @load_unknown_offset_align1_i16(ptr addrspace(1) noali
 ; UNALIGNED-LABEL: @load_unknown_offset_align1_i16(
 ; UNALIGNED-NEXT:    [[ALLOCA:%.*]] = alloca [128 x i16], align 1, addrspace(5)
 ; UNALIGNED-NEXT:    [[PTR0:%.*]] = getelementptr inbounds [128 x i16], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[OFFSET:%.*]]
-; UNALIGNED-NEXT:    [[TMP2:%.*]] = load <2 x i16>, ptr addrspace(5) [[PTR0]], align 1
-; UNALIGNED-NEXT:    [[VAL01:%.*]] = extractelement <2 x i16> [[TMP2]], i32 0
-; UNALIGNED-NEXT:    [[VAL12:%.*]] = extractelement <2 x i16> [[TMP2]], i32 1
+; UNALIGNED-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr addrspace(5) [[PTR0]], align 1
+; UNALIGNED-NEXT:    [[VAL01:%.*]] = extractelement <2 x i16> [[TMP1]], i32 0
+; UNALIGNED-NEXT:    [[VAL12:%.*]] = extractelement <2 x i16> [[TMP1]], i32 1
 ; UNALIGNED-NEXT:    [[ADD:%.*]] = add i16 [[VAL01]], [[VAL12]]
 ; UNALIGNED-NEXT:    store i16 [[ADD]], ptr addrspace(1) [[OUT:%.*]], align 2
 ; UNALIGNED-NEXT:    ret void
@@ -85,9 +85,9 @@ define amdgpu_kernel void @load_unknown_offset_align1_i32(ptr addrspace(1) noali
 ; UNALIGNED-LABEL: @load_unknown_offset_align1_i32(
 ; UNALIGNED-NEXT:    [[ALLOCA:%.*]] = alloca [128 x i32], align 1, addrspace(5)
 ; UNALIGNED-NEXT:    [[PTR0:%.*]] = getelementptr inbounds [128 x i32], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[OFFSET:%.*]]
-; UNALIGNED-NEXT:    [[TMP2:%.*]] = load <2 x i32>, ptr addrspace(5) [[PTR0]], align 1
-; UNALIGNED-NEXT:    [[VAL01:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
-; UNALIGNED-NEXT:    [[VAL12:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
+; UNALIGNED-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr addrspace(5) [[PTR0]], align 1
+; UNALIGNED-NEXT:    [[VAL01:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0
+; UNALIGNED-NEXT:    [[VAL12:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
 ; UNALIGNED-NEXT:    [[ADD:%.*]] = add i32 [[VAL01]], [[VAL12]]
 ; UNALIGNED-NEXT:    store i32 [[ADD]], ptr addrspace(1) [[OUT:%.*]], align 4
 ; UNALIGNED-NEXT:    ret void
@@ -104,15 +104,25 @@ define amdgpu_kernel void @load_unknown_offset_align1_i32(ptr addrspace(1) noali
 
 ; Make sure alloca alignment isn't decreased
 define amdgpu_kernel void @load_alloca16_unknown_offset_align1_i32(ptr addrspace(1) noalias %out, i32 %offset) #0 {
-; CHECK-LABEL: @load_alloca16_unknown_offset_align1_i32(
-; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca [128 x i32], align 16, addrspace(5)
-; CHECK-NEXT:    [[PTR0:%.*]] = getelementptr inbounds [128 x i32], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[OFFSET:%.*]]
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i32>, ptr addrspace(5) [[PTR0]], align 4
-; CHECK-NEXT:    [[VAL01:%.*]] = extractelement <2 x i32> [[TMP2]], i32 0
-; CHECK-NEXT:    [[VAL12:%.*]] = extractelement <2 x i32> [[TMP2]], i32 1
-; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[VAL01]], [[VAL12]]
-; CHECK-NEXT:    store i32 [[ADD]], ptr addrspace(1) [[OUT:%.*]], align 4
-; CHECK-NEXT:    ret void
+; ALIGNED-LABEL: @load_alloca16_unknown_offset_align1_i32(
+; ALIGNED-NEXT:    [[ALLOCA:%.*]] = alloca [128 x i32], align 16, addrspace(5)
+; ALIGNED-NEXT:    [[PTR0:%.*]] = getelementptr inbounds [128 x i32], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[OFFSET:%.*]]
+; ALIGNED-NEXT:    [[VAL0:%.*]] = load i32, ptr addrspace(5) [[PTR0]], align 1
+; ALIGNED-NEXT:    [[PTR1:%.*]] = getelementptr inbounds i32, ptr addrspace(5) [[PTR0]], i32 1
+; ALIGNED-NEXT:    [[VAL1:%.*]] = load i32, ptr addrspace(5) [[PTR1]], align 1
+; ALIGNED-NEXT:    [[ADD:%.*]] = add i32 [[VAL0]], [[VAL1]]
+; ALIGNED-NEXT:    store i32 [[ADD]], ptr addrspace(1) [[OUT:%.*]], align 4
+; ALIGNED-NEXT:    ret void
+;
+; UNALIGNED-LABEL: @load_alloca16_unknown_offset_align1_i32(
+; UNALIGNED-NEXT:    [[ALLOCA:%.*]] = alloca [128 x i32], align 16, addrspace(5)
+; UNALIGNED-NEXT:    [[PTR0:%.*]] = getelementptr inbounds [128 x i32], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[OFFSET:%.*]]
+; UNALIGNED-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr addrspace(5) [[PTR0]], align 4
+; UNALIGNED-NEXT:    [[VAL01:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0
+; UNALIGNED-NEXT:    [[VAL12:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
+; UNALIGNED-NEXT:    [[ADD:%.*]] = add i32 [[VAL01]], [[VAL12]]
+; UNALIGNED-NEXT:    store i32 [[ADD]], ptr addrspace(1) [[OUT:%.*]], align 4
+; UNALIGNED-NEXT:    ret void
 ;
   %alloca = alloca [128 x i32], align 16, addrspace(5)
   %ptr0 = getelementptr inbounds [128 x i32], ptr addrspace(5) %alloca, i32 0, i32 %offset
@@ -235,11 +245,11 @@ define amdgpu_kernel void @merge_private_store_4_vector_elts_loads_v4i8() {
 define amdgpu_kernel void @merge_private_load_4_vector_elts_loads_v4i32() {
 ; CHECK-LABEL: @merge_private_load_4_vector_elts_loads_v4i32(
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca [8 x i32], align 4, addrspace(5)
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, ptr addrspace(5) [[ALLOCA]], align 4
-; CHECK-NEXT:    [[LOAD01:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
-; CHECK-NEXT:    [[LOAD12:%.*]] = extractelement <4 x i32> [[TMP2]], i32 1
-; CHECK-NEXT:    [[LOAD23:%.*]] = extractelement <4 x i32> [[TMP2]], i32 2
-; CHECK-NEXT:    [[LOAD34:%.*]] = extractelement <4 x i32> [[TMP2]], i32 3
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr addrspace(5) [[ALLOCA]], align 4
+; CHECK-NEXT:    [[LOAD01:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[LOAD12:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[LOAD23:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
+; CHECK-NEXT:    [[LOAD34:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
 ; CHECK-NEXT:    ret void
 ;
   %alloca = alloca [8 x i32], align 1, addrspace(5)
@@ -257,11 +267,11 @@ define amdgpu_kernel void @merge_private_load_4_vector_elts_loads_v4i32() {
 define amdgpu_kernel void @merge_private_load_4_vector_elts_loads_v4i8() {
 ; CHECK-LABEL: @merge_private_load_4_vector_elts_loads_v4i8(
 ; CHECK-NEXT:    [[ALLOCA:%.*]] = alloca [8 x i8], align 4, addrspace(5)
-; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr addrspace(5) [[ALLOCA]], align 4
-; CHECK-NEXT:    [[LOAD01:%.*]] = extractelement <4 x i8> [[TMP2]], i32 0
-; CHECK-NEXT:    [[LOAD12:%.*]] = extractelement <4 x i8> [[TMP2]], i32 1
-; CHECK-NEXT:    [[LOAD23:%.*]] = extractelement <4 x i8> [[TMP2]], i32 2
-; CHECK-NEXT:    [[LOAD34:%.*]] = extractelement <4 x i8> [[TMP2]], i32 3
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr addrspace(5) [[ALLOCA]], align 4
+; CHECK-NEXT:    [[LOAD01:%.*]] = extractelement <4 x i8> [[TMP1]], i32 0
+; CHECK-NEXT:    [[LOAD12:%.*]] = extractelement <4 x i8> [[TMP1]], i32 1
+; CHECK-NEXT:    [[LOAD23:%.*]] = extractelement <4 x i8> [[TMP1]], i32 2
+; CHECK-NEXT:    [[LOAD34:%.*]] = extractelement <4 x i8> [[TMP1]], i32 3
 ; CHECK-NEXT:    ret void
 ;
   %alloca = alloca [8 x i8], align 1, addrspace(5)
@@ -278,12 +288,15 @@ define amdgpu_kernel void @merge_private_load_4_vector_elts_loads_v4i8() {
 
 ; Make sure we don't think the alignment will increase if the base address isn't an alloca
 define void @private_store_2xi16_align2_not_alloca(ptr addrspace(5) %p, ptr addrspace(5) %r) #0 {
-; CHECK-LABEL: @private_store_2xi16_align2_not_alloca(
-; ALIGNED-NEXT:  [[GEP_R:%.*]] = getelementptr i16, ptr addrspace(5) [[R:%.*]], i32 1
-; ALIGNED-NEXT:  store i16 1, ptr addrspace(5) [[R]], align 2
-; ALIGNED-NEXT:  store i16 2, ptr addrspace(5) [[GEP_R]], align 2
-; UNALIGNED-NEXT:store <2 x i16>
-; CHECK-NEXT:    ret void
+; ALIGNED-LABEL: @private_store_2xi16_align2_not_alloca(
+; ALIGNED-NEXT:    [[GEP_R:%.*]] = getelementptr i16, ptr addrspace(5) [[R:%.*]], i32 1
+; ALIGNED-NEXT:    store i16 1, ptr addrspace(5) [[R]], align 2
+; ALIGNED-NEXT:    store i16 2, ptr addrspace(5) [[GEP_R]], align 2
+; ALIGNED-NEXT:    ret void
+;
+; UNALIGNED-LABEL: @private_store_2xi16_align2_not_alloca(
+; UNALIGNED-NEXT:    store <2 x i16> <i16 1, i16 2>, ptr addrspace(5) [[R:%.*]], align 2
+; UNALIGNED-NEXT:    ret void
 ;
   %gep.r = getelementptr i16, ptr addrspace(5) %r, i32 1
   store i16 1, ptr addrspace(5) %r, align 2
@@ -309,16 +322,25 @@ define void @private_store_2xi16_align1_not_alloca(ptr addrspace(5) %p, ptr addr
 }
 
 define i32 @private_load_2xi16_align2_not_alloca(ptr addrspace(5) %p) #0 {
-; CHECK-LABEL: @private_load_2xi16_align2_not_alloca(
-; ALIGNED-NEXT:  [[GEP_P:%.*]] = getelementptr i16, ptr addrspace(5) [[P:%.*]], i64 1
-; ALIGNED-NEXT:  [[P_0:%.*]] = load i16, ptr addrspace(5) [[P]], align 2
-; ALIGNED-NEXT:  [[P_1:%.*]] = load i16, ptr addrspace(5) [[GEP_P]], align 2
-; UNALIGNED-NEXT:load <2 x i16>
-; CHECK:         [[ZEXT_0:%.*]] = zext i16
-; CHECK-NEXT:    [[ZEXT_1:%.*]] = zext i16
-; CHECK-NEXT:    [[SHL_1:%.*]] = shl i32 [[ZEXT_1]], 16
-; CHECK-NEXT:    [[OR:%.*]] = or i32 [[ZEXT_0]], [[SHL_1]]
-; CHECK-NEXT:    ret i32 [[OR]]
+; ALIGNED-LABEL: @private_load_2xi16_align2_not_alloca(
+; ALIGNED-NEXT:    [[GEP_P:%.*]] = getelementptr i16, ptr addrspace(5) [[P:%.*]], i64 1
+; ALIGNED-NEXT:    [[P_0:%.*]] = load i16, ptr addrspace(5) [[P]], align 2
+; ALIGNED-NEXT:    [[P_1:%.*]] = load i16, ptr addrspace(5) [[GEP_P]], align 2
+; ALIGNED-NEXT:    [[ZEXT_0:%.*]] = zext i16 [[P_0]] to i32
+; ALIGNED-NEXT:    [[ZEXT_1:%.*]] = zext i16 [[P_1]] to i32
+; ALIGNED-NEXT:    [[SHL_1:%.*]] = shl i32 [[ZEXT_1]], 16
+; ALIGNED-NEXT:    [[OR:%.*]] = or i32 [[ZEXT_0]], [[SHL_1]]
+; ALIGNED-NEXT:    ret i32 [[OR]]
+;
+; UNALIGNED-LABEL: @private_load_2xi16_align2_not_alloca(
+; UNALIGNED-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr addrspace(5) [[P:%.*]], align 2
+; UNALIGNED-NEXT:    [[P_01:%.*]] = extractelement <2 x i16> [[TMP1]], i32 0
+; UNALIGNED-NEXT:    [[P_12:%.*]] = extractelement <2 x i16> [[TMP1]], i32 1
+; UNALIGNED-NEXT:    [[ZEXT_0:%.*]] = zext i16 [[P_01]] to i32
+; UNALIGNED-NEXT:    [[ZEXT_1:%.*]] = zext i16 [[P_12]] to i32
+; UNALIGNED-NEXT:    [[SHL_1:%.*]] = shl i32 [[ZEXT_1]], 16
+; UNALIGNED-NEXT:    [[OR:%.*]] = or i32 [[ZEXT_0]], [[SHL_1]]
+; UNALIGNED-NEXT:    ret i32 [[OR]]
 ;
   %gep.p = getelementptr i16, ptr addrspace(5) %p, i64 1
   %p.0 = load i16, ptr addrspace(5) %p, align 2
@@ -342,9 +364,9 @@ define i32 @private_load_2xi16_align1_not_alloca(ptr addrspace(5) %p) #0 {
 ; ALIGNED-NEXT:    ret i32 [[OR]]
 ;
 ; UNALIGNED-LABEL: @private_load_2xi16_align1_not_alloca(
-; UNALIGNED-NEXT:    [[TMP2:%.*]] = load <2 x i16>, ptr addrspace(5) [[P:%.*]], align 1
-; UNALIGNED-NEXT:    [[P_01:%.*]] = extractelement <2 x i16> [[TMP2]], i32 0
-; UNALIGNED-NEXT:    [[P_12:%.*]] = extractelement <2 x i16> [[TMP2]], i32 1
+; UNALIGNED-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr addrspace(5) [[P:%.*]], align 1
+; UNALIGNED-NEXT:    [[P_01:%.*]] = extractelement <2 x i16> [[TMP1]], i32 0
+; UNALIGNED-NEXT:    [[P_12:%.*]] = extractelement <2 x i16> [[TMP1]], i32 1
 ; UNALIGNED-NEXT:    [[ZEXT_0:%.*]] = zext i16 [[P_01]] to i32
 ; UNALIGNED-NEXT:    [[ZEXT_1:%.*]] = zext i16 [[P_12]] to i32
 ; UNALIGNED-NEXT:    [[SHL_1:%.*]] = shl i32 [[ZEXT_1]], 16

diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/dont-adjust-globalobj-alignment.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/dont-adjust-globalobj-alignment.ll
new file mode 100644
index 0000000000000..c9dc851aadba7
--- /dev/null
+++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/dont-adjust-globalobj-alignment.ll
@@ -0,0 +1,24 @@
+; RUN: opt -S -passes=load-store-vectorizer --mcpu=hawaii -mattr=+unaligned-access-mode,+unaligned-scratch-access,+max-private-element-size-16 < %s | FileCheck --match-full-lines %s
+
+target triple = "amdgcn--"
+target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-p7:160:256:256:32-p8:128:128-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5"
+
+@G = internal addrspace(5) global [8 x i16] undef, align 1
+
+; Verify that the alignment of the global remains at 1, even if we vectorize
+; the stores.
+;
+; CHECK: @G = internal addrspace(5) global [8 x i16] undef, align 1
+
+define void @private_store_2xi16_align2_not_alloca(ptr addrspace(5) %p, ptr addrspace(5) %r) {
+; CHECK: define void @private_store_2xi16_align2_not_alloca(ptr addrspace(5) [[P:%.*]], ptr addrspace(5) [[R:%.*]]) #0 {
+; CHECK-NEXT:    [[GEP0:%.*]] = getelementptr i16, ptr addrspace(5) @G, i32 0
+; CHECK-NEXT:    store <2 x i16> <i16 1, i16 2>, ptr addrspace(5) [[GEP0]], align 1
+; CHECK-NEXT:    ret void
+;
+  %gep0 = getelementptr i16, ptr addrspace(5) @G, i32 0
+  %gep1 = getelementptr i16, ptr addrspace(5) @G, i32 1
+  store i16 1, ptr addrspace(5) %gep0, align 1
+  store i16 2, ptr addrspace(5) %gep1, align 1
+  ret void
+}


        

