[llvm] e9ab7ff - SeparateConstOffsetFromGEP: Copy a test to AMDGPU

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 26 10:58:13 PDT 2023


Author: Matt Arsenault
Date: 2023-06-26T13:58:06-04:00
New Revision: e9ab7ff73aea954669e9c52aa3917cee6bb6fb83

URL: https://github.com/llvm/llvm-project/commit/e9ab7ff73aea954669e9c52aa3917cee6bb6fb83
DIFF: https://github.com/llvm/llvm-project/commit/e9ab7ff73aea954669e9c52aa3917cee6bb6fb83.diff

LOG: SeparateConstOffsetFromGEP: Copy a test to AMDGPU

Added: 
    llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/split-gep-and-gvn.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/split-gep-and-gvn.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/split-gep-and-gvn.ll
new file mode 100644
index 0000000000000..237c2b42b1901
--- /dev/null
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/split-gep-and-gvn.ll
@@ -0,0 +1,293 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=separate-const-offset-from-gep,gvn \
+; RUN:     -reassociate-geps-verify-no-dead-code < %s \
+; RUN:     | FileCheck --check-prefix=IR %s
+
+; Verifies the SeparateConstOffsetFromGEP pass.
+; The following code computes
+; *output = array[x][y] + array[x][y+1] + array[x+1][y] + array[x+1][y+1]
+;
+; We expect SeparateConstOffsetFromGEP to transform it to
+;
+; ptr base = &array[x][y];
+; *output = base[0] + base[1] + base[32] + base[33];
+;
+; so the backend can emit code that uses fewer virtual registers.
+
+@array = internal addrspace(3) global [32 x [32 x float]] zeroinitializer, align 4
+
+define void @sum_of_array(i32 %x, i32 %y, ptr nocapture %output) {
+; IR-LABEL: define void @sum_of_array
+; IR-SAME: (i32 [[X:%.*]], i32 [[Y:%.*]], ptr nocapture [[OUTPUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; IR-NEXT:  .preheader:
+; IR-NEXT:    [[I:%.*]] = sext i32 [[Y]] to i64
+; IR-NEXT:    [[I1:%.*]] = sext i32 [[X]] to i64
+; IR-NEXT:    [[I2:%.*]] = getelementptr [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 [[I1]], i64 [[I]]
+; IR-NEXT:    [[I3:%.*]] = addrspacecast ptr addrspace(3) [[I2]] to ptr
+; IR-NEXT:    [[I4:%.*]] = load float, ptr [[I3]], align 4
+; IR-NEXT:    [[I5:%.*]] = fadd float [[I4]], 0.000000e+00
+; IR-NEXT:    [[I82:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[I2]], i64 1
+; IR-NEXT:    [[I9:%.*]] = addrspacecast ptr addrspace(3) [[I82]] to ptr
+; IR-NEXT:    [[I10:%.*]] = load float, ptr [[I9]], align 4
+; IR-NEXT:    [[I11:%.*]] = fadd float [[I5]], [[I10]]
+; IR-NEXT:    [[I144:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[I2]], i64 32
+; IR-NEXT:    [[I15:%.*]] = addrspacecast ptr addrspace(3) [[I144]] to ptr
+; IR-NEXT:    [[I16:%.*]] = load float, ptr [[I15]], align 4
+; IR-NEXT:    [[I17:%.*]] = fadd float [[I11]], [[I16]]
+; IR-NEXT:    [[I187:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[I2]], i64 33
+; IR-NEXT:    [[I19:%.*]] = addrspacecast ptr addrspace(3) [[I187]] to ptr
+; IR-NEXT:    [[I20:%.*]] = load float, ptr [[I19]], align 4
+; IR-NEXT:    [[I21:%.*]] = fadd float [[I17]], [[I20]]
+; IR-NEXT:    store float [[I21]], ptr [[OUTPUT]], align 4
+; IR-NEXT:    ret void
+;
+.preheader:
+  %i = sext i32 %y to i64
+  %i1 = sext i32 %x to i64
+  %i2 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i1, i64 %i
+  %i3 = addrspacecast ptr addrspace(3) %i2 to ptr
+  %i4 = load float, ptr %i3, align 4
+  %i5 = fadd float %i4, 0.000000e+00
+  %i6 = add i32 %y, 1
+  %i7 = sext i32 %i6 to i64
+  %i8 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i1, i64 %i7
+  %i9 = addrspacecast ptr addrspace(3) %i8 to ptr
+  %i10 = load float, ptr %i9, align 4
+  %i11 = fadd float %i5, %i10
+  %i12 = add i32 %x, 1
+  %i13 = sext i32 %i12 to i64
+  %i14 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i13, i64 %i
+  %i15 = addrspacecast ptr addrspace(3) %i14 to ptr
+  %i16 = load float, ptr %i15, align 4
+  %i17 = fadd float %i11, %i16
+  %i18 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i13, i64 %i7
+  %i19 = addrspacecast ptr addrspace(3) %i18 to ptr
+  %i20 = load float, ptr %i19, align 4
+  %i21 = fadd float %i17, %i20
+  store float %i21, ptr %output, align 4
+  ret void
+}
+
+
+; TODO: GVN is unable to preserve the "inbounds" keyword on the first GEP. Need
+; some infrastructure changes to enable such optimizations.
+
+; @sum_of_array2 is very similar to @sum_of_array. The only difference is in
+; the order of "sext" and "add" when computing the array indices. @sum_of_array
+; computes add before sext, e.g., array[sext(x + 1)][sext(y + 1)], while
+; @sum_of_array2 computes sext before add,
+; e.g., array[sext(x) + 1][sext(y) + 1]. SeparateConstOffsetFromGEP should be
+; able to extract constant offsets from both forms.
+define void @sum_of_array2(i32 %x, i32 %y, ptr nocapture %output) {
+; IR-LABEL: define void @sum_of_array2
+; IR-SAME: (i32 [[X:%.*]], i32 [[Y:%.*]], ptr nocapture [[OUTPUT:%.*]]) #[[ATTR0]] {
+; IR-NEXT:  .preheader:
+; IR-NEXT:    [[I:%.*]] = sext i32 [[Y]] to i64
+; IR-NEXT:    [[I1:%.*]] = sext i32 [[X]] to i64
+; IR-NEXT:    [[I2:%.*]] = getelementptr [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 [[I1]], i64 [[I]]
+; IR-NEXT:    [[I3:%.*]] = addrspacecast ptr addrspace(3) [[I2]] to ptr
+; IR-NEXT:    [[I4:%.*]] = load float, ptr [[I3]], align 4
+; IR-NEXT:    [[I5:%.*]] = fadd float [[I4]], 0.000000e+00
+; IR-NEXT:    [[I72:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[I2]], i64 1
+; IR-NEXT:    [[I8:%.*]] = addrspacecast ptr addrspace(3) [[I72]] to ptr
+; IR-NEXT:    [[I9:%.*]] = load float, ptr [[I8]], align 4
+; IR-NEXT:    [[I10:%.*]] = fadd float [[I5]], [[I9]]
+; IR-NEXT:    [[I124:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[I2]], i64 32
+; IR-NEXT:    [[I13:%.*]] = addrspacecast ptr addrspace(3) [[I124]] to ptr
+; IR-NEXT:    [[I14:%.*]] = load float, ptr [[I13]], align 4
+; IR-NEXT:    [[I15:%.*]] = fadd float [[I10]], [[I14]]
+; IR-NEXT:    [[I167:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[I2]], i64 33
+; IR-NEXT:    [[I17:%.*]] = addrspacecast ptr addrspace(3) [[I167]] to ptr
+; IR-NEXT:    [[I18:%.*]] = load float, ptr [[I17]], align 4
+; IR-NEXT:    [[I19:%.*]] = fadd float [[I15]], [[I18]]
+; IR-NEXT:    store float [[I19]], ptr [[OUTPUT]], align 4
+; IR-NEXT:    ret void
+;
+.preheader:
+  %i = sext i32 %y to i64
+  %i1 = sext i32 %x to i64
+  %i2 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i1, i64 %i
+  %i3 = addrspacecast ptr addrspace(3) %i2 to ptr
+  %i4 = load float, ptr %i3, align 4
+  %i5 = fadd float %i4, 0.000000e+00
+  %i6 = add i64 %i, 1
+  %i7 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i1, i64 %i6
+  %i8 = addrspacecast ptr addrspace(3) %i7 to ptr
+  %i9 = load float, ptr %i8, align 4
+  %i10 = fadd float %i5, %i9
+  %i11 = add i64 %i1, 1
+  %i12 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i11, i64 %i
+  %i13 = addrspacecast ptr addrspace(3) %i12 to ptr
+  %i14 = load float, ptr %i13, align 4
+  %i15 = fadd float %i10, %i14
+  %i16 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i11, i64 %i6
+  %i17 = addrspacecast ptr addrspace(3) %i16 to ptr
+  %i18 = load float, ptr %i17, align 4
+  %i19 = fadd float %i15, %i18
+  store float %i19, ptr %output, align 4
+  ret void
+}
+
+; This function loads
+;   array[zext(x)][zext(y)]
+;   array[zext(x)][zext(y +nuw 1)]
+;   array[zext(x +nuw 1)][zext(y)]
+;   array[zext(x +nuw 1)][zext(y +nuw 1)].
+;
+; This function is similar to @sum_of_array, but it
+; 1) extends array indices using zext instead of sext;
+; 2) annotates the addition with "nuw"; otherwise, zext(x + 1) => zext(x) + 1
+;    may be invalid.
+
+define void @sum_of_array3(i32 %x, i32 %y, ptr nocapture %output) {
+; IR-LABEL: define void @sum_of_array3
+; IR-SAME: (i32 [[X:%.*]], i32 [[Y:%.*]], ptr nocapture [[OUTPUT:%.*]]) #[[ATTR0]] {
+; IR-NEXT:  .preheader:
+; IR-NEXT:    [[I:%.*]] = zext i32 [[Y]] to i64
+; IR-NEXT:    [[I1:%.*]] = zext i32 [[X]] to i64
+; IR-NEXT:    [[I2:%.*]] = getelementptr [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 [[I1]], i64 [[I]]
+; IR-NEXT:    [[I3:%.*]] = addrspacecast ptr addrspace(3) [[I2]] to ptr
+; IR-NEXT:    [[I4:%.*]] = load float, ptr [[I3]], align 4
+; IR-NEXT:    [[I5:%.*]] = fadd float [[I4]], 0.000000e+00
+; IR-NEXT:    [[I82:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[I2]], i64 1
+; IR-NEXT:    [[I9:%.*]] = addrspacecast ptr addrspace(3) [[I82]] to ptr
+; IR-NEXT:    [[I10:%.*]] = load float, ptr [[I9]], align 4
+; IR-NEXT:    [[I11:%.*]] = fadd float [[I5]], [[I10]]
+; IR-NEXT:    [[I144:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[I2]], i64 32
+; IR-NEXT:    [[I15:%.*]] = addrspacecast ptr addrspace(3) [[I144]] to ptr
+; IR-NEXT:    [[I16:%.*]] = load float, ptr [[I15]], align 4
+; IR-NEXT:    [[I17:%.*]] = fadd float [[I11]], [[I16]]
+; IR-NEXT:    [[I187:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[I2]], i64 33
+; IR-NEXT:    [[I19:%.*]] = addrspacecast ptr addrspace(3) [[I187]] to ptr
+; IR-NEXT:    [[I20:%.*]] = load float, ptr [[I19]], align 4
+; IR-NEXT:    [[I21:%.*]] = fadd float [[I17]], [[I20]]
+; IR-NEXT:    store float [[I21]], ptr [[OUTPUT]], align 4
+; IR-NEXT:    ret void
+;
+.preheader:
+  %i = zext i32 %y to i64
+  %i1 = zext i32 %x to i64
+  %i2 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i1, i64 %i
+  %i3 = addrspacecast ptr addrspace(3) %i2 to ptr
+  %i4 = load float, ptr %i3, align 4
+  %i5 = fadd float %i4, 0.000000e+00
+  %i6 = add nuw i32 %y, 1
+  %i7 = zext i32 %i6 to i64
+  %i8 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i1, i64 %i7
+  %i9 = addrspacecast ptr addrspace(3) %i8 to ptr
+  %i10 = load float, ptr %i9, align 4
+  %i11 = fadd float %i5, %i10
+  %i12 = add nuw i32 %x, 1
+  %i13 = zext i32 %i12 to i64
+  %i14 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i13, i64 %i
+  %i15 = addrspacecast ptr addrspace(3) %i14 to ptr
+  %i16 = load float, ptr %i15, align 4
+  %i17 = fadd float %i11, %i16
+  %i18 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i13, i64 %i7
+  %i19 = addrspacecast ptr addrspace(3) %i18 to ptr
+  %i20 = load float, ptr %i19, align 4
+  %i21 = fadd float %i17, %i20
+  store float %i21, ptr %output, align 4
+  ret void
+}
+
+; This function loads
+;   array[zext(x)][zext(y)]
+;   array[zext(x)][zext(y) + 1]
+;   array[zext(x) + 1][zext(y)]
+;   array[zext(x) + 1][zext(y) + 1].
+;
+; We expect the generated code to reuse the computation of
+; &array[zext(x)][zext(y)]. See the expected IR for details.
+define void @sum_of_array4(i32 %x, i32 %y, ptr nocapture %output) {
+; IR-LABEL: define void @sum_of_array4
+; IR-SAME: (i32 [[X:%.*]], i32 [[Y:%.*]], ptr nocapture [[OUTPUT:%.*]]) #[[ATTR0]] {
+; IR-NEXT:  .preheader:
+; IR-NEXT:    [[I:%.*]] = zext i32 [[Y]] to i64
+; IR-NEXT:    [[I1:%.*]] = zext i32 [[X]] to i64
+; IR-NEXT:    [[I2:%.*]] = getelementptr [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 [[I1]], i64 [[I]]
+; IR-NEXT:    [[I3:%.*]] = addrspacecast ptr addrspace(3) [[I2]] to ptr
+; IR-NEXT:    [[I4:%.*]] = load float, ptr [[I3]], align 4
+; IR-NEXT:    [[I5:%.*]] = fadd float [[I4]], 0.000000e+00
+; IR-NEXT:    [[I72:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[I2]], i64 1
+; IR-NEXT:    [[I8:%.*]] = addrspacecast ptr addrspace(3) [[I72]] to ptr
+; IR-NEXT:    [[I9:%.*]] = load float, ptr [[I8]], align 4
+; IR-NEXT:    [[I10:%.*]] = fadd float [[I5]], [[I9]]
+; IR-NEXT:    [[I124:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[I2]], i64 32
+; IR-NEXT:    [[I13:%.*]] = addrspacecast ptr addrspace(3) [[I124]] to ptr
+; IR-NEXT:    [[I14:%.*]] = load float, ptr [[I13]], align 4
+; IR-NEXT:    [[I15:%.*]] = fadd float [[I10]], [[I14]]
+; IR-NEXT:    [[I167:%.*]] = getelementptr inbounds float, ptr addrspace(3) [[I2]], i64 33
+; IR-NEXT:    [[I17:%.*]] = addrspacecast ptr addrspace(3) [[I167]] to ptr
+; IR-NEXT:    [[I18:%.*]] = load float, ptr [[I17]], align 4
+; IR-NEXT:    [[I19:%.*]] = fadd float [[I15]], [[I18]]
+; IR-NEXT:    store float [[I19]], ptr [[OUTPUT]], align 4
+; IR-NEXT:    ret void
+;
+.preheader:
+  %i = zext i32 %y to i64
+  %i1 = zext i32 %x to i64
+  %i2 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i1, i64 %i
+  %i3 = addrspacecast ptr addrspace(3) %i2 to ptr
+  %i4 = load float, ptr %i3, align 4
+  %i5 = fadd float %i4, 0.000000e+00
+  %i6 = add i64 %i, 1
+  %i7 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i1, i64 %i6
+  %i8 = addrspacecast ptr addrspace(3) %i7 to ptr
+  %i9 = load float, ptr %i8, align 4
+  %i10 = fadd float %i5, %i9
+  %i11 = add i64 %i1, 1
+  %i12 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i11, i64 %i
+  %i13 = addrspacecast ptr addrspace(3) %i12 to ptr
+  %i14 = load float, ptr %i13, align 4
+  %i15 = fadd float %i10, %i14
+  %i16 = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %i11, i64 %i6
+  %i17 = addrspacecast ptr addrspace(3) %i16 to ptr
+  %i18 = load float, ptr %i17, align 4
+  %i19 = fadd float %i15, %i18
+  store float %i19, ptr %output, align 4
+  ret void
+}
+
+; The source code is:
+;   p0 = &input[sext(x + y)];
+;   p1 = &input[sext(x + (y + 5))];
+;
+; Without reuniting extensions, SeparateConstOffsetFromGEP would emit
+;   p0 = &input[sext(x + y)];
+;   t1 = &input[sext(x) + sext(y)];
+;   p1 = &t1[5];
+;
+; With reuniting extensions, it merges p0 and t1 and thus emits
+;   p0 = &input[sext(x + y)];
+;   p1 = &p0[5];
+define void @reunion(i32 %x, i32 %y, ptr %input) {
+; IR-LABEL: define void @reunion
+; IR-SAME: (i32 [[X:%.*]], i32 [[Y:%.*]], ptr [[INPUT:%.*]]) #[[ATTR0]] {
+; IR-NEXT:  entry:
+; IR-NEXT:    [[XY:%.*]] = add nsw i32 [[X]], [[Y]]
+; IR-NEXT:    [[I:%.*]] = sext i32 [[XY]] to i64
+; IR-NEXT:    [[P0:%.*]] = getelementptr float, ptr [[INPUT]], i64 [[I]]
+; IR-NEXT:    [[V0:%.*]] = load float, ptr [[P0]], align 4
+; IR-NEXT:    call void @use(float [[V0]])
+; IR-NEXT:    [[P13:%.*]] = getelementptr inbounds float, ptr [[P0]], i64 5
+; IR-NEXT:    [[V1:%.*]] = load float, ptr [[P13]], align 4
+; IR-NEXT:    call void @use(float [[V1]])
+; IR-NEXT:    ret void
+;
+entry:
+  %xy = add nsw i32 %x, %y
+  %i = sext i32 %xy to i64
+  %p0 = getelementptr inbounds float, ptr %input, i64 %i
+  %v0 = load float, ptr %p0, align 4
+  call void @use(float %v0)
+  %y5 = add nsw i32 %y, 5
+  %xy5 = add nsw i32 %x, %y5
+  %i1 = sext i32 %xy5 to i64
+  %p1 = getelementptr inbounds float, ptr %input, i64 %i1
+  %v1 = load float, ptr %p1, align 4
+  call void @use(float %v1)
+  ret void
+}
+
+declare void @use(float)
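
For readers skimming the checks: a minimal before/after sketch of the
rewrite the pass performs on @sum_of_array, distilled from the CHECK
lines above (illustrative IR only, not part of the committed test):

  ; before: each load uses its own multi-index GEP, e.g. for array[x+1][y+1]
  %row = add i32 %x, 1
  %r64 = sext i32 %row to i64
  %col = add i32 %y, 1
  %c64 = sext i32 %col to i64
  %p = getelementptr inbounds [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %r64, i64 %c64

  ; after: one shared base at &array[x][y] (note: inbounds is dropped,
  ; per the TODO above), plus a flat constant offset; with 32 floats
  ; per row, [x+1][y+1] becomes base + 32*1 + 1 = base + 33
  %x64 = sext i32 %x to i64
  %y64 = sext i32 %y to i64
  %base = getelementptr [32 x [32 x float]], ptr addrspace(3) @array, i64 0, i64 %x64, i64 %y64
  %p = getelementptr inbounds float, ptr addrspace(3) %base, i64 33

GVN then CSEs the shared base across the four loads. @sum_of_array2
shows the same canonicalization when the source computes sext(x) + 1
instead of sext(x + 1).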

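And a quick worked case for the nuw requirement called out before
@sum_of_array3 (again an illustration, not from the commit): take
x = 4294967295, the all-ones i32.

  ; without nuw the two sides disagree:
  ;   zext(x + 1)  =  zext(0)        = 0            (the i32 add wraps)
  ;   zext(x) + 1  =  4294967295 + 1 = 4294967296   (computed in i64)
  ; with "add nuw", a wrapping add produces poison, so the optimizer
  ; may assume that case never occurs and the rewrite
  ;   zext(x +nuw 1)  ==>  zext(x) + 1
  ; is sound.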

More information about the llvm-commits mailing list