[llvm] [AMDGPU] Enable more consecutive load folding during aggressive-instcombine (PR #158036)
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Fri Sep 12 02:08:48 PDT 2025
================
@@ -0,0 +1,238 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -passes=sroa,instcombine,aggressive-instcombine %s -S -o - | FileCheck %s
+
+define i64 @quux(ptr %arg) {
+; CHECK-LABEL: define i64 @quux(
+; CHECK-SAME: ptr [[ARG:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[BB:.*:]]
+; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[ARG]], align 1
+; CHECK-NEXT: ret i64 [[LOAD]]
+;
+bb:
+ %load = load i8, ptr %arg, align 1
+ %getelementptr = getelementptr inbounds nuw i8, ptr %arg, i64 1
+ %load1 = load i8, ptr %getelementptr, align 1
+ %getelementptr2 = getelementptr inbounds nuw i8, ptr %arg, i64 2
+ %load3 = load i8, ptr %getelementptr2, align 1
+ %getelementptr4 = getelementptr inbounds nuw i8, ptr %arg, i64 3
+ %load5 = load i8, ptr %getelementptr4, align 1
+ %getelementptr6 = getelementptr inbounds nuw i8, ptr %arg, i64 4
+ %load7 = load i8, ptr %getelementptr6, align 1
+ %getelementptr8 = getelementptr inbounds nuw i8, ptr %arg, i64 5
+ %load9 = load i8, ptr %getelementptr8, align 1
+ %getelementptr10 = getelementptr inbounds nuw i8, ptr %arg, i64 6
+ %load11 = load i8, ptr %getelementptr10, align 1
+ %getelementptr12 = getelementptr inbounds nuw i8, ptr %arg, i64 7
+ %load13 = load i8, ptr %getelementptr12, align 1
+ %zext = zext i8 %load13 to i64
+ %shl = shl nuw i64 %zext, 56
+ %zext14 = zext i8 %load11 to i64
+ %shl15 = shl nuw nsw i64 %zext14, 48
+ %or = or disjoint i64 %shl, %shl15
+ %zext16 = zext i8 %load9 to i64
+ %shl17 = shl nuw nsw i64 %zext16, 40
+ %or18 = or disjoint i64 %or, %shl17
+ %zext19 = zext i8 %load7 to i64
+ %shl20 = shl nuw nsw i64 %zext19, 32
+ %or21 = or disjoint i64 %or18, %shl20
+ %zext22 = zext i8 %load5 to i64
+ %shl23 = shl nuw nsw i64 %zext22, 24
+ %or24 = or disjoint i64 %or21, %shl23
+ %zext25 = zext i8 %load3 to i64
+ %shl26 = shl nuw nsw i64 %zext25, 16
+ %zext27 = zext i8 %load1 to i64
+ %shl28 = shl nuw nsw i64 %zext27, 8
+ %or29 = or disjoint i64 %or24, %shl26
+ %zext30 = zext i8 %load to i64
+ %or31 = or i64 %or29, %shl28
+ %or32 = or i64 %or31, %zext30
+ ret i64 %or32
+}
+
+
+; The following test case was reduced from a client kernel
+define fastcc <16 x float> @hoge(ptr %arg) {
+; CHECK-LABEL: define fastcc <16 x float> @hoge(
+; CHECK-SAME: ptr [[ARG:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[BB:.*:]]
+; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[ARG]], align 8
+; CHECK-NEXT: [[LOAD28:%.*]] = load i64, ptr [[LOAD]], align 1
+; CHECK-NEXT: [[GETELEMENTPTR72:%.*]] = getelementptr i8, ptr [[LOAD]], i64 8
+; CHECK-NEXT: [[LOAD73:%.*]] = load i64, ptr [[GETELEMENTPTR72]], align 1
+; CHECK-NEXT: [[GETELEMENTPTR120:%.*]] = getelementptr i8, ptr [[LOAD]], i64 16
+; CHECK-NEXT: [[LOAD121:%.*]] = load i64, ptr [[GETELEMENTPTR120]], align 1
+; CHECK-NEXT: [[GETELEMENTPTR168:%.*]] = getelementptr i8, ptr [[LOAD]], i64 24
+; CHECK-NEXT: [[LOAD169:%.*]] = load i64, ptr [[GETELEMENTPTR168]], align 1
+; CHECK-NEXT: [[CALL:%.*]] = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8(i64 [[LOAD28]], i64 0, <16 x float> zeroinitializer, i32 0, i32 0, i32 0)
+; CHECK-NEXT: [[CALL225:%.*]] = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8(i64 [[LOAD73]], i64 0, <16 x float> [[CALL]], i32 0, i32 0, i32 0)
+; CHECK-NEXT: [[CALL230:%.*]] = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8(i64 [[LOAD121]], i64 0, <16 x float> [[CALL225]], i32 0, i32 0, i32 0)
+; CHECK-NEXT: [[CALL235:%.*]] = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8(i64 [[LOAD169]], i64 0, <16 x float> [[CALL230]], i32 0, i32 0, i32 0)
+; CHECK-NEXT: ret <16 x float> [[CALL235]]
+;
+bb:
+ %load = load ptr, ptr %arg, align 8
+ %load28 = load i8, ptr %load, align 1
+ %getelementptr30 = getelementptr i8, ptr %load, i64 1
+ %load31 = load i8, ptr %getelementptr30, align 1
+ %getelementptr36 = getelementptr i8, ptr %load, i64 2
+ %load37 = load i8, ptr %getelementptr36, align 1
+ %getelementptr42 = getelementptr i8, ptr %load, i64 3
+ %load43 = load i8, ptr %getelementptr42, align 1
+ %getelementptr48 = getelementptr i8, ptr %load, i64 4
+ %load49 = load i8, ptr %getelementptr48, align 1
+ %getelementptr54 = getelementptr i8, ptr %load, i64 5
+ %load55 = load i8, ptr %getelementptr54, align 1
+ %getelementptr60 = getelementptr i8, ptr %load, i64 6
+ %load61 = load i8, ptr %getelementptr60, align 1
+ %getelementptr66 = getelementptr i8, ptr %load, i64 7
+ %load67 = load i8, ptr %getelementptr66, align 1
+ %getelementptr72 = getelementptr i8, ptr %load, i64 8
+ %load73 = load i8, ptr %getelementptr72, align 1
+ %getelementptr78 = getelementptr i8, ptr %load, i64 9
+ %load79 = load i8, ptr %getelementptr78, align 1
+ %getelementptr84 = getelementptr i8, ptr %load, i64 10
+ %load85 = load i8, ptr %getelementptr84, align 1
+ %getelementptr90 = getelementptr i8, ptr %load, i64 11
+ %load91 = load i8, ptr %getelementptr90, align 1
+ %getelementptr96 = getelementptr i8, ptr %load, i64 12
+ %load97 = load i8, ptr %getelementptr96, align 1
+ %getelementptr102 = getelementptr i8, ptr %load, i64 13
+ %load103 = load i8, ptr %getelementptr102, align 1
+ %getelementptr108 = getelementptr i8, ptr %load, i64 14
+ %load109 = load i8, ptr %getelementptr108, align 1
+ %getelementptr114 = getelementptr i8, ptr %load, i64 15
+ %load115 = load i8, ptr %getelementptr114, align 1
+ %getelementptr120 = getelementptr i8, ptr %load, i64 16
+ %load121 = load i8, ptr %getelementptr120, align 1
+ %getelementptr126 = getelementptr i8, ptr %load, i64 17
+ %load127 = load i8, ptr %getelementptr126, align 1
+ %getelementptr132 = getelementptr i8, ptr %load, i64 18
+ %load133 = load i8, ptr %getelementptr132, align 1
+ %getelementptr138 = getelementptr i8, ptr %load, i64 19
+ %load139 = load i8, ptr %getelementptr138, align 1
+ %getelementptr144 = getelementptr i8, ptr %load, i64 20
+ %load145 = load i8, ptr %getelementptr144, align 1
+ %getelementptr150 = getelementptr i8, ptr %load, i64 21
+ %load151 = load i8, ptr %getelementptr150, align 1
+ %getelementptr156 = getelementptr i8, ptr %load, i64 22
+ %load157 = load i8, ptr %getelementptr156, align 1
+ %getelementptr162 = getelementptr i8, ptr %load, i64 23
+ %load163 = load i8, ptr %getelementptr162, align 1
+ %getelementptr168 = getelementptr i8, ptr %load, i64 24
+ %load169 = load i8, ptr %getelementptr168, align 1
+ %getelementptr174 = getelementptr i8, ptr %load, i64 25
+ %load175 = load i8, ptr %getelementptr174, align 1
+ %getelementptr180 = getelementptr i8, ptr %load, i64 26
+ %load181 = load i8, ptr %getelementptr180, align 1
+ %getelementptr186 = getelementptr i8, ptr %load, i64 27
+ %load187 = load i8, ptr %getelementptr186, align 1
+ %getelementptr192 = getelementptr i8, ptr %load, i64 28
+ %load193 = load i8, ptr %getelementptr192, align 1
+ %getelementptr198 = getelementptr i8, ptr %load, i64 29
+ %load199 = load i8, ptr %getelementptr198, align 1
+ %getelementptr204 = getelementptr i8, ptr %load, i64 30
+ %load205 = load i8, ptr %getelementptr204, align 1
+ %getelementptr210 = getelementptr i8, ptr %load, i64 31
+ %load211 = load i8, ptr %getelementptr210, align 1
+ %alloca1.sroa.8.0.insert.ext = zext i8 %load67 to i64
+ %alloca1.sroa.8.0.insert.shift = shl i64 %alloca1.sroa.8.0.insert.ext, 56
+ %alloca1.sroa.7.0.insert.ext = zext i8 %load61 to i64
+ %alloca1.sroa.7.0.insert.shift = shl i64 %alloca1.sroa.7.0.insert.ext, 48
+ %alloca1.sroa.7.0.insert.insert = or i64 %alloca1.sroa.8.0.insert.shift, %alloca1.sroa.7.0.insert.shift
+ %alloca1.sroa.6.0.insert.ext = zext i8 %load55 to i64
+ %alloca1.sroa.6.0.insert.shift = shl i64 %alloca1.sroa.6.0.insert.ext, 40
+ %alloca1.sroa.6.0.insert.insert = or i64 %alloca1.sroa.7.0.insert.insert, %alloca1.sroa.6.0.insert.shift
+ %alloca1.sroa.5.0.insert.ext = zext i8 %load49 to i64
+ %alloca1.sroa.5.0.insert.shift = shl i64 %alloca1.sroa.5.0.insert.ext, 32
+ %alloca1.sroa.5.0.insert.insert = or i64 %alloca1.sroa.6.0.insert.insert, %alloca1.sroa.5.0.insert.shift
+ %alloca1.sroa.4.0.insert.ext = zext i8 %load43 to i64
+ %alloca1.sroa.4.0.insert.shift = shl i64 %alloca1.sroa.4.0.insert.ext, 24
+ %alloca1.sroa.4.0.insert.insert = or i64 %alloca1.sroa.5.0.insert.insert, %alloca1.sroa.4.0.insert.shift
+ %alloca1.sroa.3.0.insert.ext = zext i8 %load37 to i64
+ %alloca1.sroa.3.0.insert.shift = shl i64 %alloca1.sroa.3.0.insert.ext, 16
+ %alloca1.sroa.2.0.insert.ext = zext i8 %load31 to i64
+ %alloca1.sroa.2.0.insert.shift = shl i64 %alloca1.sroa.2.0.insert.ext, 8
+ %alloca1.sroa.2.0.insert.mask = or i64 %alloca1.sroa.4.0.insert.insert, %alloca1.sroa.3.0.insert.shift
+ %alloca1.sroa.0.0.insert.ext = zext i8 %load28 to i64
+ %alloca1.sroa.0.0.insert.mask = or i64 %alloca1.sroa.2.0.insert.mask, %alloca1.sroa.2.0.insert.shift
+ %alloca1.sroa.0.0.insert.insert = or i64 %alloca1.sroa.0.0.insert.mask, %alloca1.sroa.0.0.insert.ext
+ %call = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8(i64 %alloca1.sroa.0.0.insert.insert, i64 0, <16 x float> zeroinitializer, i32 0, i32 0, i32 0)
+ %alloca1.sroa.17.8.insert.ext = zext i8 %load115 to i64
+ %alloca1.sroa.17.8.insert.shift = shl i64 %alloca1.sroa.17.8.insert.ext, 56
+ %alloca1.sroa.16.8.insert.ext = zext i8 %load109 to i64
+ %alloca1.sroa.16.8.insert.shift = shl i64 %alloca1.sroa.16.8.insert.ext, 48
+ %alloca1.sroa.16.8.insert.insert = or i64 %alloca1.sroa.17.8.insert.shift, %alloca1.sroa.16.8.insert.shift
+ %alloca1.sroa.15.8.insert.ext = zext i8 %load103 to i64
+ %alloca1.sroa.15.8.insert.shift = shl i64 %alloca1.sroa.15.8.insert.ext, 40
+ %alloca1.sroa.15.8.insert.insert = or i64 %alloca1.sroa.16.8.insert.insert, %alloca1.sroa.15.8.insert.shift
+ %alloca1.sroa.14.8.insert.ext = zext i8 %load97 to i64
+ %alloca1.sroa.14.8.insert.shift = shl i64 %alloca1.sroa.14.8.insert.ext, 32
+ %alloca1.sroa.14.8.insert.insert = or i64 %alloca1.sroa.15.8.insert.insert, %alloca1.sroa.14.8.insert.shift
+ %alloca1.sroa.13.8.insert.ext = zext i8 %load91 to i64
+ %alloca1.sroa.13.8.insert.shift = shl i64 %alloca1.sroa.13.8.insert.ext, 24
+ %alloca1.sroa.13.8.insert.insert = or i64 %alloca1.sroa.14.8.insert.insert, %alloca1.sroa.13.8.insert.shift
+ %alloca1.sroa.12.8.insert.ext = zext i8 %load85 to i64
+ %alloca1.sroa.12.8.insert.shift = shl i64 %alloca1.sroa.12.8.insert.ext, 16
+ %alloca1.sroa.11.8.insert.ext = zext i8 %load79 to i64
+ %alloca1.sroa.11.8.insert.shift = shl i64 %alloca1.sroa.11.8.insert.ext, 8
+ %alloca1.sroa.11.8.insert.mask = or i64 %alloca1.sroa.13.8.insert.insert, %alloca1.sroa.12.8.insert.shift
+ %alloca1.sroa.9.8.insert.ext = zext i8 %load73 to i64
+ %alloca1.sroa.9.8.insert.mask = or i64 %alloca1.sroa.11.8.insert.mask, %alloca1.sroa.11.8.insert.shift
+ %alloca1.sroa.9.8.insert.insert = or i64 %alloca1.sroa.9.8.insert.mask, %alloca1.sroa.9.8.insert.ext
+ %call225 = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8(i64 %alloca1.sroa.9.8.insert.insert, i64 0, <16 x float> %call, i32 0, i32 0, i32 0)
+ %alloca1.sroa.26.16.insert.ext = zext i8 %load163 to i64
+ %alloca1.sroa.26.16.insert.shift = shl i64 %alloca1.sroa.26.16.insert.ext, 56
+ %alloca1.sroa.25.16.insert.ext = zext i8 %load157 to i64
+ %alloca1.sroa.25.16.insert.shift = shl i64 %alloca1.sroa.25.16.insert.ext, 48
+ %alloca1.sroa.25.16.insert.insert = or i64 %alloca1.sroa.26.16.insert.shift, %alloca1.sroa.25.16.insert.shift
+ %alloca1.sroa.24.16.insert.ext = zext i8 %load151 to i64
+ %alloca1.sroa.24.16.insert.shift = shl i64 %alloca1.sroa.24.16.insert.ext, 40
+ %alloca1.sroa.24.16.insert.insert = or i64 %alloca1.sroa.25.16.insert.insert, %alloca1.sroa.24.16.insert.shift
+ %alloca1.sroa.23.16.insert.ext = zext i8 %load145 to i64
+ %alloca1.sroa.23.16.insert.shift = shl i64 %alloca1.sroa.23.16.insert.ext, 32
+ %alloca1.sroa.23.16.insert.insert = or i64 %alloca1.sroa.24.16.insert.insert, %alloca1.sroa.23.16.insert.shift
+ %alloca1.sroa.22.16.insert.ext = zext i8 %load139 to i64
+ %alloca1.sroa.22.16.insert.shift = shl i64 %alloca1.sroa.22.16.insert.ext, 24
+ %alloca1.sroa.22.16.insert.insert = or i64 %alloca1.sroa.23.16.insert.insert, %alloca1.sroa.22.16.insert.shift
+ %alloca1.sroa.21.16.insert.ext = zext i8 %load133 to i64
+ %alloca1.sroa.21.16.insert.shift = shl i64 %alloca1.sroa.21.16.insert.ext, 16
+ %alloca1.sroa.20.16.insert.ext = zext i8 %load127 to i64
+ %alloca1.sroa.20.16.insert.shift = shl i64 %alloca1.sroa.20.16.insert.ext, 8
+ %alloca1.sroa.20.16.insert.mask = or i64 %alloca1.sroa.22.16.insert.insert, %alloca1.sroa.21.16.insert.shift
+ %alloca1.sroa.18.16.insert.ext = zext i8 %load121 to i64
+ %alloca1.sroa.18.16.insert.mask = or i64 %alloca1.sroa.20.16.insert.mask, %alloca1.sroa.20.16.insert.shift
+ %alloca1.sroa.18.16.insert.insert = or i64 %alloca1.sroa.18.16.insert.mask, %alloca1.sroa.18.16.insert.ext
+ %call230 = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8(i64 %alloca1.sroa.18.16.insert.insert, i64 0, <16 x float> %call225, i32 0, i32 0, i32 0)
+ %alloca1.sroa.35.24.insert.ext = zext i8 %load211 to i64
+ %alloca1.sroa.35.24.insert.shift = shl i64 %alloca1.sroa.35.24.insert.ext, 56
+ %alloca1.sroa.34.24.insert.ext = zext i8 %load205 to i64
+ %alloca1.sroa.34.24.insert.shift = shl i64 %alloca1.sroa.34.24.insert.ext, 48
+ %alloca1.sroa.34.24.insert.insert = or i64 %alloca1.sroa.35.24.insert.shift, %alloca1.sroa.34.24.insert.shift
+ %alloca1.sroa.33.24.insert.ext = zext i8 %load199 to i64
+ %alloca1.sroa.33.24.insert.shift = shl i64 %alloca1.sroa.33.24.insert.ext, 40
+ %alloca1.sroa.33.24.insert.insert = or i64 %alloca1.sroa.34.24.insert.insert, %alloca1.sroa.33.24.insert.shift
+ %alloca1.sroa.32.24.insert.ext = zext i8 %load193 to i64
+ %alloca1.sroa.32.24.insert.shift = shl i64 %alloca1.sroa.32.24.insert.ext, 32
+ %alloca1.sroa.32.24.insert.insert = or i64 %alloca1.sroa.33.24.insert.insert, %alloca1.sroa.32.24.insert.shift
+ %alloca1.sroa.31.24.insert.ext = zext i8 %load187 to i64
+ %alloca1.sroa.31.24.insert.shift = shl i64 %alloca1.sroa.31.24.insert.ext, 24
+ %alloca1.sroa.31.24.insert.insert = or i64 %alloca1.sroa.32.24.insert.insert, %alloca1.sroa.31.24.insert.shift
+ %alloca1.sroa.30.24.insert.ext = zext i8 %load181 to i64
+ %alloca1.sroa.30.24.insert.shift = shl i64 %alloca1.sroa.30.24.insert.ext, 16
+ %alloca1.sroa.29.24.insert.ext = zext i8 %load175 to i64
+ %alloca1.sroa.29.24.insert.shift = shl i64 %alloca1.sroa.29.24.insert.ext, 8
+ %alloca1.sroa.29.24.insert.mask = or i64 %alloca1.sroa.31.24.insert.insert, %alloca1.sroa.30.24.insert.shift
+ %alloca1.sroa.27.24.insert.ext = zext i8 %load169 to i64
+ %alloca1.sroa.27.24.insert.mask = or i64 %alloca1.sroa.29.24.insert.mask, %alloca1.sroa.29.24.insert.shift
+ %alloca1.sroa.27.24.insert.insert = or i64 %alloca1.sroa.27.24.insert.mask, %alloca1.sroa.27.24.insert.ext
+ %call235 = call <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8(i64 %alloca1.sroa.27.24.insert.insert, i64 0, <16 x float> %call230, i32 0, i32 0, i32 0)
+ ret <16 x float> %call235
+}
+
+; Function Attrs: convergent nocallback nofree nosync nounwind willreturn memory(none)
+declare <16 x float> @llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8(i64, i64, <16 x float>, i32 immarg, i32 immarg, i32 immarg) #0
+
+; uselistorder directives
+uselistorder ptr @llvm.amdgcn.mfma.f32.32x32x16.fp8.fp8, { 3, 2, 1, 0 }
----------------
arsenm wrote:
```suggestion
```
https://github.com/llvm/llvm-project/pull/158036