[llvm] [LSV] Merge contiguous chains across scalar types (PR #154069)

Anshil Gandhi via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 2 15:14:17 PST 2025


================
@@ -1,57 +1,273 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
 ; RUN: opt -mtriple=amdgcn-amd-amdhsa -passes=load-store-vectorizer -S -o - %s | FileCheck %s
 
-define void @merge_i32_v2i16_f32_v4i8(ptr addrspace(1) %ptr1, ptr addrspace(2) %ptr2) {
-; CHECK-LABEL: define void @merge_i32_v2i16_f32_v4i8(
+define void @no_merge_i16_half(ptr addrspace(1) %ptr1, ptr addrspace(2) %ptr2) {
+; CHECK-LABEL: define void @no_merge_i16_half(
 ; CHECK-SAME: ptr addrspace(1) [[PTR1:%.*]], ptr addrspace(2) [[PTR2:%.*]]) {
-; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[PTR1]], i64 0
-; CHECK-NEXT:    [[LOAD1:%.*]] = load i32, ptr addrspace(1) [[GEP1]], align 4
-; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr inbounds <2 x i16>, ptr addrspace(1) [[PTR1]], i64 1
-; CHECK-NEXT:    [[LOAD2:%.*]] = load <2 x i16>, ptr addrspace(1) [[GEP2]], align 4
-; CHECK-NEXT:    [[GEP3:%.*]] = getelementptr inbounds float, ptr addrspace(1) [[PTR1]], i64 2
-; CHECK-NEXT:    [[LOAD3:%.*]] = load float, ptr addrspace(1) [[GEP3]], align 4
-; CHECK-NEXT:    [[GEP4:%.*]] = getelementptr inbounds <4 x i8>, ptr addrspace(1) [[PTR1]], i64 3
-; CHECK-NEXT:    [[LOAD4:%.*]] = load <4 x i8>, ptr addrspace(1) [[GEP4]], align 4
-; CHECK-NEXT:    [[STORE_GEP1:%.*]] = getelementptr inbounds i32, ptr addrspace(2) [[PTR2]], i64 0
-; CHECK-NEXT:    store i32 [[LOAD1]], ptr addrspace(2) [[STORE_GEP1]], align 4
-; CHECK-NEXT:    [[STORE_GEP2:%.*]] = getelementptr inbounds <2 x i16>, ptr addrspace(2) [[PTR2]], i64 1
-; CHECK-NEXT:    store <2 x i16> [[LOAD2]], ptr addrspace(2) [[STORE_GEP2]], align 4
-; CHECK-NEXT:    [[STORE_GEP3:%.*]] = getelementptr inbounds float, ptr addrspace(2) [[PTR2]], i64 2
-; CHECK-NEXT:    store float [[LOAD3]], ptr addrspace(2) [[STORE_GEP3]], align 4
-; CHECK-NEXT:    [[STORE_GEP4:%.*]] = getelementptr inbounds <4 x i8>, ptr addrspace(2) [[PTR2]], i64 3
-; CHECK-NEXT:    store <4 x i8> [[LOAD4]], ptr addrspace(2) [[STORE_GEP4]], align 4
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr inbounds i16, ptr addrspace(1) [[PTR1]], i64 1
+; CHECK-NEXT:    [[LOAD_0:%.*]] = load i16, ptr addrspace(1) [[PTR1]], align 2
+; CHECK-NEXT:    [[LOAD_1:%.*]] = load half, ptr addrspace(1) [[GEP_1]], align 2
+; CHECK-NEXT:    [[STORE_GEP_1:%.*]] = getelementptr inbounds i16, ptr addrspace(2) [[PTR2]], i64 1
+; CHECK-NEXT:    store i16 [[LOAD_0]], ptr addrspace(2) [[PTR2]], align 2
+; CHECK-NEXT:    store half [[LOAD_1]], ptr addrspace(2) [[STORE_GEP_1]], align 2
 ; CHECK-NEXT:    ret void
 ;
-  %gep1 = getelementptr inbounds i32, ptr addrspace(1) %ptr1, i64 0
-  %load1 = load i32, ptr addrspace(1) %gep1, align 4
-  %gep2 = getelementptr inbounds <2 x i16>, ptr addrspace(1) %ptr1, i64 1
-  %load2 = load <2 x i16>, ptr addrspace(1) %gep2, align 4
-  %gep3 = getelementptr inbounds float, ptr addrspace(1) %ptr1, i64 2
-  %load3 = load float, ptr addrspace(1) %gep3, align 4
-  %gep4 = getelementptr inbounds <4 x i8>, ptr addrspace(1) %ptr1, i64 3
-  %load4 = load <4 x i8>, ptr addrspace(1) %gep4, align 4
-  %store.gep1 = getelementptr inbounds i32, ptr addrspace(2) %ptr2, i64 0
-  store i32 %load1, ptr addrspace(2) %store.gep1, align 4
-  %store.gep2 = getelementptr inbounds <2 x i16>, ptr addrspace(2) %ptr2, i64 1
-  store <2 x i16> %load2, ptr addrspace(2) %store.gep2, align 4
-  %store.gep3 = getelementptr inbounds float, ptr addrspace(2) %ptr2, i64 2
-  store float %load3, ptr addrspace(2) %store.gep3, align 4
-  %store.gep4 = getelementptr inbounds <4 x i8>, ptr addrspace(2) %ptr2, i64 3
-  store <4 x i8> %load4, ptr addrspace(2) %store.gep4, align 4
+  %gep.1 = getelementptr inbounds i16, ptr addrspace(1) %ptr1, i64 1
+  %load.0 = load i16, ptr addrspace(1) %ptr1
+  %load.1 = load half, ptr addrspace(1) %gep.1
+  %store.gep.1 = getelementptr inbounds i16, ptr addrspace(2) %ptr2, i64 1
+  store i16 %load.0, ptr addrspace(2) %ptr2
+  store half %load.1, ptr addrspace(2) %store.gep.1
+  ret void
+}
+
+define void @no_merge_i16_float(ptr addrspace(1) %ptr1, ptr addrspace(2) %ptr2) {
+; CHECK-LABEL: define void @no_merge_i16_float(
+; CHECK-SAME: ptr addrspace(1) [[PTR1:%.*]], ptr addrspace(2) [[PTR2:%.*]]) {
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr inbounds i16, ptr addrspace(1) [[PTR1]], i64 1
+; CHECK-NEXT:    [[LOAD_0:%.*]] = load i16, ptr addrspace(1) [[PTR1]], align 2
+; CHECK-NEXT:    [[LOAD_1:%.*]] = load float, ptr addrspace(1) [[GEP_1]], align 4
+; CHECK-NEXT:    [[STORE_GEP_1:%.*]] = getelementptr inbounds i16, ptr addrspace(2) [[PTR2]], i64 1
+; CHECK-NEXT:    store i16 [[LOAD_0]], ptr addrspace(2) [[PTR2]], align 2
+; CHECK-NEXT:    store float [[LOAD_1]], ptr addrspace(2) [[STORE_GEP_1]], align 4
+; CHECK-NEXT:    ret void
+;
+  %gep.1 = getelementptr inbounds i16, ptr addrspace(1) %ptr1, i64 1
+  %load.0 = load i16, ptr addrspace(1) %ptr1
+  %load.1 = load float, ptr addrspace(1) %gep.1
+  %store.gep.1 = getelementptr inbounds i16, ptr addrspace(2) %ptr2, i64 1
+  store i16 %load.0, ptr addrspace(2) %ptr2
+  store float %load.1, ptr addrspace(2) %store.gep.1
+  ret void
+}
+
+define void @merge_i32_v2i16(ptr addrspace(1) %ptr1, ptr addrspace(2) %ptr2) {
+; CHECK-LABEL: define void @merge_i32_v2i16(
+; CHECK-SAME: ptr addrspace(1) [[PTR1:%.*]], ptr addrspace(2) [[PTR2:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr addrspace(1) [[PTR1]], align 4
+; CHECK-NEXT:    [[LOAD_01:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[LOAD_1_MUT2:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[LOAD_1_MUT_BC:%.*]] = bitcast i32 [[LOAD_1_MUT2]] to <2 x i16>
+; CHECK-NEXT:    [[LOAD_1_BC:%.*]] = bitcast <2 x i16> [[LOAD_1_MUT_BC]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x i32> poison, i32 [[LOAD_01]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i32> [[TMP2]], i32 [[LOAD_1_BC]], i32 1
+; CHECK-NEXT:    store <2 x i32> [[TMP3]], ptr addrspace(2) [[PTR2]], align 4
+; CHECK-NEXT:    ret void
+;
+  %gep.1 = getelementptr inbounds i32, ptr addrspace(1) %ptr1, i64 1
+  %load.0 = load i32, ptr addrspace(1) %ptr1
+  %load.1 = load <2 x i16>, ptr addrspace(1) %gep.1
+  %store.gep.1 = getelementptr inbounds i32, ptr addrspace(2) %ptr2, i64 1
+  store i32 %load.0, ptr addrspace(2) %ptr2
+  store <2 x i16> %load.1, ptr addrspace(2) %store.gep.1
+  ret void
+}
+
+define void @no_merge_i32_ptr(ptr addrspace(1) %ptr1, ptr addrspace(2) %ptr2) {
+; CHECK-LABEL: define void @no_merge_i32_ptr(
+; CHECK-SAME: ptr addrspace(1) [[PTR1:%.*]], ptr addrspace(2) [[PTR2:%.*]]) {
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[PTR1]], i64 1
+; CHECK-NEXT:    [[LOAD_0:%.*]] = load i32, ptr addrspace(1) [[PTR1]], align 4
+; CHECK-NEXT:    [[LOAD_1:%.*]] = load ptr, ptr addrspace(1) [[GEP_1]], align 8
+; CHECK-NEXT:    [[STORE_GEP_1:%.*]] = getelementptr inbounds i32, ptr addrspace(2) [[PTR2]], i64 1
+; CHECK-NEXT:    store i32 [[LOAD_0]], ptr addrspace(2) [[PTR2]], align 4
+; CHECK-NEXT:    store ptr [[LOAD_1]], ptr addrspace(2) [[STORE_GEP_1]], align 8
+; CHECK-NEXT:    ret void
+;
+  %gep.1 = getelementptr inbounds i32, ptr addrspace(1) %ptr1, i64 1
+  %load.0 = load i32, ptr addrspace(1) %ptr1
+  %load.1 = load ptr, ptr addrspace(1) %gep.1
+  %store.gep.1 = getelementptr inbounds i32, ptr addrspace(2) %ptr2, i64 1
+  store i32 %load.0, ptr addrspace(2) %ptr2
+  store ptr %load.1, ptr addrspace(2) %store.gep.1
   ret void
 }
 
-define void @merge_f32_v2f16_type(ptr addrspace(1) %ptr1, ptr addrspace(2) %ptr2) {
-; CHECK-LABEL: define void @merge_f32_v2f16_type(
+define void @no_merge_i32_half(ptr addrspace(1) %ptr1, ptr addrspace(2) %ptr2) {
+; CHECK-LABEL: define void @no_merge_i32_half(
+; CHECK-SAME: ptr addrspace(1) [[PTR1:%.*]], ptr addrspace(2) [[PTR2:%.*]]) {
+; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[PTR1]], i64 1
+; CHECK-NEXT:    [[LOAD_0:%.*]] = load i32, ptr addrspace(1) [[PTR1]], align 4
+; CHECK-NEXT:    [[LOAD_1:%.*]] = load half, ptr addrspace(1) [[GEP_1]], align 2
+; CHECK-NEXT:    [[STORE_GEP_1:%.*]] = getelementptr inbounds i32, ptr addrspace(2) [[PTR2]], i64 1
+; CHECK-NEXT:    store i32 [[LOAD_0]], ptr addrspace(2) [[PTR2]], align 4
+; CHECK-NEXT:    store half [[LOAD_1]], ptr addrspace(2) [[STORE_GEP_1]], align 2
+; CHECK-NEXT:    ret void
+;
+  %gep.1 = getelementptr inbounds i32, ptr addrspace(1) %ptr1, i64 1
+  %load.0 = load i32, ptr addrspace(1) %ptr1
+  %load.1 = load half, ptr addrspace(1) %gep.1
+  %store.gep.1 = getelementptr inbounds i32, ptr addrspace(2) %ptr2, i64 1
+  store i32 %load.0, ptr addrspace(2) %ptr2
+  store half %load.1, ptr addrspace(2) %store.gep.1
+  ret void
+}
+
+define void @merge_i32_float(ptr addrspace(1) %ptr1, ptr addrspace(2) %ptr2) {
+; CHECK-LABEL: define void @merge_i32_float(
+; CHECK-SAME: ptr addrspace(1) [[PTR1:%.*]], ptr addrspace(2) [[PTR2:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr addrspace(1) [[PTR1]], align 4
+; CHECK-NEXT:    [[LOAD_01:%.*]] = extractelement <2 x i32> [[TMP1]], i32 0
+; CHECK-NEXT:    [[LOAD_12:%.*]] = extractelement <2 x i32> [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32 [[LOAD_12]] to float
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i32> poison, i32 [[LOAD_01]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast float [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP4]], i32 1
+; CHECK-NEXT:    store <2 x i32> [[TMP5]], ptr addrspace(2) [[PTR2]], align 4
+; CHECK-NEXT:    ret void
+;
+  %gep.1 = getelementptr inbounds i32, ptr addrspace(1) %ptr1, i64 1
+  %load.0 = load i32, ptr addrspace(1) %ptr1
+  %load.1 = load float, ptr addrspace(1) %gep.1
+  %store.gep.1 = getelementptr inbounds i32, ptr addrspace(2) %ptr2, i64 1
+  store i32 %load.0, ptr addrspace(2) %ptr2
+  store float %load.1, ptr addrspace(2) %store.gep.1
+  ret void
+}
----------------
gandhi56 wrote:

Correct

https://github.com/llvm/llvm-project/pull/154069

