[llvm] ce7ca2c - AMDGPU/GlobalISel: Select llvm.amdgcn.struct.buffer.load
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 27 10:14:12 PST 2020
Author: Matt Arsenault
Date: 2020-01-27T13:05:55-05:00
New Revision: ce7ca2caf2c2c71f6c003668c99729ee2f9c29e3
URL: https://github.com/llvm/llvm-project/commit/ce7ca2caf2c2c71f6c003668c99729ee2f9c29e3
DIFF: https://github.com/llvm/llvm-project/commit/ce7ca2caf2c2c71f6c003668c99729ee2f9c29e3.diff
LOG: AMDGPU/GlobalISel: Select llvm.amdgcn.struct.buffer.load
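The struct buffer load intrinsics take one operand more than the raw
variants: a vindex that selects the structure element, lowered with the
idxen addressing mode (the -1 idxen immediate in the updated checks).
The raw buffer load legalization is generalized into legalizeBufferLoad,
which shifts the remaining operands over by one when a vindex is
present, so both forms share a single path. As a minimal sketch, the two
IR call shapes (operand names here are illustrative; the struct
signatures match the declares in the added test):

  ; raw form: rsrc, voffset, soffset, cachepolicy
  %r = call float @llvm.amdgcn.raw.buffer.load.f32(<4 x i32> %rsrc, i32 %voffset, i32 %soffset, i32 0)

  ; struct form adds a vindex before voffset
  %s = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)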
Added:
llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll
Modified:
llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.struct.buffer.load.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 5420dcb5c819..c76833518184 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2435,10 +2435,10 @@ bool AMDGPULegalizerInfo::legalizeRawBufferStore(MachineInstr &MI,
return Ty == S32;
}
-bool AMDGPULegalizerInfo::legalizeRawBufferLoad(MachineInstr &MI,
- MachineRegisterInfo &MRI,
- MachineIRBuilder &B,
- bool IsFormat) const {
+bool AMDGPULegalizerInfo::legalizeBufferLoad(MachineInstr &MI,
+ MachineRegisterInfo &MRI,
+ MachineIRBuilder &B,
+ bool IsFormat) const {
B.setInstr(MI);
// FIXME: Verifier should enforce 1 MMO for these intrinsics.
@@ -2448,9 +2448,19 @@ bool AMDGPULegalizerInfo::legalizeRawBufferLoad(MachineInstr &MI,
Register Dst = MI.getOperand(0).getReg();
Register RSrc = MI.getOperand(2).getReg();
- Register VOffset = MI.getOperand(3).getReg();
- Register SOffset = MI.getOperand(4).getReg();
- unsigned AuxiliaryData = MI.getOperand(5).getImm();
+
+ // The struct intrinsic variants add one additional operand over raw.
+ const bool HasVIndex = MI.getNumOperands() == 7;
+ Register VIndex;
+ int OpOffset = 0;
+ if (HasVIndex) {
+ VIndex = MI.getOperand(3).getReg();
+ OpOffset = 1;
+ }
+
+ Register VOffset = MI.getOperand(3 + OpOffset).getReg();
+ Register SOffset = MI.getOperand(4 + OpOffset).getReg();
+ unsigned AuxiliaryData = MI.getOperand(5 + OpOffset).getImm();
unsigned ImmOffset;
unsigned TotalOffset;
@@ -2493,17 +2503,18 @@ bool AMDGPULegalizerInfo::legalizeRawBufferLoad(MachineInstr &MI,
else
LoadDstReg = Dst;
- Register VIndex = B.buildConstant(S32, 0).getReg(0);
+ if (!VIndex)
+ VIndex = B.buildConstant(S32, 0).getReg(0);
B.buildInstr(Opc)
- .addDef(LoadDstReg) // vdata
- .addUse(RSrc) // rsrc
- .addUse(VIndex) // vindex
- .addUse(VOffset) // voffset
- .addUse(SOffset) // soffset
- .addImm(ImmOffset) // offset(imm)
- .addImm(AuxiliaryData) // cachepolicy, swizzled buffer(imm)
- .addImm(0) // idxen(imm)
+ .addDef(LoadDstReg) // vdata
+ .addUse(RSrc) // rsrc
+ .addUse(VIndex) // vindex
+ .addUse(VOffset) // voffset
+ .addUse(SOffset) // soffset
+ .addImm(ImmOffset) // offset(imm)
+ .addImm(AuxiliaryData) // cachepolicy, swizzled buffer(imm)
+ .addImm(HasVIndex ? -1 : 0) // idxen(imm)
.addMemOperand(MMO);
if (LoadDstReg != Dst) {
@@ -2662,9 +2673,10 @@ bool AMDGPULegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
case Intrinsic::amdgcn_raw_buffer_store_format:
return legalizeRawBufferStore(MI, MRI, B, true);
case Intrinsic::amdgcn_raw_buffer_load:
- return legalizeRawBufferLoad(MI, MRI, B, false);
+ case Intrinsic::amdgcn_struct_buffer_load:
+ return legalizeBufferLoad(MI, MRI, B, false);
case Intrinsic::amdgcn_raw_buffer_load_format:
- return legalizeRawBufferLoad(MI, MRI, B, true);
+ return legalizeBufferLoad(MI, MRI, B, true);
case Intrinsic::amdgcn_atomic_inc:
return legalizeAtomicIncDec(MI, B, true);
case Intrinsic::amdgcn_atomic_dec:
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index c050f7a5fd90..89886a32282d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -114,6 +114,9 @@ class AMDGPULegalizerInfo : public LegalizerInfo {
MachineIRBuilder &B, bool IsFormat) const;
bool legalizeRawBufferLoad(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &B, bool IsFormat) const;
+ bool legalizeBufferLoad(MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &B, bool IsFormat) const;
+
bool legalizeAtomicIncDec(MachineInstr &MI, MachineIRBuilder &B,
bool IsInc) const;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 2a25fcc2c097..ba5cb38057dc 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -2343,9 +2343,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
executeInWaterfallLoop(MI, MRI, {2, 4});
return;
}
- case Intrinsic::amdgcn_struct_buffer_load:
case Intrinsic::amdgcn_struct_buffer_store:
- case Intrinsic::amdgcn_struct_tbuffer_load:
case Intrinsic::amdgcn_struct_tbuffer_store: {
applyDefaultMapping(OpdMapper);
executeInWaterfallLoop(MI, MRI, {2, 5});
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll
new file mode 100644
index 000000000000..d07f0a82b470
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.struct.buffer.load.ll
@@ -0,0 +1,425 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck %s
+; RUN: llc -global-isel -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx810 -stop-after=instruction-select -verify-machineinstrs -o - %s | FileCheck %s
+
+; Natural mapping
+define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_DWORD_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+ %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
+ ret float %val
+}
+
+; Natural mapping
+define amdgpu_ps <2 x float> @struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_v2f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_DWORDX2_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 8 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_BOTHEN]].sub0
+ ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_BOTHEN]].sub1
+ ; CHECK: $vgpr0 = COPY [[COPY7]]
+ ; CHECK: $vgpr1 = COPY [[COPY8]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ %val = call <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
+ ret <2 x float> %val
+}
+
+; Natural mapping
+define amdgpu_ps <3 x float> @struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_v3f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_DWORDX3_BOTHEN:%[0-9]+]]:vreg_96 = BUFFER_LOAD_DWORDX3_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 12 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_BOTHEN]].sub0
+ ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_BOTHEN]].sub1
+ ; CHECK: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX3_BOTHEN]].sub2
+ ; CHECK: $vgpr0 = COPY [[COPY7]]
+ ; CHECK: $vgpr1 = COPY [[COPY8]]
+ ; CHECK: $vgpr2 = COPY [[COPY9]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+ %val = call <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
+ ret <3 x float> %val
+}
+
+; Natural mapping
+define amdgpu_ps <4 x float> @struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_v4f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_DWORDX4_BOTHEN:%[0-9]+]]:vreg_128 = BUFFER_LOAD_DWORDX4_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub0
+ ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub1
+ ; CHECK: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub2
+ ; CHECK: [[COPY10:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX4_BOTHEN]].sub3
+ ; CHECK: $vgpr0 = COPY [[COPY7]]
+ ; CHECK: $vgpr1 = COPY [[COPY8]]
+ ; CHECK: $vgpr2 = COPY [[COPY9]]
+ ; CHECK: $vgpr3 = COPY [[COPY10]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+ %val = call <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
+ ret <4 x float> %val
+}
+
+; Natural mapping
+define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_vindex0(<4 x i32> inreg %rsrc, i32 %voffset, i32 inreg %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_vindex0
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY6]], %subreg.sub0, [[COPY4]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_DWORD_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY5]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+ %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 0, i32 %voffset, i32 %soffset, i32 0)
+ ret float %val
+}
+
+; Natural mapping
+define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_voffset_add4095(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset.base, i32 inreg %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_voffset_add4095
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_DWORD_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 4095, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7" + 4095, align 1, addrspace 4)
+ ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+ %voffset = add i32 %voffset.base, 4095
+ %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
+ ret float %val
+}
+
+define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_soffset_64(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_soffset_64
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $vgpr0, $vgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 64
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_DWORD_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[S_MOV_B32_]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+ %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 64, i32 0)
+ ret float %val
+}
+
+; Need to legalize all reg operands
+define amdgpu_ps float @struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_voffset__vgpr_soffset(<4 x i32> %rsrc, i32 inreg %vindex, i32 inreg %voffset, i32 %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_f32__vgpr_rsrc__sgpr_vindex__sgpr_voffset__vgpr_soffset
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: successors: %bb.2(0x80000000)
+ ; CHECK: liveins: $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
+ ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+ ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+ ; CHECK: [[COPY4:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY5:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY6:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[COPY4]]
+ ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[COPY5]]
+ ; CHECK: [[COPY9:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub0_sub1
+ ; CHECK: [[COPY10:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]].sub2_sub3
+ ; CHECK: [[S_MOV_B64_term:%[0-9]+]]:sreg_64_xexec = S_MOV_B64_term $exec
+ ; CHECK: bb.2:
+ ; CHECK: successors: %bb.3(0x40000000), %bb.2(0x40000000)
+ ; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub0, implicit $exec
+ ; CHECK: [[V_READFIRSTLANE_B32_1:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY9]].sub1, implicit $exec
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1
+ ; CHECK: [[V_CMP_EQ_U64_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE1]], [[COPY9]], implicit $exec
+ ; CHECK: [[V_READFIRSTLANE_B32_2:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]].sub0, implicit $exec
+ ; CHECK: [[V_READFIRSTLANE_B32_3:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY10]].sub1, implicit $exec
+ ; CHECK: [[REG_SEQUENCE2:%[0-9]+]]:sreg_64_xexec = REG_SEQUENCE [[V_READFIRSTLANE_B32_2]], %subreg.sub0, [[V_READFIRSTLANE_B32_3]], %subreg.sub1
+ ; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[REG_SEQUENCE2]], [[COPY10]], implicit $exec
+ ; CHECK: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
+ ; CHECK: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[V_READFIRSTLANE_B32_]], %subreg.sub0, [[V_READFIRSTLANE_B32_1]], %subreg.sub1, [[V_READFIRSTLANE_B32_2]], %subreg.sub2, [[V_READFIRSTLANE_B32_3]], %subreg.sub3
+ ; CHECK: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0 = V_READFIRSTLANE_B32 [[COPY6]], implicit $exec
+ ; CHECK: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]], [[COPY6]], implicit $exec
+ ; CHECK: [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U32_e64_]], [[S_AND_B64_]], implicit-def $scc
+ ; CHECK: [[REG_SEQUENCE4:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY7]], %subreg.sub0, [[COPY8]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_DWORD_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN [[REG_SEQUENCE4]], [[REG_SEQUENCE3]], [[V_READFIRSTLANE_B32_4]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_1]], implicit-def $exec, implicit-def $scc, implicit $exec
+ ; CHECK: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
+ ; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec
+ ; CHECK: bb.3:
+ ; CHECK: successors: %bb.4(0x80000000)
+ ; CHECK: $exec = S_MOV_B64_term [[S_MOV_B64_term]]
+ ; CHECK: bb.4:
+ ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+ %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
+ ret float %val
+}
+
+define amdgpu_ps float @struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_i8_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_UBYTE_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 1 from custom "TargetCustom7", addrspace 4)
+ ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 255
+ ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; CHECK: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 [[BUFFER_LOAD_UBYTE_BOTHEN]], [[COPY7]], implicit $exec
+ ; CHECK: $vgpr0 = COPY [[V_AND_B32_e64_]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+ %val = call i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
+ %ext = zext i8 %val to i32
+ %cast = bitcast i32 %ext to float
+ ret float %cast
+}
+
+define amdgpu_ps float @struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_i8_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_UBYTE_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_UBYTE_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 1 from custom "TargetCustom7", addrspace 4)
+ ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 24
+ ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; CHECK: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 [[COPY7]], [[BUFFER_LOAD_UBYTE_BOTHEN]], implicit $exec
+ ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; CHECK: [[V_ASHRREV_I32_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[COPY8]], [[V_LSHLREV_B32_e64_]], implicit $exec
+ ; CHECK: $vgpr0 = COPY [[V_ASHRREV_I32_e64_]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+ %val = call i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
+ %ext = sext i8 %val to i32
+ %cast = bitcast i32 %ext to float
+ ret float %cast
+}
+
+define amdgpu_ps float @struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_i16_zext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_USHORT_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 2 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 65535
+ ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; CHECK: [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 [[BUFFER_LOAD_USHORT_BOTHEN]], [[COPY7]], implicit $exec
+ ; CHECK: $vgpr0 = COPY [[V_AND_B32_e64_]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+ %val = call i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
+ %ext = zext i16 %val to i32
+ %cast = bitcast i32 %ext to float
+ ret float %cast
+}
+
+define amdgpu_ps float @struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_i16_sext__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_USHORT_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 2 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 16
+ ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; CHECK: [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 [[COPY7]], [[BUFFER_LOAD_USHORT_BOTHEN]], implicit $exec
+ ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+ ; CHECK: [[V_ASHRREV_I32_e64_:%[0-9]+]]:vgpr_32 = V_ASHRREV_I32_e64 [[COPY8]], [[V_LSHLREV_B32_e64_]], implicit $exec
+ ; CHECK: $vgpr0 = COPY [[V_ASHRREV_I32_e64_]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+ %val = call i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
+ %ext = sext i16 %val to i32
+ %cast = bitcast i32 %ext to float
+ ret float %cast
+}
+
+; Natural mapping
+define amdgpu_ps half @struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_USHORT_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_USHORT_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 2 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_USHORT_BOTHEN]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+ %val = call half @llvm.amdgcn.struct.buffer.load.f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
+ ret half %val
+}
+
+; Natural mapping
+define amdgpu_ps <2 x half> @struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_v2f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_DWORD_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+ %val = call <2 x half> @llvm.amdgcn.struct.buffer.load.v2f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
+ ret <2 x half> %val
+}
+
+; FIXME: Crashes
+; define amdgpu_ps <3 x half> @struct_buffer_load_v3f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+; %val = call <3 x half> @llvm.amdgcn.struct.buffer.load.v3f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
+; ret <3 x half> %val
+; }
+
+; Natural mapping
+define amdgpu_ps <4 x half> @struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_v4f16__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_DWORDX2_BOTHEN:%[0-9]+]]:vreg_64 = BUFFER_LOAD_DWORDX2_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 8 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: [[COPY7:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_BOTHEN]].sub0
+ ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[BUFFER_LOAD_DWORDX2_BOTHEN]].sub1
+ ; CHECK: $vgpr0 = COPY [[COPY7]]
+ ; CHECK: $vgpr1 = COPY [[COPY8]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1
+ %val = call <4 x half> @llvm.amdgcn.struct.buffer.load.v4f16(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
+ ret <4 x half> %val
+}
+
+; Natural mapping + glc
+define amdgpu_ps float @struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc(<4 x i32> inreg %rsrc, i32 %vindex, i32 %voffset, i32 inreg %soffset) {
+ ; CHECK-LABEL: name: struct_buffer_load_f32__sgpr_rsrc__vgpr_vindex__vgpr_voffset__sgpr_soffset_glc
+ ; CHECK: bb.1 (%ir-block.0):
+ ; CHECK: liveins: $sgpr2, $sgpr3, $sgpr4, $sgpr5, $sgpr6, $vgpr0, $vgpr1
+ ; CHECK: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr2
+ ; CHECK: [[COPY1:%[0-9]+]]:sreg_32 = COPY $sgpr3
+ ; CHECK: [[COPY2:%[0-9]+]]:sreg_32 = COPY $sgpr4
+ ; CHECK: [[COPY3:%[0-9]+]]:sreg_32 = COPY $sgpr5
+ ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK: [[COPY5:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK: [[COPY6:%[0-9]+]]:sreg_32 = COPY $sgpr6
+ ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1, [[COPY2]], %subreg.sub2, [[COPY3]], %subreg.sub3
+ ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY5]], %subreg.sub1
+ ; CHECK: [[BUFFER_LOAD_DWORD_BOTHEN:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_BOTHEN [[REG_SEQUENCE1]], [[REG_SEQUENCE]], [[COPY6]], 0, 1, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: $vgpr0 = COPY [[BUFFER_LOAD_DWORD_BOTHEN]]
+ ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+ %val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 1)
+ ret float %val
+}
+
+declare i8 @llvm.amdgcn.struct.buffer.load.i8(<4 x i32>, i32, i32, i32, i32 immarg) #0
+declare i16 @llvm.amdgcn.struct.buffer.load.i16(<4 x i32>, i32, i32, i32, i32 immarg) #0
+declare float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32>, i32, i32, i32, i32 immarg) #0
+declare <2 x float> @llvm.amdgcn.struct.buffer.load.v2f32(<4 x i32>, i32, i32, i32, i32 immarg) #0
+declare <3 x float> @llvm.amdgcn.struct.buffer.load.v3f32(<4 x i32>, i32, i32, i32, i32 immarg) #0
+declare <4 x float> @llvm.amdgcn.struct.buffer.load.v4f32(<4 x i32>, i32, i32, i32, i32 immarg) #0
+declare half @llvm.amdgcn.struct.buffer.load.f16(<4 x i32>, i32, i32, i32, i32 immarg) #0
+declare <2 x half> @llvm.amdgcn.struct.buffer.load.v2f16(<4 x i32>, i32, i32, i32, i32 immarg) #0
+declare <3 x half> @llvm.amdgcn.struct.buffer.load.v3f16(<4 x i32>, i32, i32, i32, i32 immarg) #0
+declare <4 x half> @llvm.amdgcn.struct.buffer.load.v4f16(<4 x i32>, i32, i32, i32, i32 immarg) #0
+
+attributes #0 = { nounwind readonly }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.struct.buffer.load.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.struct.buffer.load.ll
index a657488278b0..de1d62812129 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.struct.buffer.load.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-amdgcn.struct.buffer.load.ll
@@ -15,8 +15,8 @@ define amdgpu_ps float @struct_buffer_load__sgpr_rsrc__vgpr_val__vgpr_vindex__vg
; CHECK: [[COPY5:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
; CHECK: [[COPY6:%[0-9]+]]:sgpr(s32) = COPY $sgpr6
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:sgpr(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32)
- ; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.struct.buffer.load), [[BUILD_VECTOR]](<4 x s32>), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), 0 :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
- ; CHECK: $vgpr0 = COPY [[INT]](s32)
+ ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s32) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[COPY4]](s32), [[COPY5]], [[COPY6]], 0, 0, -1 :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: $vgpr0 = COPY [[AMDGPU_BUFFER_LOAD]](s32)
; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %val
@@ -37,8 +37,8 @@ define amdgpu_ps float @struct_buffer_load__sgpr_rsrc__sgpr_val__sgpr_vindex__sg
; CHECK: [[BUILD_VECTOR:%[0-9]+]]:sgpr(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32)
; CHECK: [[COPY7:%[0-9]+]]:vgpr(s32) = COPY [[COPY4]](s32)
; CHECK: [[COPY8:%[0-9]+]]:vgpr(s32) = COPY [[COPY5]](s32)
- ; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.struct.buffer.load), [[BUILD_VECTOR]](<4 x s32>), [[COPY7]](s32), [[COPY8]](s32), [[COPY6]](s32), 0 :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
- ; CHECK: $vgpr0 = COPY [[INT]](s32)
+ ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s32) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[COPY7]](s32), [[COPY8]], [[COPY6]], 0, 0, -1 :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: $vgpr0 = COPY [[AMDGPU_BUFFER_LOAD]](s32)
; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %val
@@ -76,7 +76,7 @@ define amdgpu_ps float @struct_buffer_load__vgpr_rsrc__vgpr_val__vgpr_vindex__vg
; CHECK: [[V_CMP_EQ_U64_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U64_e64 [[MV1]](s64), [[UV1]](s64), implicit $exec
; CHECK: [[S_AND_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U64_e64_1]], [[V_CMP_EQ_U64_e64_]], implicit-def $scc
; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:sgpr(<4 x s32>) = G_BUILD_VECTOR [[V_READFIRSTLANE_B32_]](s32), [[V_READFIRSTLANE_B32_1]](s32), [[V_READFIRSTLANE_B32_2]](s32), [[V_READFIRSTLANE_B32_3]](s32)
- ; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.struct.buffer.load), [[BUILD_VECTOR1]](<4 x s32>), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), 0 :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s32) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR1]](<4 x s32>), [[COPY4]](s32), [[COPY5]], [[COPY6]], 0, 0, -1 :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
; CHECK: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
; CHECK: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec
@@ -84,7 +84,7 @@ define amdgpu_ps float @struct_buffer_load__vgpr_rsrc__vgpr_val__vgpr_vindex__vg
; CHECK: successors: %bb.4(0x80000000)
; CHECK: $exec = S_MOV_B64_term [[S_MOV_B64_term]]
; CHECK: bb.4:
- ; CHECK: $vgpr0 = COPY [[INT]](s32)
+ ; CHECK: $vgpr0 = COPY [[AMDGPU_BUFFER_LOAD]](s32)
; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %val
@@ -113,7 +113,7 @@ define amdgpu_ps float @struct_buffer_load__sgpr_rsrc__vgpr_val__vgpr_vindex_vgp
; CHECK: [[PHI1:%[0-9]+]]:vgpr(s32) = G_PHI [[DEF]](s32), %bb.1, %10(s32), %bb.2
; CHECK: [[V_READFIRSTLANE_B32_:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[COPY6]](s32), implicit $exec
; CHECK: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_]](s32), [[COPY6]](s32), implicit $exec
- ; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.struct.buffer.load), [[BUILD_VECTOR]](<4 x s32>), [[COPY4]](s32), [[COPY5]](s32), [[V_READFIRSTLANE_B32_]](s32), 0 :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s32) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR]](<4 x s32>), [[COPY4]](s32), [[COPY5]], [[V_READFIRSTLANE_B32_]], 0, 0, -1 :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
; CHECK: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[V_CMP_EQ_U32_e64_]], implicit-def $exec, implicit-def $scc, implicit $exec
; CHECK: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec
@@ -121,7 +121,7 @@ define amdgpu_ps float @struct_buffer_load__sgpr_rsrc__vgpr_val__vgpr_vindex_vgp
; CHECK: successors: %bb.4(0x80000000)
; CHECK: $exec = S_MOV_B64_term [[S_MOV_B64_term]]
; CHECK: bb.4:
- ; CHECK: $vgpr0 = COPY [[INT]](s32)
+ ; CHECK: $vgpr0 = COPY [[AMDGPU_BUFFER_LOAD]](s32)
; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %val
@@ -162,7 +162,7 @@ define amdgpu_ps float @struct_buffer_load__vgpr_rsrc__vgpr_val__vgpr_vindex__vg
; CHECK: [[V_READFIRSTLANE_B32_4:%[0-9]+]]:sreg_32_xm0(s32) = V_READFIRSTLANE_B32 [[COPY6]](s32), implicit $exec
; CHECK: [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_EQ_U32_e64 [[V_READFIRSTLANE_B32_4]](s32), [[COPY6]](s32), implicit $exec
; CHECK: [[S_AND_B64_1:%[0-9]+]]:sreg_64_xexec = S_AND_B64 [[V_CMP_EQ_U32_e64_]], [[S_AND_B64_]], implicit-def $scc
- ; CHECK: [[INT:%[0-9]+]]:vgpr(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.struct.buffer.load), [[BUILD_VECTOR1]](<4 x s32>), [[COPY4]](s32), [[COPY5]](s32), [[V_READFIRSTLANE_B32_4]](s32), 0 :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
+ ; CHECK: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:vgpr(s32) = G_AMDGPU_BUFFER_LOAD [[BUILD_VECTOR1]](<4 x s32>), [[COPY4]](s32), [[COPY5]], [[V_READFIRSTLANE_B32_4]], 0, 0, -1 :: (dereferenceable load 4 from custom "TargetCustom7", align 1, addrspace 4)
; CHECK: [[S_AND_SAVEEXEC_B64_:%[0-9]+]]:sreg_64_xexec = S_AND_SAVEEXEC_B64 killed [[S_AND_B64_1]], implicit-def $exec, implicit-def $scc, implicit $exec
; CHECK: $exec = S_XOR_B64_term $exec, [[S_AND_SAVEEXEC_B64_]], implicit-def $scc
; CHECK: S_CBRANCH_EXECNZ %bb.2, implicit $exec
@@ -170,7 +170,7 @@ define amdgpu_ps float @struct_buffer_load__vgpr_rsrc__vgpr_val__vgpr_vindex__vg
; CHECK: successors: %bb.4(0x80000000)
; CHECK: $exec = S_MOV_B64_term [[S_MOV_B64_term]]
; CHECK: bb.4:
- ; CHECK: $vgpr0 = COPY [[INT]](s32)
+ ; CHECK: $vgpr0 = COPY [[AMDGPU_BUFFER_LOAD]](s32)
; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
%val = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> %rsrc, i32 %vindex, i32 %voffset, i32 %soffset, i32 0)
ret float %val