[llvm] [AMDGPU] Add no return image_sample intrinsics and instructions (PR #97542)

via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 3 01:53:18 PDT 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-llvm-ir

@llvm/pr-subscribers-backend-amdgpu

Author: Carl Ritson (perlfu)

<details>
<summary>Changes</summary>

An appropriately configured image resource descriptor can trigger image_sample instructions to store outputs directly to a linked memory location instead of returning to VGPRs.

This is opaque to the backend as instruction encoding is unchanged; however, a mechanism is required to allow frontends to communicate that these instructions do not require destination VGPRs and instead store to memory.
Flagging these as stores means they will not be optimized away.

---

Patch is 55.62 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/97542.diff


12 Files Affected:

- (modified) llvm/include/llvm/IR/IntrinsicsAMDGPU.td (+29) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp (+3) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp (+13-4) 
- (modified) llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp (+2) 
- (modified) llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp (+2-1) 
- (modified) llvm/lib/Target/AMDGPU/MIMGInstructions.td (+192-31) 
- (modified) llvm/lib/Target/AMDGPU/SIISelLowering.cpp (+18-13) 
- (modified) llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp (+2-1) 
- (modified) llvm/lib/Target/AMDGPU/SIInstrInfo.cpp (+2) 
- (modified) llvm/lib/Target/AMDGPU/SIInstructions.td (+8) 
- (modified) llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h (+1) 
- (added) llvm/test/CodeGen/AMDGPU/llvm.amdgcn.image.sample.noret.ll (+479) 


``````````diff
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index 71b1e832bde3c..b2d91fd0360c8 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -780,6 +780,20 @@ class AMDGPUDimSampleProfile<string opmod,
   let LodClampMip = sample.LodOrClamp;
 }
 
+class AMDGPUDimSampleNoReturnProfile<string opmod,
+                             AMDGPUDimProps dim,
+                             AMDGPUSampleVariant sample> : AMDGPUDimProfile<opmod, dim> {
+  let IsSample = true;
+  let RetTypes = [];
+  let DataArgs = [];
+  let ExtraAddrArgs = sample.ExtraAddrArgs;
+  let Offset = sample.Offset;
+  let Bias = sample.Bias;
+  let ZCompare = sample.ZCompare;
+  let Gradients = sample.Gradients;
+  let LodClampMip = sample.LodOrClamp;
+}
+
 class AMDGPUDimNoSampleProfile<string opmod,
                                AMDGPUDimProps dim,
                                list<LLVMType> retty,
@@ -970,6 +984,21 @@ defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimIntrinsics = {
         AMDGPUImageDMaskIntrinsic;
   }
 
+  multiclass AMDGPUImageDimSampleNoReturnDims<string opmod,
+                                      AMDGPUSampleVariant sample> {
+    foreach dim = AMDGPUDims.NoMsaa in {
+      def !strconcat(NAME, "_", dim.Name) : AMDGPUImageDimIntrinsic<
+          AMDGPUDimSampleNoReturnProfile<opmod, dim, sample>,
+          [IntrWillReturn], [SDNPMemOperand]>;
+    }
+  }
+  foreach sample = AMDGPUSampleVariants in {
+    defm int_amdgcn_image_sample_noret # sample.LowerCaseMod
+      : AMDGPUImageDimSampleNoReturnDims<
+        "SAMPLE" # sample.UpperCaseMod # "_noret", sample>,
+        AMDGPUImageDMaskIntrinsic;
+  }
+
   defm int_amdgcn_image_getlod
     : AMDGPUImageDimSampleDims<"GET_LOD", AMDGPUSample, 1>,
       AMDGPUImageDMaskIntrinsic;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index dcb0f47973c4a..fd00527d73c42 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -1883,6 +1883,8 @@ bool AMDGPUInstructionSelector::selectImageIntrinsic(
       VDataIn = MI.getOperand(1).getReg();
       VDataTy = MRI->getType(VDataIn);
       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
+    } else if (BaseOpcode->NoReturn) {
+      NumVDataDwords = 0;
     } else {
       VDataOut = MI.getOperand(0).getReg();
       VDataTy = MRI->getType(VDataOut);
@@ -3629,6 +3631,7 @@ bool AMDGPUInstructionSelector::select(MachineInstr &I) {
     return selectG_INSERT_VECTOR_ELT(I);
   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
+  case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_NORET:
   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
     const AMDGPU::ImageDimIntrinsicInfo *Intr =
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index a219d01518458..c99ec79cd5a45 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -6322,8 +6322,13 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
   const LLT V2S16 = LLT::fixed_vector(2, 16);
 
   unsigned DMask = 0;
-  Register VData = MI.getOperand(NumDefs == 0 ? 1 : 0).getReg();
-  LLT Ty = MRI->getType(VData);
+  Register VData;
+  LLT Ty;
+
+  if (!BaseOpcode->NoReturn || BaseOpcode->Store) {
+    VData = MI.getOperand(NumDefs == 0 ? 1 : 0).getReg();
+    Ty = MRI->getType(VData);
+  }
 
   const bool IsAtomicPacked16Bit =
       (BaseOpcode->BaseOpcode == AMDGPU::IMAGE_ATOMIC_PK_ADD_F16 ||
@@ -6361,7 +6366,11 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
                                      : AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE;
   const unsigned LoadOpcode = IsD16 ? AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16
                                     : AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD;
-  unsigned NewOpcode = NumDefs == 0 ? StoreOpcode : LoadOpcode;
+  unsigned NewOpcode = LoadOpcode;
+  if (BaseOpcode->Store)
+    NewOpcode = StoreOpcode;
+  else if (BaseOpcode->NoReturn)
+    NewOpcode = AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_NORET;
 
   // Track that we legalized this
   MI.setDesc(B.getTII().get(NewOpcode));
@@ -6491,7 +6500,7 @@ bool AMDGPULegalizerInfo::legalizeImageIntrinsic(
     Flags |= 2;
   MI.addOperand(MachineOperand::CreateImm(Flags));
 
-  if (BaseOpcode->Store) { // No TFE for stores?
+  if (BaseOpcode->NoReturn) { // No TFE for stores?
     // TODO: Handle dmask trim
     if (!Ty.isVector() || !IsD16)
       return true;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 9e7694f41d6b8..dd30efa826b1b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3173,6 +3173,7 @@ void AMDGPURegisterBankInfo::applyMappingImpl(
   }
   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
+  case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_NORET:
   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
     const AMDGPU::RsrcIntrinsic *RSrcIntrin =
@@ -4843,6 +4844,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   }
   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
+  case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_NORET:
   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
     auto IntrID = AMDGPU::getIntrinsicID(MI);
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index b08957d22ee74..0c5a4ef39ae03 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -3864,7 +3864,8 @@ bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst,
   int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
   int TFEIdx   = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::tfe);
 
-  assert(VDataIdx != -1);
+  if (VDataIdx == -1 && isGFX10Plus()) // no return image_sample
+    return true;
 
   if ((DMaskIdx == -1 || TFEIdx == -1) && isGFX10_AEncoding()) // intersect_ray
     return true;
diff --git a/llvm/lib/Target/AMDGPU/MIMGInstructions.td b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
index e1468bf850cd7..4826969e4c3ae 100644
--- a/llvm/lib/Target/AMDGPU/MIMGInstructions.td
+++ b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
@@ -50,6 +50,7 @@ class MIMGBaseOpcode : PredicateControl {
   bit MSAA = 0;
   bit BVH = 0;
   bit A16 = 0;
+  bit NoReturn = 0;
 }
 
 def MIMGBaseOpcode : GenericEnum {
@@ -61,7 +62,7 @@ def MIMGBaseOpcodesTable : GenericTable {
   let CppTypeName = "MIMGBaseOpcodeInfo";
   let Fields = ["BaseOpcode", "Store", "Atomic", "AtomicX2", "Sampler",
                 "Gather4", "NumExtraArgs", "Gradients", "G16", "Coordinates",
-                "LodOrClampOrMip", "HasD16", "MSAA", "BVH", "A16"];
+                "LodOrClampOrMip", "HasD16", "MSAA", "BVH", "A16", "NoReturn"];
   string TypeOf_BaseOpcode = "MIMGBaseOpcode";
 
   let PrimaryKey = ["BaseOpcode"];
@@ -520,6 +521,25 @@ class VSAMPLE_Sampler_gfx12<mimgopc op, string opcode, RegisterClass DataRC,
                     #!if(BaseOpcode.HasD16, "$d16", "");
 }
 
+class VSAMPLE_Sampler_noret_gfx12<mimgopc op, string opcode,
+                            int num_addrs, RegisterClass Addr3RC = VGPR_32,
+                            string dns="">
+  : VSAMPLE_gfx12<op.GFX12, (outs), num_addrs, dns, Addr3RC> {
+  let InOperandList = !con(AddrIns,
+                           (ins SReg_256:$rsrc),
+                           !if(BaseOpcode.Sampler, (ins SReg_128:$samp), (ins)),
+                           (ins DMask:$dmask, Dim:$dim, UNorm:$unorm,
+                                CPol:$cpol, R128A16:$r128, A16:$a16, TFE:$tfe,
+                                LWE:$lwe),
+                           !if(BaseOpcode.HasD16, (ins D16:$d16), (ins)));
+  let AsmString = opcode#" off, "#AddrAsm#", $rsrc"
+                    #!if(BaseOpcode.Sampler, ", $samp", "")
+                    #"$dmask$dim$unorm$cpol$r128$a16$tfe$lwe"
+                    #!if(BaseOpcode.HasD16, "$d16", "");
+  // Force vdata to VGPR0 as no result will be returned.
+  let vdata = 0;
+}
+
 multiclass MIMG_NoSampler_Src_Helper <mimgopc op, string asm,
                                       RegisterClass dst_rc, bit enableDisasm,
                                       bit ExtendedImageInst = 1,
@@ -834,6 +854,7 @@ multiclass MIMG_Store <mimgopc op, string asm, bit has_d16, bit mip = 0> {
     let Store = 1;
     let LodOrClampOrMip = mip;
     let HasD16 = has_d16;
+    let NoReturn = 1;
   }
 
   let BaseOpcode = !cast<MIMGBaseOpcode>(NAME) in {
@@ -1135,44 +1156,62 @@ class MIMG_Sampler_gfx90a<mimgopc op, string asm, RegisterClass dst_rc,
                       #!if(BaseOpcode.HasD16, "$d16", "");
 }
 
+class MIMG_Sampler_OpList_gfx10p<dag OpPrefix, bit HasD16> {
+  dag ret = !con(OpPrefix,
+                 (ins SReg_256:$srsrc, SReg_128:$ssamp,
+                  DMask:$dmask, Dim:$dim, UNorm:$unorm, CPol:$cpol,
+                  R128A16:$r128, A16:$a16, TFE:$tfe, LWE:$lwe),
+                 !if(HasD16, (ins D16:$d16), (ins)));
+}
+
+class MIMG_Sampler_Asm_gfx10p<string opcode, string AsmPrefix, bit HasD16> {
+  string ret = opcode#" "#AsmPrefix#", $srsrc, $ssamp$dmask$dim$unorm"
+               #"$cpol$r128$a16$tfe$lwe"
+               #!if(HasD16, "$d16", "");
+}
+
 class MIMG_Sampler_gfx10<mimgopc op, string opcode,
                          RegisterClass DataRC, RegisterClass AddrRC,
                          string dns="">
   : MIMG_gfx10<op.GFX10M, (outs DataRC:$vdata), dns> {
-  let InOperandList = !con((ins AddrRC:$vaddr0, SReg_256:$srsrc, SReg_128:$ssamp,
-                                DMask:$dmask, Dim:$dim, UNorm:$unorm, CPol:$cpol,
-                                R128A16:$r128, A16:$a16, TFE:$tfe, LWE:$lwe),
-                           !if(BaseOpcode.HasD16, (ins D16:$d16), (ins)));
-  let AsmString = opcode#" $vdata, $vaddr0, $srsrc, $ssamp$dmask$dim$unorm"
-                    #"$cpol$r128$a16$tfe$lwe"
-                    #!if(BaseOpcode.HasD16, "$d16", "");
+  let InOperandList = MIMG_Sampler_OpList_gfx10p<(ins AddrRC:$vaddr0), BaseOpcode.HasD16>.ret;
+  let AsmString = MIMG_Sampler_Asm_gfx10p<opcode, "$vdata, $vaddr0", BaseOpcode.HasD16>.ret;
 }
 
 class MIMG_Sampler_nsa_gfx10<mimgopc op, string opcode,
                              RegisterClass DataRC, int num_addrs,
                              string dns="">
   : MIMG_nsa_gfx10<op.GFX10M, (outs DataRC:$vdata), num_addrs, dns> {
-  let InOperandList = !con(AddrIns,
-                           (ins SReg_256:$srsrc, SReg_128:$ssamp, DMask:$dmask,
-                                Dim:$dim, UNorm:$unorm, CPol:$cpol,
-                                R128A16:$r128, A16:$a16, TFE:$tfe, LWE:$lwe),
-                           !if(BaseOpcode.HasD16, (ins D16:$d16), (ins)));
-  let AsmString = opcode#" $vdata, "#AddrAsm#", $srsrc, $ssamp$dmask$dim$unorm"
-                    #"$cpol$r128$a16$tfe$lwe"
-                    #!if(BaseOpcode.HasD16, "$d16", "");
+  let InOperandList = MIMG_Sampler_OpList_gfx10p<AddrIns, BaseOpcode.HasD16>.ret;
+  let AsmString = MIMG_Sampler_Asm_gfx10p<opcode, " $vdata, "#AddrAsm, BaseOpcode.HasD16>.ret;
+}
+
+class MIMG_Sampler_noret_gfx10<mimgopc op, string opcode,
+                         RegisterClass AddrRC,
+                         string dns="">
+  : MIMG_gfx10<op.GFX10M, (outs), dns> {
+  let InOperandList = MIMG_Sampler_OpList_gfx10p<(ins AddrRC:$vaddr0), BaseOpcode.HasD16>.ret;
+  let AsmString = MIMG_Sampler_Asm_gfx10p<opcode, "off, $vaddr0", BaseOpcode.HasD16>.ret;
+  // Force vdata to VGPR0 as no result will be returned.
+  let vdata = 0;
+}
+
+class MIMG_Sampler_noret_nsa_gfx10<mimgopc op, string opcode,
+                         int num_addrs,
+                         string dns="">
+  : MIMG_nsa_gfx10<op.GFX10M, (outs), num_addrs, dns> {
+  let InOperandList = MIMG_Sampler_OpList_gfx10p<AddrIns, BaseOpcode.HasD16>.ret;
+  let AsmString = MIMG_Sampler_Asm_gfx10p<opcode, " off, "#AddrAsm, BaseOpcode.HasD16>.ret;
+  // Force vdata to VGPR0 as no result will be returned.
+  let vdata = 0;
 }
 
 class MIMG_Sampler_gfx11<mimgopc op, string opcode,
                          RegisterClass DataRC, RegisterClass AddrRC,
                          string dns="">
   : MIMG_gfx11<op.GFX11, (outs DataRC:$vdata), dns> {
-  let InOperandList = !con((ins AddrRC:$vaddr0, SReg_256:$srsrc, SReg_128:$ssamp,
-                                DMask:$dmask, Dim:$dim, UNorm:$unorm, CPol:$cpol,
-                                R128A16:$r128, A16:$a16, TFE:$tfe, LWE:$lwe),
-                           !if(BaseOpcode.HasD16, (ins D16:$d16), (ins)));
-  let AsmString = opcode#" $vdata, $vaddr0, $srsrc, $ssamp$dmask$dim$unorm"
-                    #"$cpol$r128$a16$tfe$lwe"
-                    #!if(BaseOpcode.HasD16, "$d16", "");
+  let InOperandList = MIMG_Sampler_OpList_gfx10p<(ins AddrRC:$vaddr0), BaseOpcode.HasD16>.ret;
+  let AsmString = MIMG_Sampler_Asm_gfx10p<opcode, "$vdata, $vaddr0", BaseOpcode.HasD16>.ret;
 }
 
 class MIMG_Sampler_nsa_gfx11<mimgopc op, string opcode,
@@ -1180,14 +1219,26 @@ class MIMG_Sampler_nsa_gfx11<mimgopc op, string opcode,
                              RegisterClass LastVAddrSize, string dns="">
   : MIMG_nsa_gfx11<op.GFX11, (outs DataRC:$vdata), num_addrs, dns, [],
                    LastVAddrSize> {
-  let InOperandList = !con(AddrIns,
-                           (ins SReg_256:$srsrc, SReg_128:$ssamp, DMask:$dmask,
-                                Dim:$dim, UNorm:$unorm, CPol:$cpol,
-                                R128A16:$r128, A16:$a16, TFE:$tfe, LWE:$lwe),
-                           !if(BaseOpcode.HasD16, (ins D16:$d16), (ins)));
-  let AsmString = opcode#" $vdata, "#AddrAsm#", $srsrc, $ssamp$dmask$dim$unorm"
-                    #"$cpol$r128$a16$tfe$lwe"
-                    #!if(BaseOpcode.HasD16, "$d16", "");
+  let InOperandList = MIMG_Sampler_OpList_gfx10p<AddrIns, BaseOpcode.HasD16>.ret;
+  let AsmString = MIMG_Sampler_Asm_gfx10p<opcode, " $vdata, "#AddrAsm, BaseOpcode.HasD16>.ret;
+}
+
+class MIMG_Sampler_noret_gfx11<mimgopc op, string opcode,
+                                  RegisterClass AddrRC,
+                                  string dns="">
+  : MIMG_gfx11<op.GFX11, (outs), dns> {
+  let InOperandList = MIMG_Sampler_OpList_gfx10p<(ins AddrRC:$vaddr0), BaseOpcode.HasD16>.ret;
+  let AsmString = MIMG_Sampler_Asm_gfx10p<opcode, "off, $vaddr0", BaseOpcode.HasD16>.ret;
+  let vdata = 0;
+}
+
+class MIMG_Sampler_noret_nsa_gfx11<mimgopc op, string opcode,
+                                      int num_addrs,
+                                      RegisterClass LastVAddrSize, string dns="">
+  : MIMG_nsa_gfx11<op.GFX11, (outs), num_addrs, dns, [], LastVAddrSize> {
+  let InOperandList = MIMG_Sampler_OpList_gfx10p<AddrIns, BaseOpcode.HasD16>.ret;
+  let AsmString = MIMG_Sampler_Asm_gfx10p<opcode, "off, "#AddrAsm, BaseOpcode.HasD16>.ret;
+  let vdata = 0;
 }
 
 class MIMGAddrSize<int dw, bit enable_disasm, int AddrDW = dw> {
@@ -1365,6 +1416,57 @@ class MIMG_Sampler_BaseOpcode<AMDGPUSampleVariant sample>
   let LodOrClampOrMip = !ne(sample.LodOrClamp, "");
 }
 
+multiclass MIMG_Sampler_NoReturn <mimgopc op, AMDGPUSampleVariant sample, bit wqm = 0, bit isG16, string asm> {
+  def "" : MIMG_Sampler_BaseOpcode<sample> {
+    let HasD16 = 1;
+    let G16 = isG16;
+    let NoReturn = 1;
+  }
+
+  let BaseOpcode = !cast<MIMGBaseOpcode>(NAME), WQM = wqm,
+      mayLoad = 1, mayStore = 1, VDataDwords = 0 in {
+    foreach addr = MIMG_Sampler_AddrSizes<sample, isG16>.MachineInstrs in {
+      let VAddrDwords = addr.NumWords in {
+        if op.HAS_GFX10M then {
+          def _V # addr.NumWords # _gfx10
+            : MIMG_Sampler_noret_gfx10 <op, asm, addr.RegClass>;
+        }
+        if op.HAS_GFX11 then {
+          def _V # addr.NumWords # _gfx11
+            : MIMG_Sampler_noret_gfx11 <op, asm, addr.RegClass>;
+        }
+      }
+    }
+
+    foreach addr = MIMG_Sampler_AddrSizes<sample, isG16>.NSAInstrs in {
+      let VAddrDwords = addr.NumWords in {
+        if op.HAS_GFX10M then {
+          def _V # addr.NumWords # _nsa_gfx10
+            : MIMG_Sampler_noret_nsa_gfx10<op, asm, addr.NumWords>;
+        }
+      }
+    }
+
+    foreach addr = MIMG_Sampler_AddrSizes<sample, isG16, 5/*MaxNSASize*/>.PartialNSAInstrs in {
+      let VAddrDwords = addr.NumWords in {
+        if op.HAS_GFX11 then {
+          def _V # addr.NumWords # _nsa_gfx11
+            : MIMG_Sampler_noret_nsa_gfx11<op, asm, addr.NumWords, addr.RegClass>;
+        }
+      }
+    }
+
+    foreach addr = MIMG_Sampler_AddrSizes<sample, isG16, 4/*MaxNSASize*/, 1>.PartialNSAInstrs in {
+      let VAddrDwords = addr.NumWords in {
+        if op.HAS_GFX12 then {
+          def _V # addr.NumWords # _gfx12
+            : VSAMPLE_Sampler_noret_gfx12<op, asm, addr.NumWords, addr.RegClass>;
+        }
+      }
+    }
+  }
+}
+
 multiclass MIMG_Sampler <mimgopc op, AMDGPUSampleVariant sample, bit wqm = 0,
                          bit isG16 = 0, bit isGetLod = 0,
                          string asm = "image_sample"#sample.LowerCaseMod#!if(isG16, "_g16", ""),
@@ -1387,6 +1489,9 @@ multiclass MIMG_Sampler <mimgopc op, AMDGPUSampleVariant sample, bit wqm = 0,
     let VDataDwords = 5 in
     defm _V5 : MIMG_Sampler_Src_Helper<op, asm, sample, VReg_160, 0, ExtendedImageInst, isG16>;
   }
+
+  if !not(isGetLod) then
+  defm "_noret" : MIMG_Sampler_NoReturn <op, sample, wqm, isG16, asm>;
 }
 
 multiclass MIMG_Sampler_WQM <mimgopc op, AMDGPUSampleVariant sample>
@@ -1754,6 +1859,10 @@ def : MIMGLZMapping<IMAGE_GATHER4_L, IMAGE_GATHER4_LZ>;
 def : MIMGLZMapping<IMAGE_GATHER4_C_L, IMAGE_GATHER4_C_LZ>;
 def : MIMGLZMapping<IMAGE_GATHER4_L_O, IMAGE_GATHER4_LZ_O>;
 def : MIMGLZMapping<IMAGE_GATHER4_C_L_O, IMAGE_GATHER4_C_LZ_O>;
+def : MIMGLZMapping<IMAGE_SAMPLE_L_noret, IMAGE_SAMPLE_LZ_noret>;
+def : MIMGLZMapping<IMAGE_SAMPLE_C_L_noret, IMAGE_SAMPLE_C_LZ_noret>;
+def : MIMGLZMapping<IMAGE_SAMPLE_L_O_noret, IMAGE_SAMPLE_LZ_O_noret>;
+def : MIMGLZMapping<IMAGE_SAMPLE_C_L_O_noret, IMAGE_SAMPLE_C_LZ_O_noret>;
 
 // MIP to NONMIP Optimization Mapping
 def : MIMGMIPMapping<IMAGE_LOAD_MIP, IMAGE_LOAD>;
@@ -1776,6 +1885,14 @@ def : MIMGBiasMapping<IMAGE_GATHER4_B_O, IMAGE_GATHER4_O>;
 def : MIMGBiasMapping<IMAGE_GATHER4_B_CL_O, IMAGE_GATHER4_CL_O>;
 def : MIMGBiasMapping<IMAGE_GATHER4_C_B_O, IMAGE_GATHER4_C_O>;
 def : MIMGBiasMapping<IMAGE_GATHER4_C_B_CL_O, IMAGE_GATHER4_C_CL_O>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_B_noret, IMAGE_SAMPLE_noret>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_B_CL_noret, IMAGE_SAMPLE_CL_noret>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_C_B_noret, IMAGE_SAMPLE_C_noret>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_C_B_CL_noret, IMAGE_SAMPLE_C_CL_noret>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_B_O_noret, IMAGE_SAMPLE_O_noret>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_B_CL_O_noret, IMAGE_SAMPLE_CL_O_noret>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_C_B_O_noret, IMAGE_SAMPLE_C_O_noret>;
+def : MIMGBiasMapping<IMAGE_SAMPLE_C_B_CL_O_noret, IMAGE_SAMPLE_C_CL_O_noret>;
 
 // Offset to NoOffset Optimization Mapping
 def : MIMGOffsetMapping<IMAGE_SAMPLE_O, IMAGE_SAMPLE>;
@@ -1818,6 +1935,34 @@ def : MIMGOffsetMapping<IMAGE_SAMPLE_CD_O_G16, IMAGE_SAMPLE_CD_G16>;
 def : MIMGOffsetMapping<IMAGE_SAMPLE_CD_CL_O_G16, IMAGE_SAMPLE_CD_CL_G16>;
 def : MIMGOffsetMapping<IMAGE_SAMPLE_C_CD_O_G16, IMAGE_SAMPLE_C_CD_G16>;
 def : MIMGOffsetMapping<IMAGE_SAMPLE_C_CD_CL_O_G16, IMAGE_SAMPLE_C_CD_CL_G16>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_O_noret, IMAGE_SAMPLE_noret>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_CL_O_noret, IMAGE_SAMPLE_CL_noret>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_D_O_noret, IMAGE_SAMPLE_D_noret>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_D_CL_O_noret, IMAGE_SAMPLE_D_CL_noret>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_D_O_G16_noret, IMAGE_SAMPLE_D_G16_noret>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_D_CL_O_G16_noret, IMAGE_SAMPLE_D_CL_G16_noret>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_L_O_noret, IMAGE_SAMPLE_L_noret>;
+def : MIMGOffsetMapping<IMAGE_SAMPLE_B_O_noret, IMAGE_SAMPLE_B_noret>;
+def : MIMGOffsetMappi...
[truncated]

``````````

</details>


https://github.com/llvm/llvm-project/pull/97542


More information about the llvm-commits mailing list