[llvm] r312725 - AMDGPU: Handle more than one memory operand in SIMemoryLegalizer

Konstantin Zhuravlyov via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 7 09:14:21 PDT 2017


Author: kzhuravl
Date: Thu Sep  7 09:14:21 2017
New Revision: 312725

URL: http://llvm.org/viewvc/llvm-project?rev=312725&view=rev
Log:
AMDGPU: Handle more than one memory operand in SIMemoryLegalizer

Differential Revision: https://reviews.llvm.org/D37397

Added:
    llvm/trunk/test/CodeGen/MIR/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h
    llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h?rev=312725&r1=312724&r2=312725&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h Thu Sep  7 09:14:21 2017
@@ -16,6 +16,8 @@
 #ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUMACHINEMODULEINFO_H
 #define LLVM_LIB_TARGET_AMDGPU_AMDGPUMACHINEMODULEINFO_H
 
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineModuleInfoImpls.h"
 #include "llvm/IR/LLVMContext.h"
@@ -35,6 +37,27 @@ private:
   /// \brief Wavefront synchronization scope ID.
   SyncScope::ID WavefrontSSID;
 
+  /// \brief In the AMDGPU target, synchronization scopes are inclusive, meaning
+  /// a larger synchronization scope is inclusive of a smaller synchronization
+  /// scope.
+  ///
+  /// \returns \p SSID's inclusion ordering, or "None" if \p SSID is not
+  /// supported by the AMDGPU target.
+  Optional<uint8_t> getSyncScopeInclusionOrdering(SyncScope::ID SSID) const {
+    if (SSID == SyncScope::SingleThread)
+      return 0;
+    else if (SSID == getWavefrontSSID())
+      return 1;
+    else if (SSID == getWorkgroupSSID())
+      return 2;
+    else if (SSID == getAgentSSID())
+      return 3;
+    else if (SSID == SyncScope::System)
+      return 4;
+
+    return None;
+  }
+
 public:
   AMDGPUMachineModuleInfo(const MachineModuleInfo &MMI);
 
@@ -50,6 +73,23 @@ public:
   SyncScope::ID getWavefrontSSID() const {
     return WavefrontSSID;
   }
+
+  /// \brief In the AMDGPU target, synchronization scopes are inclusive, meaning
+  /// a larger synchronization scope is inclusive of a smaller synchronization
+  /// scope.
+  ///
+  /// \returns True if synchronization scope \p A is larger than or equal to
+  /// synchronization scope \p B, false if synchronization scope \p A is smaller
+  /// than synchronization scope \p B, or "None" if either synchronization scope
+  /// \p A or \p B is not supported by the AMDGPU target.
+  Optional<bool> isSyncScopeInclusion(SyncScope::ID A, SyncScope::ID B) const {
+    const auto &AIO = getSyncScopeInclusionOrdering(A);
+    const auto &BIO = getSyncScopeInclusionOrdering(B);
+    if (!AIO || !BIO)
+      return None;
+
+    return AIO.getValue() > BIO.getValue();
+  }
 };
 
 } // end namespace llvm

Modified: llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp?rev=312725&r1=312724&r2=312725&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIMemoryLegalizer.cpp Thu Sep  7 09:14:21 2017
@@ -60,6 +60,11 @@ private:
               AtomicOrdering FailureOrdering)
       : SSID(SSID), Ordering(Ordering), FailureOrdering(FailureOrdering) {}
 
+  /// \returns Info constructed from \p MI, which has at least one machine
+  /// memory operand.
+  static Optional<SIMemOpInfo> constructFromMIWithMMO(
+      const MachineBasicBlock::iterator &MI);
+
 public:
   /// \returns Synchronization scope ID of the machine instruction used to
   /// create this SIMemOpInfo.
@@ -101,13 +106,15 @@ public:
   /// "None" otherwise.
   static Optional<SIMemOpInfo> getAtomicRmwInfo(
       const MachineBasicBlock::iterator &MI);
+
+  /// \brief Reports unknown synchronization scope used in \p MI to LLVM
+  /// context.
+  static void reportUnknownSyncScope(
+      const MachineBasicBlock::iterator &MI);
 };
 
 class SIMemoryLegalizer final : public MachineFunctionPass {
 private:
-  /// \brief LLVM context.
-  LLVMContext *CTX = nullptr;
-
   /// \brief Machine module info.
   const AMDGPUMachineModuleInfo *MMI = nullptr;
 
@@ -140,10 +147,6 @@ private:
   /// function. Returns true if current function is modified, false otherwise.
   bool removeAtomicPseudoMIs();
 
-  /// \brief Reports unknown synchronization scope used in \p MI to LLVM
-  /// context.
-  void reportUnknownSynchScope(const MachineBasicBlock::iterator &MI);
-
   /// \brief Expands load operation \p MI. Returns true if instructions are
   /// added/deleted or \p MI is modified, false otherwise.
   bool expandLoad(const SIMemOpInfo &MOI,
@@ -185,18 +188,54 @@ public:
 } // end namespace anonymous
 
 /* static */
+Optional<SIMemOpInfo> SIMemOpInfo::constructFromMIWithMMO(
+    const MachineBasicBlock::iterator &MI) {
+  assert(MI->getNumMemOperands() > 0);
+
+  const MachineFunction *MF = MI->getParent()->getParent();
+  const AMDGPUMachineModuleInfo *MMI =
+      &MF->getMMI().getObjFileInfo<AMDGPUMachineModuleInfo>();
+
+  SyncScope::ID SSID = SyncScope::SingleThread;
+  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
+  AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;
+
+  // Validator should check whether or not MMOs cover the entire set of
+  // locations accessed by the memory instruction.
+  for (const auto &MMO : MI->memoperands()) {
+    const auto &IsSyncScopeInclusion =
+        MMI->isSyncScopeInclusion(SSID, MMO->getSyncScopeID());
+    if (!IsSyncScopeInclusion) {
+      reportUnknownSyncScope(MI);
+      return None;
+    }
+
+    SSID = IsSyncScopeInclusion.getValue() ? SSID : MMO->getSyncScopeID();
+    Ordering =
+        isStrongerThan(Ordering, MMO->getOrdering()) ?
+            Ordering : MMO->getOrdering();
+    FailureOrdering =
+        isStrongerThan(FailureOrdering, MMO->getFailureOrdering()) ?
+            FailureOrdering : MMO->getFailureOrdering();
+  }
+
+  return SIMemOpInfo(SSID, Ordering, FailureOrdering);
+}
+
+/* static */
 Optional<SIMemOpInfo> SIMemOpInfo::getLoadInfo(
     const MachineBasicBlock::iterator &MI) {
   assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);
 
   if (!(MI->mayLoad() && !MI->mayStore()))
     return None;
-  if (!MI->hasOneMemOperand())
+
+  // Be conservative if there are no memory operands.
+  if (MI->getNumMemOperands() == 0)
     return SIMemOpInfo(SyncScope::System,
                        AtomicOrdering::SequentiallyConsistent);
 
-  const MachineMemOperand *MMO = *MI->memoperands_begin();
-  return SIMemOpInfo(MMO->getSyncScopeID(), MMO->getOrdering());
+  return SIMemOpInfo::constructFromMIWithMMO(MI);
 }
 
 /* static */
@@ -206,12 +245,13 @@ Optional<SIMemOpInfo> SIMemOpInfo::getSt
 
   if (!(!MI->mayLoad() && MI->mayStore()))
     return None;
-  if (!MI->hasOneMemOperand())
+
+  // Be conservative if there are no memory operands.
+  if (MI->getNumMemOperands() == 0)
     return SIMemOpInfo(SyncScope::System,
                        AtomicOrdering::SequentiallyConsistent);
 
-  const MachineMemOperand *MMO = *MI->memoperands_begin();
-  return SIMemOpInfo(MMO->getSyncScopeID(), MMO->getOrdering());
+  return SIMemOpInfo::constructFromMIWithMMO(MI);
 }
 
 /* static */
@@ -236,14 +276,14 @@ Optional<SIMemOpInfo> SIMemOpInfo::getAt
 
   if (!(MI->mayLoad() && MI->mayStore()))
     return None;
-  if (!MI->hasOneMemOperand())
+
+  // Be conservative if there are no memory operands.
+  if (MI->getNumMemOperands() == 0)
     return SIMemOpInfo(SyncScope::System,
                        AtomicOrdering::SequentiallyConsistent,
                        AtomicOrdering::SequentiallyConsistent);
 
-  const MachineMemOperand *MMO = *MI->memoperands_begin();
-  return SIMemOpInfo(MMO->getSyncScopeID(), MMO->getOrdering(),
-                     MMO->getFailureOrdering());
+  return SIMemOpInfo::constructFromMIWithMMO(MI);
 }
 
 /* static */
@@ -253,12 +293,22 @@ Optional<SIMemOpInfo> SIMemOpInfo::getAt
 
   if (!(MI->mayLoad() && MI->mayStore()))
     return None;
-  if (!MI->hasOneMemOperand())
+
+  // Be conservative if there are no memory operands.
+  if (MI->getNumMemOperands() == 0)
     return SIMemOpInfo(SyncScope::System,
                        AtomicOrdering::SequentiallyConsistent);
 
-  const MachineMemOperand *MMO = *MI->memoperands_begin();
-  return SIMemOpInfo(MMO->getSyncScopeID(), MMO->getOrdering());
+  return SIMemOpInfo::constructFromMIWithMMO(MI);
+}
+
+/* static */
+void SIMemOpInfo::reportUnknownSyncScope(
+    const MachineBasicBlock::iterator &MI) {
+  DiagnosticInfoUnsupported Diag(*MI->getParent()->getParent()->getFunction(),
+                                 "Unsupported synchronization scope");
+  LLVMContext *CTX = &MI->getParent()->getParent()->getFunction()->getContext();
+  CTX->diagnose(Diag);
 }
 
 bool SIMemoryLegalizer::insertBufferWbinvl1Vol(MachineBasicBlock::iterator &MI,
@@ -317,13 +367,6 @@ bool SIMemoryLegalizer::removeAtomicPseu
   return true;
 }
 
-void SIMemoryLegalizer::reportUnknownSynchScope(
-    const MachineBasicBlock::iterator &MI) {
-  DiagnosticInfoUnsupported Diag(*MI->getParent()->getParent()->getFunction(),
-                                 "Unsupported synchronization scope");
-  CTX->diagnose(Diag);
-}
-
 bool SIMemoryLegalizer::expandLoad(const SIMemOpInfo &MOI,
                                    MachineBasicBlock::iterator &MI) {
   assert(MI->mayLoad() && !MI->mayStore());
@@ -347,14 +390,15 @@ bool SIMemoryLegalizer::expandLoad(const
       }
 
       return Changed;
-    } else if (MOI.getSSID() == SyncScope::SingleThread ||
-               MOI.getSSID() == MMI->getWorkgroupSSID() ||
-               MOI.getSSID() == MMI->getWavefrontSSID()) {
-      return Changed;
-    } else {
-      reportUnknownSynchScope(MI);
+    }
+
+    if (MOI.getSSID() == SyncScope::SingleThread ||
+        MOI.getSSID() == MMI->getWorkgroupSSID() ||
+        MOI.getSSID() == MMI->getWavefrontSSID()) {
       return Changed;
     }
+
+    llvm_unreachable("Unsupported synchronization scope");
   }
 
   return Changed;
@@ -374,14 +418,15 @@ bool SIMemoryLegalizer::expandStore(cons
         Changed |= insertWaitcntVmcnt0(MI);
 
       return Changed;
-    } else if (MOI.getSSID() == SyncScope::SingleThread ||
-               MOI.getSSID() == MMI->getWorkgroupSSID() ||
-               MOI.getSSID() == MMI->getWavefrontSSID()) {
-      return Changed;
-    } else {
-      reportUnknownSynchScope(MI);
+    }
+
+    if (MOI.getSSID() == SyncScope::SingleThread ||
+        MOI.getSSID() == MMI->getWorkgroupSSID() ||
+        MOI.getSSID() == MMI->getWavefrontSSID()) {
       return Changed;
     }
+
+    llvm_unreachable("Unsupported synchronization scope");
   }
 
   return Changed;
@@ -409,15 +454,16 @@ bool SIMemoryLegalizer::expandAtomicFenc
 
       AtomicPseudoMIs.push_back(MI);
       return Changed;
-    } else if (MOI.getSSID() == SyncScope::SingleThread ||
-               MOI.getSSID() == MMI->getWorkgroupSSID() ||
-               MOI.getSSID() == MMI->getWavefrontSSID()) {
+    }
+
+    if (MOI.getSSID() == SyncScope::SingleThread ||
+        MOI.getSSID() == MMI->getWorkgroupSSID() ||
+        MOI.getSSID() == MMI->getWavefrontSSID()) {
       AtomicPseudoMIs.push_back(MI);
       return Changed;
-    } else {
-      reportUnknownSynchScope(MI);
-      return Changed;
     }
+
+    SIMemOpInfo::reportUnknownSyncScope(MI);
   }
 
   return Changed;
@@ -448,15 +494,16 @@ bool SIMemoryLegalizer::expandAtomicCmpx
       }
 
       return Changed;
-    } else if (MOI.getSSID() == SyncScope::SingleThread ||
-               MOI.getSSID() == MMI->getWorkgroupSSID() ||
-               MOI.getSSID() == MMI->getWavefrontSSID()) {
+    }
+
+    if (MOI.getSSID() == SyncScope::SingleThread ||
+        MOI.getSSID() == MMI->getWorkgroupSSID() ||
+        MOI.getSSID() == MMI->getWavefrontSSID()) {
       Changed |= setGLC(MI);
       return Changed;
-    } else {
-      reportUnknownSynchScope(MI);
-      return Changed;
     }
+
+    llvm_unreachable("Unsupported synchronization scope");
   }
 
   return Changed;
@@ -484,15 +531,16 @@ bool SIMemoryLegalizer::expandAtomicRmw(
       }
 
       return Changed;
-    } else if (MOI.getSSID() == SyncScope::SingleThread ||
-               MOI.getSSID() == MMI->getWorkgroupSSID() ||
-               MOI.getSSID() == MMI->getWavefrontSSID()) {
+    }
+
+    if (MOI.getSSID() == SyncScope::SingleThread ||
+        MOI.getSSID() == MMI->getWorkgroupSSID() ||
+        MOI.getSSID() == MMI->getWavefrontSSID()) {
       Changed |= setGLC(MI);
       return Changed;
-    } else {
-      reportUnknownSynchScope(MI);
-      return Changed;
     }
+
+    llvm_unreachable("Unsupported synchronization scope");
   }
 
   return Changed;
@@ -503,7 +551,6 @@ bool SIMemoryLegalizer::runOnMachineFunc
   const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
   const IsaInfo::IsaVersion IV = IsaInfo::getIsaVersion(ST.getFeatureBits());
 
-  CTX = &MF.getFunction()->getContext();
   MMI = &MF.getMMI().getObjFileInfo<AMDGPUMachineModuleInfo>();
   TII = ST.getInstrInfo();
 

Added: llvm/trunk/test/CodeGen/MIR/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MIR/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir?rev=312725&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/MIR/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir (added)
+++ llvm/trunk/test/CodeGen/MIR/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir Thu Sep  7 09:14:21 2017
@@ -0,0 +1,163 @@
+# RUN: llc -march=amdgcn -mcpu=gfx803 -run-pass si-memory-legalizer  %s -o - | FileCheck %s
+
+--- |
+  ; ModuleID = 'memory-legalizer-multiple-mem-operands.ll'
+  source_filename = "memory-legalizer-multiple-mem-operands.ll"
+  target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
+  define amdgpu_kernel void @multiple_mem_operands(i32 addrspace(1)* %out, i32 %cond, i32 %if_offset, i32 %else_offset) #0 {
+  entry:
+    %scratch0 = alloca [8192 x i32]
+    %scratch1 = alloca [8192 x i32]
+    %scratchptr01 = bitcast [8192 x i32]* %scratch0 to i32*
+    store i32 1, i32* %scratchptr01
+    %scratchptr12 = bitcast [8192 x i32]* %scratch1 to i32*
+    store i32 2, i32* %scratchptr12
+    %cmp = icmp eq i32 %cond, 0
+    br i1 %cmp, label %if, label %else, !structurizecfg.uniform !0, !amdgpu.uniform !0
+
+  if:                                               ; preds = %entry
+    %if_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch0, i32 0, i32 %if_offset, !amdgpu.uniform !0
+    %if_value = load atomic i32, i32* %if_ptr syncscope("workgroup") seq_cst, align 4
+    br label %done, !structurizecfg.uniform !0
+
+  else:                                             ; preds = %entry
+    %else_ptr = getelementptr [8192 x i32], [8192 x i32]* %scratch1, i32 0, i32 %else_offset, !amdgpu.uniform !0
+    %else_value = load atomic i32, i32* %else_ptr syncscope("agent") unordered, align 4
+    br label %done, !structurizecfg.uniform !0
+
+  done:                                             ; preds = %else, %if
+    %value = phi i32 [ %if_value, %if ], [ %else_value, %else ]
+    store i32 %value, i32 addrspace(1)* %out
+    ret void
+  }
+
+  ; Function Attrs: convergent nounwind
+  declare { i1, i64 } @llvm.amdgcn.if(i1) #1
+
+  ; Function Attrs: convergent nounwind
+  declare { i1, i64 } @llvm.amdgcn.else(i64) #1
+
+  ; Function Attrs: convergent nounwind readnone
+  declare i64 @llvm.amdgcn.break(i64) #2
+
+  ; Function Attrs: convergent nounwind readnone
+  declare i64 @llvm.amdgcn.if.break(i1, i64) #2
+
+  ; Function Attrs: convergent nounwind readnone
+  declare i64 @llvm.amdgcn.else.break(i64, i64) #2
+
+  ; Function Attrs: convergent nounwind
+  declare i1 @llvm.amdgcn.loop(i64) #1
+
+  ; Function Attrs: convergent nounwind
+  declare void @llvm.amdgcn.end.cf(i64) #1
+
+  attributes #0 = { "target-cpu"="gfx803" }
+  attributes #1 = { convergent nounwind }
+  attributes #2 = { convergent nounwind readnone }
+
+  !0 = !{}
+
+...
+---
+
+# CHECK-LABEL: name: multiple_mem_operands
+
+# CHECK-LABEL: bb.3.done:
+# CHECK:       S_WAITCNT 3952
+# CHECK-NEXT:  BUFFER_LOAD_DWORD_OFFEN
+# CHECK-NEXT:  S_WAITCNT 3952
+# CHECK-NEXT:  BUFFER_WBINVL1_VOL
+
+name:            multiple_mem_operands
+alignment:       0
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+tracksRegLiveness: true
+registers:
+liveins:
+  - { reg: '%sgpr0_sgpr1', virtual-reg: '' }
+  - { reg: '%sgpr3', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       65540
+  offsetAdjustment: 0
+  maxAlignment:    4
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:
+  - { id: 0, type: default, offset: 0, size: 4, alignment: 4, stack-id: 0,
+      isImmutable: false, isAliased: false, callee-saved-register: '' }
+stack:
+  - { id: 0, name: scratch0, type: default, offset: 4, size: 32768, alignment: 4,
+      stack-id: 0, callee-saved-register: '', local-offset: 0, di-variable: '',
+      di-expression: '', di-location: '' }
+  - { id: 1, name: scratch1, type: default, offset: 32772, size: 32768,
+      alignment: 4, stack-id: 0, callee-saved-register: '', local-offset: 32768,
+      di-variable: '', di-expression: '', di-location: '' }
+constants:
+body:             |
+  bb.0.entry:
+    successors: %bb.1.if(0x30000000), %bb.2.else(0x50000000)
+    liveins: %sgpr0_sgpr1, %sgpr3
+
+    %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    %sgpr8 = S_MOV_B32 $SCRATCH_RSRC_DWORD0, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
+    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %sgpr9 = S_MOV_B32 $SCRATCH_RSRC_DWORD1, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
+    %sgpr10 = S_MOV_B32 4294967295, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
+    %sgpr11 = S_MOV_B32 15204352, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
+    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
+    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr01)
+    S_WAITCNT 127
+    S_CMP_LG_U32 killed %sgpr2, 0, implicit-def %scc
+    S_WAITCNT 3855
+    %vgpr0 = V_MOV_B32_e32 2, implicit %exec
+    %vgpr1 = V_MOV_B32_e32 32772, implicit %exec
+    BUFFER_STORE_DWORD_OFFEN killed %vgpr0, killed %vgpr1, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr12)
+    S_CBRANCH_SCC0 %bb.1.if, implicit killed %scc
+
+  bb.2.else:
+    successors: %bb.3.done(0x80000000)
+    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+
+    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    S_WAITCNT 3855
+    %vgpr0 = V_MOV_B32_e32 32772, implicit %exec
+    S_BRANCH %bb.3.done
+
+  bb.1.if:
+    successors: %bb.3.done(0x80000000)
+    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+
+    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    S_WAITCNT 3855
+    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
+
+  bb.3.done:
+    liveins: %sgpr3, %sgpr4_sgpr5, %sgpr8_sgpr9_sgpr10_sgpr11, %vgpr0, %sgpr0
+
+    S_WAITCNT 127
+    %sgpr0 = S_LSHL_B32 killed %sgpr0, 2, implicit-def dead %scc
+    %vgpr0 = V_ADD_I32_e32 killed %sgpr0, killed %vgpr0, implicit-def dead %vcc, implicit %exec
+    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (load syncscope("agent") unordered 4 from %ir.else_ptr), (load syncscope("workgroup") seq_cst 4 from %ir.if_ptr)
+    %vgpr1 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr1_vgpr2, implicit %sgpr4_sgpr5
+    %vgpr2 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit %sgpr4_sgpr5, implicit %exec
+    S_WAITCNT 3952
+    FLAT_STORE_DWORD killed %vgpr1_vgpr2, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.out)
+    S_ENDPGM
+
+...




More information about the llvm-commits mailing list