[llvm] r335607 - Account for undef values from predecessors in extendSegmentsToUses

Krzysztof Parzyszek via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 26 07:37:16 PDT 2018


Author: kparzysz
Date: Tue Jun 26 07:37:16 2018
New Revision: 335607

URL: http://llvm.org/viewvc/llvm-project?rev=335607&view=rev
Log:
Account for undef values from predecessors in extendSegmentsToUses

It is legal for a PHI node not to have a live value in a predecessor
as long as the end of the predecessor is jointly dominated by the
undef points of the live range.
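
In builds with assertions enabled, the new self-check collects the undef
points of the subrange and verifies that they jointly dominate the end of
the predecessor that has no live-out value, instead of asserting outright.
An excerpt from the patch (full context in the LiveIntervals.cpp hunk
below):

    // Only subranges may legitimately lack a predecessor value; the main
    // range still asserts.
    SmallVector<SlotIndex,8> Undefs;
    LI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
    assert(LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes) &&
           "Missing value out of predecessor for subrange");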

Added:
    llvm/trunk/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir
Modified:
    llvm/trunk/include/llvm/CodeGen/LiveIntervals.h
    llvm/trunk/lib/CodeGen/LiveIntervals.cpp
    llvm/trunk/lib/CodeGen/LiveRangeCalc.cpp
    llvm/trunk/lib/CodeGen/LiveRangeCalc.h

Modified: llvm/trunk/include/llvm/CodeGen/LiveIntervals.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/LiveIntervals.h?rev=335607&r1=335606&r2=335607&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/LiveIntervals.h (original)
+++ llvm/trunk/include/llvm/CodeGen/LiveIntervals.h Tue Jun 26 07:37:16 2018
@@ -462,6 +462,10 @@ class VirtRegMap;
     void computeRegUnitRange(LiveRange&, unsigned Unit);
     void computeVirtRegInterval(LiveInterval&);
 
+    using ShrinkToUsesWorkList = SmallVector<std::pair<SlotIndex, VNInfo*>, 16>;
+    void extendSegmentsToUses(LiveRange &Segments,
+                              ShrinkToUsesWorkList &WorkList, unsigned Reg,
+                              LaneBitmask LaneMask);
 
     /// Helper function for repairIntervalsInRange(), walks backwards and
     /// creates/modifies live segments in \p LR to match the operands found.
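
For orientation, each ShrinkToUsesWorkList entry pairs a use's slot index
with the value number live at that use. A rough sketch of how such a
worklist gets populated (a hypothetical loop for illustration only; the
exact code lives in shrinkToUses in the .cpp diff below):

    // Hypothetical sketch: pair each use slot of Reg with the value number
    // live at that slot; the real shrinkToUses handles more cases (e.g.
    // reads of undef lanes). MRI, Indexes, and LI are assumed in scope.
    ShrinkToUsesWorkList WorkList;
    for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
      SlotIndex Idx = Indexes->getInstructionIndex(*MO.getParent()).getRegSlot();
      if (VNInfo *VNI = LI.getVNInfoAt(Idx))
        WorkList.push_back(std::make_pair(Idx, VNI));
    }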

Modified: llvm/trunk/lib/CodeGen/LiveIntervals.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/LiveIntervals.cpp?rev=335607&r1=335606&r2=335607&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/LiveIntervals.cpp (original)
+++ llvm/trunk/lib/CodeGen/LiveIntervals.cpp Tue Jun 26 07:37:16 2018
@@ -358,26 +358,40 @@ static void createSegmentsForValues(Live
   }
 }
 
-using ShrinkToUsesWorkList = SmallVector<std::pair<SlotIndex, VNInfo*>, 16>;
-
-static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
-                                 ShrinkToUsesWorkList &WorkList,
-                                 const LiveRange &OldRange) {
+void LiveIntervals::extendSegmentsToUses(LiveRange &Segments,
+                                         ShrinkToUsesWorkList &WorkList,
+                                         unsigned Reg, LaneBitmask LaneMask) {
   // Keep track of the PHIs that are in use.
   SmallPtrSet<VNInfo*, 8> UsedPHIs;
   // Blocks that have already been added to WorkList as live-out.
   SmallPtrSet<const MachineBasicBlock*, 16> LiveOut;
 
+  auto getSubRange = [](const LiveInterval &I, LaneBitmask M)
+        -> const LiveRange& {
+    if (M.none())
+      return I;
+    for (const LiveInterval::SubRange &SR : I.subranges()) {
+      if ((SR.LaneMask & M).any()) {
+        assert(SR.LaneMask == M && "Expecting lane masks to match exactly");
+        return SR;
+      }
+    }
+    llvm_unreachable("Subrange for mask not found");
+  };
+
+  const LiveInterval &LI = getInterval(Reg);
+  const LiveRange &OldRange = getSubRange(LI, LaneMask);
+
   // Extend intervals to reach all uses in WorkList.
   while (!WorkList.empty()) {
     SlotIndex Idx = WorkList.back().first;
     VNInfo *VNI = WorkList.back().second;
     WorkList.pop_back();
-    const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(Idx.getPrevSlot());
-    SlotIndex BlockStart = Indexes.getMBBStartIdx(MBB);
+    const MachineBasicBlock *MBB = Indexes->getMBBFromIndex(Idx.getPrevSlot());
+    SlotIndex BlockStart = Indexes->getMBBStartIdx(MBB);
 
     // Extend the live range for VNI to be live at Idx.
-    if (VNInfo *ExtVNI = LR.extendInBlock(BlockStart, Idx)) {
+    if (VNInfo *ExtVNI = Segments.extendInBlock(BlockStart, Idx)) {
       assert(ExtVNI == VNI && "Unexpected existing value number");
       (void)ExtVNI;
       // Is this a PHIDef we haven't seen before?
@@ -388,7 +402,7 @@ static void extendSegmentsToUses(LiveRan
       for (const MachineBasicBlock *Pred : MBB->predecessors()) {
         if (!LiveOut.insert(Pred).second)
           continue;
-        SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
+        SlotIndex Stop = Indexes->getMBBEndIdx(Pred);
         // A predecessor is not required to have a live-out value for a PHI.
         if (VNInfo *PVNI = OldRange.getVNInfoBefore(Stop))
           WorkList.push_back(std::make_pair(Stop, PVNI));
@@ -398,16 +412,28 @@ static void extendSegmentsToUses(LiveRan
 
     // VNI is live-in to MBB.
     LLVM_DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
-    LR.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));
+    Segments.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));
 
     // Make sure VNI is live-out from the predecessors.
     for (const MachineBasicBlock *Pred : MBB->predecessors()) {
       if (!LiveOut.insert(Pred).second)
         continue;
-      SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
-      assert(OldRange.getVNInfoBefore(Stop) == VNI &&
-             "Wrong value out of predecessor");
-      WorkList.push_back(std::make_pair(Stop, VNI));
+      SlotIndex Stop = Indexes->getMBBEndIdx(Pred);
+      if (VNInfo *OldVNI = OldRange.getVNInfoBefore(Stop)) {
+        assert(OldVNI == VNI && "Wrong value out of predecessor");
+        WorkList.push_back(std::make_pair(Stop, VNI));
+      } else {
+#ifndef NDEBUG
+        // There was no old VNI. Verify that Stop is jointly dominated
+        // by <undef>s for this live range.
+        assert(LaneMask.any() &&
+               "Missing value out of predecessor for main range");
+        SmallVector<SlotIndex,8> Undefs;
+        LI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
+        assert(LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes) &&
+               "Missing value out of predecessor for subrange");
+#endif
+      }
     }
   }
 }
@@ -460,7 +486,7 @@ bool LiveIntervals::shrinkToUses(LiveInt
   // Create new live ranges with only minimal live segments per def.
   LiveRange NewLR;
   createSegmentsForValues(NewLR, make_range(li->vni_begin(), li->vni_end()));
-  extendSegmentsToUses(NewLR, *Indexes, WorkList, *li);
+  extendSegmentsToUses(NewLR, WorkList, Reg, LaneBitmask::getNone());
 
   // Move the trimmed segments back.
   li->segments.swap(NewLR.segments);
@@ -558,7 +584,7 @@ void LiveIntervals::shrinkToUses(LiveInt
   // Create a new live range with only minimal live segments per def.
   LiveRange NewLR;
   createSegmentsForValues(NewLR, make_range(SR.vni_begin(), SR.vni_end()));
-  extendSegmentsToUses(NewLR, *Indexes, WorkList, SR);
+  extendSegmentsToUses(NewLR, WorkList, Reg, SR.LaneMask);
 
   // Move the trimmed ranges back.
   SR.segments.swap(NewLR.segments);
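
Both shrinkToUses overloads above now funnel into the shared member; only
the lane mask differs, selecting which "old" range predecessor values are
checked against:

    // Main range: an empty mask makes getSubRange return the interval itself.
    extendSegmentsToUses(NewLR, WorkList, Reg, LaneBitmask::getNone());
    // Subrange: the subrange's mask selects the matching old subrange.
    extendSegmentsToUses(NewLR, WorkList, Reg, SR.LaneMask);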

Modified: llvm/trunk/lib/CodeGen/LiveRangeCalc.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/LiveRangeCalc.cpp?rev=335607&r1=335606&r2=335607&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/LiveRangeCalc.cpp (original)
+++ llvm/trunk/lib/CodeGen/LiveRangeCalc.cpp Tue Jun 26 07:37:16 2018
@@ -584,3 +584,24 @@ void LiveRangeCalc::updateSSA() {
     }
   } while (Changed);
 }
+
+bool LiveRangeCalc::isJointlyDominated(const MachineBasicBlock *MBB,
+                                       ArrayRef<SlotIndex> Defs,
+                                       const SlotIndexes &Indexes) {
+  const MachineFunction &MF = *MBB->getParent();
+  BitVector DefBlocks(MF.getNumBlockIDs());
+  for (SlotIndex I : Defs)
+    DefBlocks.set(Indexes.getMBBFromIndex(I)->getNumber());
+
+  SetVector<unsigned> PredQueue;
+  PredQueue.insert(MBB->getNumber());
+  for (unsigned i = 0; i != PredQueue.size(); ++i) {
+    unsigned BN = PredQueue[i];
+    if (DefBlocks[BN])
+      return true;
+    const MachineBasicBlock *B = MF.getBlockNumbered(BN);
+    for (const MachineBasicBlock *P : B->predecessors())
+      PredQueue.insert(P->getNumber());
+  }
+  return false;
+}
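
To make the reverse walk above concrete, here is a self-contained toy
(plain C++, not LLVM code) that mirrors the same scheme: visit each block
at most once, walking predecessor edges, and succeed as soon as a block
carrying one of the undef points is reached.

    #include <cstddef>
    #include <set>
    #include <vector>

    // Toy stand-in for LiveRangeCalc::isJointlyDominated. Preds[b] lists the
    // predecessors of block b; DefBlocks holds the blocks with undef points.
    static bool reachesAnUndef(const std::vector<std::vector<int>> &Preds,
                               int Start, const std::set<int> &DefBlocks) {
      std::vector<int> Queue{Start};
      std::set<int> Seen{Start};       // plays the role of the SetVector
      for (std::size_t I = 0; I != Queue.size(); ++I) {
        int BN = Queue[I];
        if (DefBlocks.count(BN))
          return true;                 // reached an undef point
        for (int P : Preds[BN])
          if (Seen.insert(P).second)   // enqueue each block only once
            Queue.push_back(P);
      }
      return false;                    // no undef point on any backward path
    }

    int main() {
      // The simplified CFG from the test comment below: bb.1 -> {bb.2, bb.3},
      // bb.3 -> bb.4, bb.4 -> bb.2; the undef point for %0.sub1 is in bb.3.
      std::vector<std::vector<int>> Preds = {{}, {}, {1, 4}, {1}, {3}};
      // Walking back from bb.4 reaches bb.3, so the check succeeds.
      return reachesAnUndef(Preds, /*Start=*/4, {3}) ? 0 : 1;
    }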

Modified: llvm/trunk/lib/CodeGen/LiveRangeCalc.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/LiveRangeCalc.h?rev=335607&r1=335606&r2=335607&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/LiveRangeCalc.h (original)
+++ llvm/trunk/lib/CodeGen/LiveRangeCalc.h Tue Jun 26 07:37:16 2018
@@ -282,6 +282,15 @@ public:
   /// Every predecessor of a live-in block must have been given a value with
  /// setLiveOutValue; the value may be null for live-through blocks.
   void calculateValues();
+
+  /// A diagnostic function to check if the end of the block @p MBB is
+  /// jointly dominated by the blocks corresponding to the slot indices
+  /// in @p Defs. This function is mainly for use in self-verification
+  /// checks.
+  LLVM_ATTRIBUTE_UNUSED
+  static bool isJointlyDominated(const MachineBasicBlock *MBB,
+                                 ArrayRef<SlotIndex> Defs,
+                                 const SlotIndexes &Indexes);
 };
 
 } // end namespace llvm

Added: llvm/trunk/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir?rev=335607&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/subreg-split-live-in-error.mir Tue Jun 26 07:37:16 2018
@@ -0,0 +1,273 @@
+# RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-vgpr-index-mode -run-pass=greedy -stress-regalloc=16 -o - %s | FileCheck -check-prefixes=GCN %s
+
+# An interval for a register that was partially defined was split, creating
+# a new use (a COPY) that was reached by an undef point; in particular, a
+# subrange of the new register was reached by an "undef" point. When the
+# code in extendSegmentsToUses verified value numbers between the new and
+# the old live ranges, it did not account for this kind of situation and
+# asserted, expecting the old value to exist. For a PHI node it is legal
+# to have a missing predecessor value as long as the end of the
+# predecessor is jointly dominated by the undefs.
+#
+# A simplified form of this can be illustrated as:
+#
+# bb.1:
+#   %0:vreg_64 = IMPLICIT_DEF
+#   ...
+#   S_CBRANCH_SCC1 %bb.2, implicit $vcc
+#   S_BRANCH %bb.3
+#
+# bb.2:
+# ; predecessors: %bb.1, %bb.4
+#   dead %1:vreg_64 = COPY %0:vreg_64 ; This is the point of the inserted split
+#   ...
+#   S_BRANCH %bb.5
+#
+# bb.3:
+# ; predecessors: %bb.1
+#   undef %0.sub0:vreg_64 = COPY %123:sreg_32 ; undef point for %0.sub1
+#   ...
+#   S_BRANCH %bb.4
+#
+# bb.4:
+# ; predecessors: %bb.3
+#   ...
+#   S_BRANCH %bb.2
+#
+# This test exposes the scenario that previously caused an assert.
+
+---
+name:            _amdgpu_ps_main
+tracksRegLiveness: true
+liveins:
+  - { reg: '$vgpr2', virtual-reg: '%0' }
+  - { reg: '$vgpr3', virtual-reg: '%1' }
+  - { reg: '$vgpr4', virtual-reg: '%2' }
+body: |
+  bb.0:
+    successors: %bb.1(0x40000000), %bb.2(0x40000000)
+    liveins: $vgpr2, $vgpr3, $vgpr4
+    %2:vgpr_32 = COPY $vgpr4
+    %1:vgpr_32 = COPY $vgpr3
+    %0:vgpr_32 = COPY $vgpr2
+    S_CBRANCH_SCC0 %bb.2, implicit undef $scc
+
+  bb.1:
+    successors: %bb.5(0x80000000)
+    undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
+    %3.sub1:vreg_128 = COPY %3.sub0
+    %3.sub2:vreg_128 = COPY %3.sub0
+    S_BRANCH %bb.5
+
+  bb.2:
+    successors: %bb.3(0x40000000), %bb.4(0x40000000)
+    S_CBRANCH_SCC0 %bb.4, implicit undef $scc
+
+  bb.3:
+    successors: %bb.5(0x80000000)
+    undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
+    %3.sub1:vreg_128 = COPY %3.sub0
+    S_BRANCH %bb.5
+
+  bb.4:
+    successors: %bb.5(0x80000000)
+    %3:vreg_128 = IMPLICIT_DEF
+
+  bb.5:
+    successors: %bb.6(0x40000000), %bb.22(0x40000000)
+    %4:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    S_CBRANCH_SCC1 %bb.22, implicit undef $scc
+    S_BRANCH %bb.6
+
+  bb.6:
+    successors: %bb.8(0x40000000), %bb.11(0x40000000)
+    %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    dead %6:vgpr_32 = V_MUL_F32_e32 0, undef %7:vgpr_32, implicit $exec
+    dead %8:vgpr_32 = V_MUL_F32_e32 0, %2, implicit $exec
+    undef %9.sub1:vreg_64 = V_MUL_F32_e32 0, %1, implicit $exec
+    undef %10.sub0:vreg_128 = V_MUL_F32_e32 0, %0, implicit $exec
+    undef %11.sub0:sreg_256 = S_MOV_B32 0
+    %11.sub1:sreg_256 = COPY %11.sub0
+    %11.sub2:sreg_256 = COPY %11.sub0
+    %11.sub3:sreg_256 = COPY %11.sub0
+    %11.sub4:sreg_256 = COPY %11.sub0
+    %11.sub5:sreg_256 = COPY %11.sub0
+    %11.sub6:sreg_256 = COPY %11.sub0
+    %11.sub7:sreg_256 = COPY %11.sub0
+    %12:vreg_128 = IMAGE_SAMPLE_LZ_V4_V2 %9, %11, undef %13:sreg_128, 15, 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4)
+    %14:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec
+    %15:vreg_128 = IMPLICIT_DEF
+    S_CBRANCH_SCC1 %bb.8, implicit undef $scc
+    S_BRANCH %bb.11
+
+  bb.7:
+    successors: %bb.13(0x80000000)
+    undef %15.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
+    %15.sub1:vreg_128 = COPY %15.sub0
+    %15.sub2:vreg_128 = COPY %15.sub0
+    %5:vgpr_32 = IMPLICIT_DEF
+    S_BRANCH %bb.13
+
+  bb.8:
+    successors: %bb.9(0x40000000), %bb.10(0x40000000)
+    S_CBRANCH_SCC0 %bb.10, implicit undef $scc
+
+  bb.9:
+    successors: %bb.12(0x80000000)
+    undef %15.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
+    %15.sub1:vreg_128 = COPY %15.sub0
+    %15.sub2:vreg_128 = COPY %15.sub0
+    S_BRANCH %bb.12
+
+  bb.10:
+    successors: %bb.12(0x80000000)
+    undef %15.sub0:vreg_128 = V_MOV_B32_e32 2143289344, implicit $exec
+    %15.sub1:vreg_128 = COPY %15.sub0
+    %15.sub2:vreg_128 = COPY %15.sub0
+    S_BRANCH %bb.12
+
+  bb.11:
+    successors: %bb.7(0x40000000), %bb.13(0x40000000)
+    %16:sreg_64 = V_CMP_NE_U32_e64 0, %14, implicit $exec
+    %17:sreg_64 = S_AND_B64 $exec, %16, implicit-def dead $scc
+    $vcc = COPY %17
+    S_CBRANCH_VCCNZ %bb.7, implicit $vcc
+    S_BRANCH %bb.13
+
+  bb.12:
+    successors: %bb.11(0x80000000)
+    %14:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    %5:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec
+    S_BRANCH %bb.11
+
+  bb.13:
+    successors: %bb.15(0x40000000), %bb.14(0x40000000)
+
+    ; In reality we are checking that this code doesn't assert when splitting
+    ; and inserting a spill. Here we just check that, at the point where the
+    ; error occurred, we see a correctly generated spill.
+    ; GCN-LABEL: bb.13:
+    ; GCN: SI_SPILL_V128_SAVE %{{[0-9]+}}, %stack.1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr5, 0, implicit $exec
+
+    %18:vgpr_32 = V_MAD_F32 0, %10.sub0, 0, target-flags(amdgpu-gotprel) 1073741824, 0, -1082130432, 0, 0, implicit $exec
+    %19:vgpr_32 = V_MAD_F32 0, %12.sub0, 0, target-flags(amdgpu-gotprel) 0, 0, 0, 0, 0, implicit $exec
+    %20:sreg_128 = S_BUFFER_LOAD_DWORDX4_IMM undef %21:sreg_128, 1040, 0 :: (dereferenceable invariant load 16)
+    %22:vgpr_32 = V_ADD_F32_e32 0, %19, implicit $exec
+    %23:vgpr_32 = V_MAD_F32 0, %18, 0, 0, 0, 0, 0, 0, implicit $exec
+    %24:vgpr_32 = COPY %20.sub3
+    %25:vgpr_32 = V_MUL_F32_e64 0, target-flags(amdgpu-gotprel32-lo) 0, 0, %20.sub1, 0, 0, implicit $exec
+    %26:sreg_128 = S_BUFFER_LOAD_DWORDX4_IMM undef %27:sreg_128, 1056, 0 :: (dereferenceable invariant load 16)
+    %28:vgpr_32 = V_MAD_F32 0, %18, 0, %26.sub0, 0, 0, 0, 0, implicit $exec
+    %29:vgpr_32 = V_ADD_F32_e32 %28, %19, implicit $exec
+    %30:vgpr_32 = V_RCP_F32_e32 %29, implicit $exec
+    %25:vgpr_32 = V_MAC_F32_e32 0, %18, %25, implicit $exec
+    %31:vgpr_32 = V_MAD_F32 0, target-flags(amdgpu-gotprel) 0, 0, %12.sub0, 0, %24, 0, 0, implicit $exec
+    %32:vgpr_32 = V_ADD_F32_e32 %25, %31, implicit $exec
+    %33:vgpr_32 = V_MUL_F32_e32 %22, %30, implicit $exec
+    %34:vgpr_32 = V_MUL_F32_e32 %23, %30, implicit $exec
+    %35:vgpr_32 = V_MUL_F32_e32 %32, %30, implicit $exec
+    %36:vgpr_32 = V_MUL_F32_e32 0, %34, implicit $exec
+    %36:vgpr_32 = V_MAC_F32_e32 0, %33, %36, implicit $exec
+    %37:vgpr_32 = V_MAD_F32 0, %35, 0, 0, 0, 0, 0, 0, implicit $exec
+    %38:sreg_64_xexec = V_CMP_NE_U32_e64 0, %5, implicit $exec
+    %39:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %38, implicit $exec
+    V_CMP_NE_U32_e32 1, %39, implicit-def $vcc, implicit $exec
+    $vcc = S_AND_B64 $exec, $vcc, implicit-def dead $scc
+    %40:vgpr_32 = V_ADD_F32_e32 %36, %37, implicit $exec
+    S_CBRANCH_VCCZ %bb.15, implicit $vcc
+
+  bb.14:
+    successors: %bb.17(0x80000000)
+    S_BRANCH %bb.17
+
+  bb.15:
+    successors: %bb.16(0x40000000), %bb.18(0x40000000)
+    %41:vgpr_32 = V_MAD_F32 0, %40, 0, 0, 0, 0, 0, 0, implicit $exec
+    %42:sreg_64 = V_CMP_LE_F32_e64 0, 0, 0, %41, 0, implicit $exec
+    %43:sreg_64 = V_CMP_GE_F32_e64 0, 1065353216, 0, %41, 0, implicit $exec
+    %44:sreg_64 = S_AND_B64 %43, %43, implicit-def dead $scc
+    %45:sreg_64 = S_AND_B64 %42, %42, implicit-def dead $scc
+    %46:sreg_64 = S_AND_B64 %45, %44, implicit-def dead $scc
+    %47:sreg_64 = COPY $exec, implicit-def $exec
+    %48:sreg_64 = S_AND_B64 %47, %46, implicit-def dead $scc
+    $exec = S_MOV_B64_term %48
+    SI_MASK_BRANCH %bb.18, implicit $exec
+    S_BRANCH %bb.16
+
+  bb.16:
+    successors: %bb.18(0x80000000)
+    S_BRANCH %bb.18
+
+  bb.17:
+    successors: %bb.21(0x40000000), %bb.23(0x40000000)
+    %49:sreg_64 = V_CMP_NE_U32_e64 0, %5, implicit $exec
+    %50:sreg_64 = S_AND_B64 $exec, %49, implicit-def dead $scc
+    %51:vreg_128 = IMPLICIT_DEF
+    $vcc = COPY %50
+    S_CBRANCH_VCCNZ %bb.21, implicit $vcc
+    S_BRANCH %bb.23
+
+  bb.18:
+    successors: %bb.20(0x40000000), %bb.19(0x40000000)
+    $exec = S_OR_B64 $exec, %47, implicit-def $scc
+    %52:vgpr_32 = V_MAD_F32 0, %3.sub1, 0, target-flags(amdgpu-gotprel32-lo) 0, 1, %3.sub0, 0, 0, implicit $exec
+    %53:vgpr_32 = V_MUL_F32_e32 -2147483648, %3.sub1, implicit $exec
+    %53:vgpr_32 = V_MAC_F32_e32 target-flags(amdgpu-gotprel32-hi) 1065353216, %3.sub2, %53, implicit $exec
+    %54:vgpr_32 = V_MUL_F32_e32 %53, %53, implicit $exec
+    %54:vgpr_32 = V_MAC_F32_e32 %52, %52, %54, implicit $exec
+    %55:vgpr_32 = V_SQRT_F32_e32 %54, implicit $exec
+    %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    %56:vgpr_32 = V_MOV_B32_e32 981668463, implicit $exec
+    %57:sreg_64 = V_CMP_NGT_F32_e64 0, %55, 0, %56, 0, implicit $exec
+    %58:sreg_64 = S_AND_B64 $exec, %57, implicit-def dead $scc
+    $vcc = COPY %58
+    S_CBRANCH_VCCZ %bb.20, implicit $vcc
+
+  bb.19:
+    successors: %bb.17(0x80000000)
+    S_BRANCH %bb.17
+
+  bb.20:
+    successors: %bb.17(0x80000000)
+    S_BRANCH %bb.17
+
+  bb.21:
+    successors: %bb.23(0x80000000)
+    %59:sreg_32 = S_MOV_B32 0
+    undef %51.sub0:vreg_128 = COPY %59
+    S_BRANCH %bb.23
+
+  bb.22:
+    successors: %bb.24(0x80000000)
+    S_BRANCH %bb.24
+
+  bb.23:
+    successors: %bb.22(0x80000000)
+    undef %60.sub1:vreg_64 = V_CVT_I32_F32_e32 %1, implicit $exec
+    %60.sub0:vreg_64 = V_CVT_I32_F32_e32 %0, implicit $exec
+    undef %61.sub0:sreg_256 = S_MOV_B32 0
+    %61.sub1:sreg_256 = COPY %61.sub0
+    %61.sub2:sreg_256 = COPY %61.sub0
+    %61.sub3:sreg_256 = COPY %61.sub0
+    %61.sub4:sreg_256 = COPY %61.sub0
+    %61.sub5:sreg_256 = COPY %61.sub0
+    %61.sub6:sreg_256 = COPY %61.sub0
+    %61.sub7:sreg_256 = COPY %61.sub0
+    %62:vgpr_32 = V_MOV_B32_e32 1033100696, implicit $exec
+    %63:vgpr_32 = V_MUL_F32_e32 1060575065, %15.sub1, implicit $exec
+    %63:vgpr_32 = V_MAC_F32_e32 1046066128, %15.sub0, %63, implicit $exec
+    %64:vgpr_32 = IMAGE_LOAD_V1_V2 %60, %61, 1, -1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4)
+    %64:vgpr_32 = V_MAC_F32_e32 target-flags(amdgpu-gotprel) 0, %51.sub0, %64, implicit $exec
+    %65:vgpr_32 = V_MUL_F32_e32 0, %64, implicit $exec
+    %66:vgpr_32 = V_MUL_F32_e32 0, %65, implicit $exec
+    %67:vgpr_32 = V_MAD_F32 0, %66, 0, %62, 0, 0, 0, 0, implicit $exec
+    %63:vgpr_32 = V_MAC_F32_e32 %15.sub2, %62, %63, implicit $exec
+    %4:vgpr_32 = V_ADD_F32_e32 %63, %67, implicit $exec
+    S_BRANCH %bb.22
+
+  bb.24:
+    %68:vgpr_32 = V_MUL_F32_e32 0, %4, implicit $exec
+    %69:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, undef %70:vgpr_32, 0, %68, 0, implicit $exec
+    EXP 0, undef %71:vgpr_32, %69, undef %72:vgpr_32, undef %73:vgpr_32, -1, -1, 15, implicit $exec
+    S_ENDPGM
+...



