[llvm] ad9eed1 - [MachineVerifier] Verify LiveIntervals for PHIs

Carl Ritson via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 18 02:14:52 PDT 2023


Author: Carl Ritson
Date: 2023-08-18T18:14:22+09:00
New Revision: ad9eed1e7735cf9f27d6323abeb1ff6651457968

URL: https://github.com/llvm/llvm-project/commit/ad9eed1e7735cf9f27d6323abeb1ff6651457968
DIFF: https://github.com/llvm/llvm-project/commit/ad9eed1e7735cf9f27d6323abeb1ff6651457968.diff

LOG: [MachineVerifier] Verify LiveIntervals for PHIs

Implement basic support for verifying LiveIntervals for PHIs.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D156872

Added: 
    llvm/test/CodeGen/AMDGPU/split-mbb-lis-subrange.mir

Modified: 
    llvm/lib/CodeGen/MachineVerifier.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index 7baac14f9d31cd..9baa5c86a5796b 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -2375,10 +2375,12 @@ void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
                                          const LiveRange &LR,
                                          Register VRegOrUnit,
                                          LaneBitmask LaneMask) {
+  const MachineInstr *MI = MO->getParent();
   LiveQueryResult LRQ = LR.Query(UseIdx);
+  bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
   // Check if we have a segment at the use, note however that we only need one
   // live subregister range, the others may be dead.
-  if (!LRQ.valueIn() && LaneMask.none()) {
+  if (!HasValue && LaneMask.none()) {
     report("No live segment at use", MO, MONum);
     report_context_liverange(LR);
     report_context_vreg_regunit(VRegOrUnit);
@@ -2484,7 +2486,14 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
 
     // Check LiveInts liveness and kill.
     if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
-      SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI);
+      SlotIndex UseIdx;
+      if (MI->isPHI()) {
+        // PHI use occurs on the edge, so check for live out here instead.
+        UseIdx = LiveInts->getMBBEndIdx(
+          MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
+      } else {
+        UseIdx = LiveInts->getInstructionIndex(*MI);
+      }
       // Check the cached regunit intervals.
       if (Reg.isPhysical() && !isReserved(Reg)) {
         for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
@@ -2509,7 +2518,7 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
               continue;
             checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
             LiveQueryResult LRQ = SR.Query(UseIdx);
-            if (LRQ.valueIn())
+            if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
               LiveInMask |= SR.LaneMask;
           }
           // At least parts of the register has to be live at the use.
@@ -2518,6 +2527,12 @@ void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
             report_context(*LI);
             report_context(UseIdx);
           }
+          // For PHIs, all lanes should be live.
+          if (MI->isPHI() && LiveInMask != MOMask) {
+            report("Not all lanes of PHI source live at use", MO, MONum);
+            report_context(*LI);
+            report_context(UseIdx);
+          }
         }
       }
     }
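
For context: a PHI does not use its operands at its own slot index; each
incoming value is used on the CFG edge from the corresponding predecessor,
so the verifier has to ask whether the value is live out of that
predecessor block. The following is a minimal standalone sketch of that
query, assuming the usual LLVM CodeGen headers; the helper name
getPHIUseIdx is ours for illustration, not part of the patch.

#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include <cassert>

using namespace llvm;

// PHI operands come in (value, predecessor) pairs: operand MONum holds the
// incoming value and operand MONum + 1 names the block it flows in from.
static SlotIndex getPHIUseIdx(const LiveIntervals &LIS,
                              const MachineInstr &PHI, unsigned MONum) {
  assert(PHI.isPHI() && "expected a PHI");
  const MachineBasicBlock *Pred = PHI.getOperand(MONum + 1).getMBB();
  // Query just before the block boundary, i.e. at the predecessor's
  // live-out point; this matches the UseIdx the patched verifier computes.
  return LIS.getMBBEndIdx(Pred).getPrevSlot();
}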

diff --git a/llvm/test/CodeGen/AMDGPU/split-mbb-lis-subrange.mir b/llvm/test/CodeGen/AMDGPU/split-mbb-lis-subrange.mir
new file mode 100644
index 00000000000000..9ee99a27dc84ff
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/split-mbb-lis-subrange.mir
@@ -0,0 +1,75 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
+# RUN: llc -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs -run-pass liveintervals -o - %s | FileCheck -check-prefixes=GCN %s
+
+# This test simply checks that liveintervals pass verification.
+
+---
+name: split_critical_edge_subranges
+tracksRegLiveness: true
+body:             |
+  ; GCN-LABEL: name: split_critical_edge_subranges
+  ; GCN: bb.0:
+  ; GCN-NEXT:   successors: %bb.3(0x40000000), %bb.1(0x40000000)
+  ; GCN-NEXT: {{  $}}
+  ; GCN-NEXT:   %coord:vreg_64 = IMPLICIT_DEF
+  ; GCN-NEXT:   %desc:sgpr_256 = IMPLICIT_DEF
+  ; GCN-NEXT:   %c0:sreg_32 = IMPLICIT_DEF
+  ; GCN-NEXT:   %c1:sreg_32 = IMPLICIT_DEF
+  ; GCN-NEXT:   %const:vgpr_32 = IMPLICIT_DEF
+  ; GCN-NEXT:   %load:vreg_64 = IMAGE_LOAD_V2_V2_gfx11 %coord, %desc, 3, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 16, addrspace 4)
+  ; GCN-NEXT:   %s0a:vgpr_32 = COPY %load.sub0
+  ; GCN-NEXT:   %s0b:vgpr_32 = COPY %load.sub1
+  ; GCN-NEXT:   S_CMP_EQ_U32 %c0, %c1, implicit-def $scc
+  ; GCN-NEXT:   S_CBRANCH_SCC1 %bb.3, implicit $scc
+  ; GCN-NEXT:   S_BRANCH %bb.1
+  ; GCN-NEXT: {{  $}}
+  ; GCN-NEXT: bb.1:
+  ; GCN-NEXT:   successors: %bb.3(0x80000000)
+  ; GCN-NEXT: {{  $}}
+  ; GCN-NEXT:   %s0c:vgpr_32 = V_ADD_F32_e64 0, %s0a, 0, %const, 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %s0d:vgpr_32 = V_ADD_F32_e64 0, %s0b, 0, %const, 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   S_BRANCH %bb.3
+  ; GCN-NEXT: {{  $}}
+  ; GCN-NEXT: bb.2:
+  ; GCN-NEXT:   S_NOP 0
+  ; GCN-NEXT:   S_ENDPGM 0
+  ; GCN-NEXT: {{  $}}
+  ; GCN-NEXT: bb.3:
+  ; GCN-NEXT:   successors: %bb.4(0x80000000)
+  ; GCN-NEXT: {{  $}}
+  ; GCN-NEXT:   %phi0:vgpr_32 = PHI %s0a, %bb.0, %s0c, %bb.1
+  ; GCN-NEXT:   %phi1:vgpr_32 = PHI %s0b, %bb.0, %s0d, %bb.1
+  ; GCN-NEXT:   S_BRANCH %bb.4
+  ; GCN-NEXT: {{  $}}
+  ; GCN-NEXT: bb.4:
+  ; GCN-NEXT:   S_ENDPGM 0, implicit %phi0, implicit %phi1
+  bb.0:
+    %coord:vreg_64 = IMPLICIT_DEF
+    %desc:sgpr_256 = IMPLICIT_DEF
+    %c0:sreg_32 = IMPLICIT_DEF
+    %c1:sreg_32 = IMPLICIT_DEF
+    %const:vgpr_32 = IMPLICIT_DEF
+    %load:vreg_64 = IMAGE_LOAD_V2_V2_gfx11 %coord:vreg_64, killed %desc:sgpr_256, 3, 1, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s64), align 16, addrspace 4)
+    %s0a:vgpr_32 = COPY %load.sub0:vreg_64
+    %s0b:vgpr_32 = COPY %load.sub1:vreg_64
+    S_CMP_EQ_U32 killed %c0:sreg_32, killed %c1:sreg_32, implicit-def $scc
+    S_CBRANCH_SCC1 %bb.3, implicit $scc
+    S_BRANCH %bb.1
+
+  bb.1:
+    %s0c:vgpr_32 = V_ADD_F32_e64 0, %s0a:vgpr_32, 0, %const:vgpr_32, 0, 0, implicit $mode, implicit $exec
+    %s0d:vgpr_32 = V_ADD_F32_e64 0, %s0b:vgpr_32, 0, %const:vgpr_32, 0, 0, implicit $mode, implicit $exec
+    S_BRANCH %bb.3
+
+  bb.2:
+    S_NOP 0
+    S_ENDPGM 0
+
+  bb.3:
+    %phi0:vgpr_32 = PHI %s0a:vgpr_32, %bb.0, %s0c:vgpr_32, %bb.1
+    %phi1:vgpr_32 = PHI %s0b:vgpr_32, %bb.0, %s0d:vgpr_32, %bb.1
+    S_BRANCH %bb.4
+
+  bb.4:
+    S_ENDPGM 0, implicit %phi0:vgpr_32, implicit %phi1:vgpr_32
+...
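
The new test is named for the interesting path: %load is a 64-bit register
whose two subregisters are copied into %s0a and %s0b, so the PHIs in bb.3
exercise the subrange ("all lanes live") check added above. Below is a
rough standalone sketch of that rule, under the same header assumptions as
the earlier sketch; this is an illustration, not the verifier's exact code.

// Returns true if every lane of the PHI source named by MOMask is live at
// UseIdx, where UseIdx is the predecessor's end slot as computed earlier.
static bool allPHISourceLanesLive(const LiveInterval &LI, SlotIndex UseIdx,
                                  LaneBitmask MOMask) {
  LaneBitmask LiveInMask;
  for (const LiveInterval::SubRange &SR : LI.subranges()) {
    if ((SR.LaneMask & MOMask).none())
      continue;
    LiveQueryResult LRQ = SR.Query(UseIdx);
    // Accept a value that is live into or out of the query point; the
    // valueOut() case is what the patch adds for PHI uses.
    if (LRQ.valueIn() || LRQ.valueOut())
      LiveInMask |= SR.LaneMask;
  }
  // Ordinary uses only need one live subregister range; a PHI source must
  // cover every lane in the operand's mask.
  return LiveInMask == MOMask;
}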
