[llvm] AMDGPU: Implement getRequiredProperties for SIFoldOperands (PR #127522)
Mon Feb 17 08:54:19 PST 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-amdgpu
Author: Matt Arsenault (arsenm)
Changes:
Fix the broken MIR tests violating isSSA.
---
Full diff: https://github.com/llvm/llvm-project/pull/127522.diff
3 Files Affected:
- (modified) llvm/lib/Target/AMDGPU/SIFoldOperands.cpp (+5)
- (modified) llvm/lib/Target/AMDGPU/SIFoldOperands.h (+5)
- (modified) llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir (+21-20)
``````````diff
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
index d8f3f9c54abc1..999553bfaff38 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -166,6 +166,11 @@ class SIFoldOperandsLegacy : public MachineFunctionPass {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::IsSSA);
+ }
};
} // End anonymous namespace.
diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.h b/llvm/lib/Target/AMDGPU/SIFoldOperands.h
index d6b8f6a729526..c419ec0911e20 100644
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.h
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.h
@@ -17,6 +17,11 @@ class SIFoldOperandsPass : public PassInfoMixin<SIFoldOperandsPass> {
SIFoldOperandsPass() = default;
PreservedAnalyses run(MachineFunction &MF,
MachineFunctionAnalysisManager &MFAM);
+
+ MachineFunctionProperties getRequiredProperties() const {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::IsSSA);
+ }
};
} // namespace llvm
diff --git a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
index 3db2b6ed9ab4b..39b5076ebe5ac 100644
--- a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
+++ b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
@@ -96,8 +96,8 @@ body: |
%12:vgpr_32 = V_AND_B32_e64 %8, %8, implicit $exec
FLAT_STORE_DWORD %19, %12, 0, 0, implicit $exec, implicit $flat_scr
- %13:vgpr_32 = V_AND_B32_e64 %16, %16, implicit $exec
- FLAT_STORE_DWORD %19, %13, 0, 0, implicit $exec, implicit $flat_scr
+ %21:vgpr_32 = V_AND_B32_e64 %16, %16, implicit $exec
+ FLAT_STORE_DWORD %19, %21, 0, 0, implicit $exec, implicit $flat_scr
S_ENDPGM 0
@@ -191,6 +191,7 @@ body: |
name: v_fold_ashr_imm_regimm_32
tracksRegLiveness: true
+isSSA: true
liveins:
- { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
- { reg: '$vgpr0', virtual-reg: '%2' }
@@ -232,8 +233,8 @@ body: |
%14:vgpr_32 = V_ASHR_I32_e64 7, %29, implicit $exec
FLAT_STORE_DWORD %20, %14, 0, 0, implicit $exec, implicit $flat_scr
- %15:vgpr_32 = V_ASHR_I32_e64 %27, %24, implicit $exec
- FLAT_STORE_DWORD %20, %15, 0, 0, implicit $exec, implicit $flat_scr
+ %33:vgpr_32 = V_ASHR_I32_e64 %27, %24, implicit $exec
+ FLAT_STORE_DWORD %20, %33, 0, 0, implicit $exec, implicit $flat_scr
%22:vgpr_32 = V_ASHR_I32_e64 %6, 4, implicit $exec
FLAT_STORE_DWORD %20, %22, 0, 0, implicit $exec, implicit $flat_scr
@@ -356,8 +357,8 @@ body: |
%14:vgpr_32 = V_LSHR_B32_e64 7, %29, implicit $exec
FLAT_STORE_DWORD %20, %14, 0, 0, implicit $exec, implicit $flat_scr
- %15:vgpr_32 = V_LSHR_B32_e64 %27, %24, implicit $exec
- FLAT_STORE_DWORD %20, %15, 0, 0, implicit $exec, implicit $flat_scr
+ %33:vgpr_32 = V_LSHR_B32_e64 %27, %24, implicit $exec
+ FLAT_STORE_DWORD %20, %33, 0, 0, implicit $exec, implicit $flat_scr
%22:vgpr_32 = V_LSHR_B32_e64 %6, 4, implicit $exec
FLAT_STORE_DWORD %20, %22, 0, 0, implicit $exec, implicit $flat_scr
@@ -497,8 +498,8 @@ body: |
# GCN: %17:vgpr_32 = V_MOV_B32_e32 1234567, implicit $exec
# GCN: FLAT_STORE_DWORD %10, %17,
-# GCN: %3:vgpr_32 = V_MOV_B32_e32 63, implicit $exec
-# GCN: FLAT_STORE_DWORD %10, %3,
+# GCN: %18:vgpr_32 = V_MOV_B32_e32 63, implicit $exec
+# GCN: FLAT_STORE_DWORD %10, %18,
name: v_fold_or_imm_regimm_32
alignment: 0
@@ -536,8 +537,8 @@ body: |
FLAT_STORE_DWORD %19, %11, 0, 0, implicit $exec, implicit $flat_scr
%12:vgpr_32 = V_OR_B32_e64 %8, %8, implicit $exec
FLAT_STORE_DWORD %19, %12, 0, 0, implicit $exec, implicit $flat_scr
- %13:vgpr_32 = V_OR_B32_e64 %16, %16, implicit $exec
- FLAT_STORE_DWORD %19, %13, 0, 0, implicit $exec, implicit $flat_scr
+ %21:vgpr_32 = V_OR_B32_e64 %16, %16, implicit $exec
+ FLAT_STORE_DWORD %19, %21, 0, 0, implicit $exec, implicit $flat_scr
S_ENDPGM 0
...
@@ -689,24 +690,24 @@ body: |
# GCN: %19:vgpr_32 = V_MOV_B32_e32 24, implicit $exec
# GCN: FLAT_STORE_DWORD %10, %19,
-# GCN: %3:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-# GCN: FLAT_STORE_DWORD %10, %3,
-
-# GCN: %20:vgpr_32 = V_MOV_B32_e32 4096, implicit $exec
+# GCN: %20:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
# GCN: FLAT_STORE_DWORD %10, %20,
-# GCN: %21:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
+# GCN: %21:vgpr_32 = V_MOV_B32_e32 4096, implicit $exec
# GCN: FLAT_STORE_DWORD %10, %21,
-# GCN: %22:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
+# GCN: %22:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
# GCN: FLAT_STORE_DWORD %10, %22,
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 7927808, implicit $exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
# GCN: FLAT_STORE_DWORD %10, %23,
-# GCN: %24:vgpr_32 = V_MOV_B32_e32 -8, implicit $exec
+# GCN: %24:vgpr_32 = V_MOV_B32_e32 7927808, implicit $exec
# GCN: FLAT_STORE_DWORD %10, %24,
+# GCN: %25:vgpr_32 = V_MOV_B32_e32 -8, implicit $exec
+# GCN: FLAT_STORE_DWORD %10, %25,
+
name: v_fold_shl_imm_regimm_32
alignment: 0
exposesReturnsTwice: false
@@ -745,8 +746,8 @@ body: |
FLAT_STORE_DWORD %20, %13, 0, 0, implicit $exec, implicit $flat_scr
%14:vgpr_32 = V_LSHL_B32_e64 12, %7, implicit $exec
FLAT_STORE_DWORD %20, %14, 0, 0, implicit $exec, implicit $flat_scr
- %15:vgpr_32 = V_LSHL_B32_e64 12, %24, implicit $exec
- FLAT_STORE_DWORD %20, %15, 0, 0, implicit $exec, implicit $flat_scr
+ %30:vgpr_32 = V_LSHL_B32_e64 12, %24, implicit $exec
+ FLAT_STORE_DWORD %20, %30, 0, 0, implicit $exec, implicit $flat_scr
%22:vgpr_32 = V_LSHL_B32_e64 %6, 12, implicit $exec
FLAT_STORE_DWORD %20, %22, 0, 0, implicit $exec, implicit $flat_scr
%23:vgpr_32 = V_LSHL_B32_e64 %6, 32, implicit $exec
``````````
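For context, declaring `IsSSA` in `getRequiredProperties()` lets the machine pass infrastructure refuse to run the pass on a function whose recorded properties do not include SSA form, which is why the MIR test above gains `isSSA: true` and renames virtual registers that were defined more than once (e.g. `%13` becoming `%21`). Below is a minimal sketch of the kind of check this enables; `checkFoldOperandsPreconditions` is a hypothetical helper for illustration, not code from this PR or the in-tree pass manager.

```cpp
// Illustrative only: a hypothetical helper mirroring the property check
// that the pass machinery performs against getRequiredProperties().
#include "llvm/CodeGen/MachineFunction.h"

using namespace llvm;

// Returns true when the function satisfies the IsSSA requirement that
// SIFoldOperands now declares via getRequiredProperties().
static bool checkFoldOperandsPreconditions(const MachineFunction &MF) {
  // hasProperty() queries the properties recorded on the MachineFunction;
  // for MIR tests this is driven by the `isSSA:` field in the YAML header
  // and by whether every virtual register has a single definition.
  return MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::IsSSA);
}
```

With the property declared, a non-SSA input is rejected up front instead of the pass silently operating on MIR it does not expect, which is the behavior the broken tests were relying on before this change.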
https://github.com/llvm/llvm-project/pull/127522