[llvm] d0d796a - AMDGPU: Don't use branches to entry block in test
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Tue Jun 21 12:37:18 PDT 2022
Author: Matt Arsenault
Date: 2022-06-21T15:37:12-04:00
New Revision: d0d796a40a7064dc4e66c73ff056275e8f40b1fa
URL: https://github.com/llvm/llvm-project/commit/d0d796a40a7064dc4e66c73ff056275e8f40b1fa
DIFF: https://github.com/llvm/llvm-project/commit/d0d796a40a7064dc4e66c73ff056275e8f40b1fa.diff
LOG: AMDGPU: Don't use branches to entry block in test
This created a weird loop making the tested registers live out of the
block, which I don't think is relevant to the purpose of the
tests. This caused regressions when the validity queries are changed
to use tests based on whether the use instruction was a kill. If the
register was live out for the loop, it was still live.
I guess we could still do this in a narrow case where the value loops
back, but that's mostly a pointlessly complex case to handle.
Added:
Modified:
llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking-wave32.mir
llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking.mir
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking-wave32.mir b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking-wave32.mir
index 97df805fc3dc..fd4e74568490 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking-wave32.mir
+++ b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking-wave32.mir
@@ -17,7 +17,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -39,7 +39,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -65,7 +65,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -91,7 +91,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -109,7 +109,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -131,7 +131,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -153,7 +153,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -179,7 +179,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -202,7 +202,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
$vgpr0 = COPY %1
@@ -229,7 +229,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -247,7 +247,7 @@ name: negated_cond_vop3_sel_wrong_subreg1
body: |
bb.0:
%0:sgpr_32 = IMPLICIT_DEF
- %1.sub1 = IMPLICIT_DEF
+ undef %1.sub1 = IMPLICIT_DEF
%1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
%2:sgpr_32 = V_CMP_NE_U32_e64 %1.sub1, 1, implicit $exec
$vcc_lo = S_AND_B32 killed %2, $exec_lo, implicit-def dead $scc
@@ -255,7 +255,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -273,7 +273,7 @@ name: negated_cond_vop3_sel_wrong_subreg2
body: |
bb.0:
%0:sgpr_32 = IMPLICIT_DEF
- %1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
+ undef %1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
%1.sub1 = IMPLICIT_DEF
%2:sgpr_32 = V_CMP_NE_U32_e64 %1.sub1, 1, implicit $exec
$vcc_lo = S_AND_B32 killed %2, $exec_lo, implicit-def dead $scc
@@ -281,7 +281,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -297,7 +297,7 @@ name: negated_cond_vop3_sel_right_subreg1
body: |
bb.0:
%0:sgpr_32 = IMPLICIT_DEF
- %1.sub1 = IMPLICIT_DEF
+ undef %1.sub1 = IMPLICIT_DEF
%1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
%2:sgpr_32 = V_CMP_NE_U32_e64 %1.sub0, 1, implicit $exec
$vcc_lo = S_AND_B32 killed %2, $exec_lo, implicit-def dead $scc
@@ -305,7 +305,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -321,7 +321,7 @@ name: negated_cond_vop3_sel_right_subreg2
body: |
bb.0:
%0:sgpr_32 = IMPLICIT_DEF
- %1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
+ undef %1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
%1.sub1 = IMPLICIT_DEF
%2:sgpr_32 = V_CMP_NE_U32_e64 %1.sub0, 1, implicit $exec
$vcc_lo = S_AND_B32 killed %2, $exec_lo, implicit-def dead $scc
@@ -329,7 +329,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -347,7 +347,7 @@ name: negated_cond_vop3_sel_subreg_overlap
body: |
bb.0:
%0:sgpr_32 = IMPLICIT_DEF
- %1.sub2:vreg_128 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
+ undef %1.sub2:vreg_128 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
%1.sub2_sub3 = IMPLICIT_DEF
%2:sgpr_32 = V_CMP_NE_U32_e64 %1.sub2, 1, implicit $exec
$vcc_lo = S_AND_B32 killed %2, $exec_lo, implicit-def dead $scc
@@ -355,7 +355,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking.mir b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking.mir
index 35f4825eb244..a9c8163f6e48 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking.mir
+++ b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond-exec-masking.mir
@@ -16,7 +16,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -38,7 +38,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -64,7 +64,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -90,7 +90,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -116,7 +116,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -134,7 +134,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -156,7 +156,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -178,7 +178,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -204,7 +204,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -227,7 +227,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
$vgpr0 = COPY %1
@@ -254,7 +254,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -272,7 +272,7 @@ name: negated_cond_vop3_sel_wrong_subreg1
body: |
bb.0:
%0:sreg_64_xexec = IMPLICIT_DEF
- %1.sub1 = IMPLICIT_DEF
+ undef %1.sub1 = IMPLICIT_DEF
%1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
%2:sreg_64_xexec = V_CMP_NE_U32_e64 %1.sub1, 1, implicit $exec
$vcc = S_AND_B64 killed %2, $exec, implicit-def dead $scc
@@ -280,7 +280,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -298,7 +298,7 @@ name: negated_cond_vop3_sel_wrong_subreg2
body: |
bb.0:
%0:sreg_64_xexec = IMPLICIT_DEF
- %1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
+ undef %1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
%1.sub1 = IMPLICIT_DEF
%2:sreg_64_xexec = V_CMP_NE_U32_e64 %1.sub1, 1, implicit $exec
$vcc = S_AND_B64 killed %2, $exec, implicit-def dead $scc
@@ -306,7 +306,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -322,7 +322,7 @@ name: negated_cond_vop3_sel_right_subreg1
body: |
bb.0:
%0:sreg_64_xexec = IMPLICIT_DEF
- %1.sub1 = IMPLICIT_DEF
+ undef %1.sub1 = IMPLICIT_DEF
%1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
%2:sreg_64_xexec = V_CMP_NE_U32_e64 %1.sub0, 1, implicit $exec
$vcc = S_AND_B64 killed %2, $exec, implicit-def dead $scc
@@ -330,7 +330,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -346,7 +346,7 @@ name: negated_cond_vop3_sel_right_subreg2
body: |
bb.0:
%0:sreg_64_xexec = IMPLICIT_DEF
- %1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
+ undef %1.sub0:vreg_64 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
%1.sub1 = IMPLICIT_DEF
%2:sreg_64_xexec = V_CMP_NE_U32_e64 %1.sub0, 1, implicit $exec
$vcc = S_AND_B64 killed %2, $exec, implicit-def dead $scc
@@ -354,7 +354,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -372,7 +372,7 @@ name: negated_cond_vop3_sel_subreg_overlap
body: |
bb.0:
%0:sreg_64_xexec = IMPLICIT_DEF
- %1.sub2:vreg_128 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
+ undef %1.sub2:vreg_128 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0, implicit $exec
%1.sub2_sub3 = IMPLICIT_DEF
%2:sreg_64_xexec = V_CMP_NE_U32_e64 %1.sub2, 1, implicit $exec
$vcc = S_AND_B64 killed %2, $exec, implicit-def dead $scc
@@ -380,7 +380,7 @@ body: |
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
@@ -472,15 +472,15 @@ body: |
name: negated_cond_subreg
body: |
bb.0:
- %0.sub0_sub1:sgpr_128 = IMPLICIT_DEF
+ undef %0.sub0_sub1:sgpr_128 = IMPLICIT_DEF
%1:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, %0.sub0_sub1, implicit $exec
- %2.sub0_sub1:sgpr_128 = V_CMP_NE_U32_e64 %1, 1, implicit $exec
+ undef %2.sub0_sub1:sgpr_128 = V_CMP_NE_U32_e64 %1, 1, implicit $exec
$vcc = S_AND_B64 $exec, %2.sub0_sub1:sgpr_128, implicit-def dead $scc
S_CBRANCH_VCCNZ %bb.2, implicit killed $vcc
S_BRANCH %bb.1
bb.1:
- S_BRANCH %bb.0
+ S_BRANCH %bb.2
bb.2:
S_ENDPGM 0
More information about the llvm-commits
mailing list