[llvm] 4cf86bd - [AMDGPU] Regen checks for schedule-barrier
Joe Nash via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 15 11:01:35 PDT 2022
Author: Joe Nash
Date: 2022-03-15T13:35:43-04:00
New Revision: 4cf86bd7445768d7c5d2a73bf55f73f61ce25e7c
URL: https://github.com/llvm/llvm-project/commit/4cf86bd7445768d7c5d2a73bf55f73f61ce25e7c
DIFF: https://github.com/llvm/llvm-project/commit/4cf86bd7445768d7c5d2a73bf55f73f61ce25e7c.diff
LOG: [AMDGPU] Regen checks for schedule-barrier
NFC. The test had not been regenerated since the update script started emitting CHECK-NEXT lines.
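
For reference, checks in a .mir test like this one are usually regenerated with llvm/utils/update_mir_test_checks.py. The commit message does not record the exact invocation, so the following is an assumption of a typical run (it requires a built llc to be reachable by the script):

    python3 llvm/utils/update_mir_test_checks.py \
        llvm/test/CodeGen/AMDGPU/schedule-barrier.mir

The script rewrites the "; CHECK:" assertions in place, emitting "; CHECK-NEXT:" for consecutive instructions, which is the change visible in the diff below.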
Added:
Modified:
llvm/test/CodeGen/AMDGPU/schedule-barrier.mir
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/schedule-barrier.mir b/llvm/test/CodeGen/AMDGPU/schedule-barrier.mir
index d08cfd28d0abd..d8ec14397d24a 100644
--- a/llvm/test/CodeGen/AMDGPU/schedule-barrier.mir
+++ b/llvm/test/CodeGen/AMDGPU/schedule-barrier.mir
@@ -12,32 +12,33 @@ body: |
; CHECK-LABEL: name: test
; CHECK: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9
- ; CHECK: undef %0.sub3:vreg_128 = COPY $vgpr9
- ; CHECK: undef %1.sub2:vreg_128 = COPY $vgpr8
- ; CHECK: undef %2.sub1:vreg_128 = COPY $vgpr7
- ; CHECK: undef %8.sub1:vreg_64 = COPY $vgpr1
- ; CHECK: %8.sub0:vreg_64 = COPY $vgpr0
- ; CHECK: undef %3.sub0:vreg_128 = COPY $vgpr6
- ; CHECK: undef %4.sub3:vreg_128 = COPY $vgpr5
- ; CHECK: undef %5.sub2:vreg_128 = COPY $vgpr4
- ; CHECK: undef %6.sub1:vreg_128 = COPY $vgpr3
- ; CHECK: undef %7.sub0:vreg_128 = COPY $vgpr2
- ; CHECK: undef %9.sub0:sgpr_128 = V_READFIRSTLANE_B32 %7.sub0, implicit $exec
- ; CHECK: %9.sub1:sgpr_128 = V_READFIRSTLANE_B32 %6.sub1, implicit $exec
- ; CHECK: %9.sub2:sgpr_128 = V_READFIRSTLANE_B32 %5.sub2, implicit $exec
- ; CHECK: %9.sub3:sgpr_128 = V_READFIRSTLANE_B32 %4.sub3, implicit $exec
- ; CHECK: S_BARRIER
- ; CHECK: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %9, 0, 0, 0, 0, 0, implicit $exec
- ; CHECK: undef %12.sub0:sgpr_128 = V_READFIRSTLANE_B32 %3.sub0, implicit $exec
- ; CHECK: %12.sub1:sgpr_128 = V_READFIRSTLANE_B32 %2.sub1, implicit $exec
- ; CHECK: %12.sub2:sgpr_128 = V_READFIRSTLANE_B32 %1.sub2, implicit $exec
- ; CHECK: %12.sub3:sgpr_128 = V_READFIRSTLANE_B32 %0.sub3, implicit $exec
- ; CHECK: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %12, 0, 0, 0, 0, 0, implicit $exec
- ; CHECK: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[BUFFER_LOAD_DWORD_OFFSET]], [[BUFFER_LOAD_DWORD_OFFSET]], implicit $exec
- ; CHECK: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[BUFFER_LOAD_DWORD_OFFSET1]], [[BUFFER_LOAD_DWORD_OFFSET1]], implicit $exec
- ; CHECK: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[V_MUL_LO_U32_e64_]], [[V_MUL_LO_U32_e64_1]], implicit $exec
- ; CHECK: GLOBAL_STORE_DWORD %8, [[V_ADD_U32_e32_]], 0, 0, implicit $exec
- ; CHECK: S_ENDPGM 0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: undef %0.sub3:vreg_128 = COPY $vgpr9
+ ; CHECK-NEXT: undef %1.sub2:vreg_128 = COPY $vgpr8
+ ; CHECK-NEXT: undef %2.sub1:vreg_128 = COPY $vgpr7
+ ; CHECK-NEXT: undef %8.sub1:vreg_64 = COPY $vgpr1
+ ; CHECK-NEXT: %8.sub0:vreg_64 = COPY $vgpr0
+ ; CHECK-NEXT: undef %3.sub0:vreg_128 = COPY $vgpr6
+ ; CHECK-NEXT: undef %4.sub3:vreg_128 = COPY $vgpr5
+ ; CHECK-NEXT: undef %5.sub2:vreg_128 = COPY $vgpr4
+ ; CHECK-NEXT: undef %6.sub1:vreg_128 = COPY $vgpr3
+ ; CHECK-NEXT: undef %7.sub0:vreg_128 = COPY $vgpr2
+ ; CHECK-NEXT: undef %9.sub0:sgpr_128 = V_READFIRSTLANE_B32 %7.sub0, implicit $exec
+ ; CHECK-NEXT: %9.sub1:sgpr_128 = V_READFIRSTLANE_B32 %6.sub1, implicit $exec
+ ; CHECK-NEXT: %9.sub2:sgpr_128 = V_READFIRSTLANE_B32 %5.sub2, implicit $exec
+ ; CHECK-NEXT: %9.sub3:sgpr_128 = V_READFIRSTLANE_B32 %4.sub3, implicit $exec
+ ; CHECK-NEXT: S_BARRIER
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %9, 0, 0, 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: undef %12.sub0:sgpr_128 = V_READFIRSTLANE_B32 %3.sub0, implicit $exec
+ ; CHECK-NEXT: %12.sub1:sgpr_128 = V_READFIRSTLANE_B32 %2.sub1, implicit $exec
+ ; CHECK-NEXT: %12.sub2:sgpr_128 = V_READFIRSTLANE_B32 %1.sub2, implicit $exec
+ ; CHECK-NEXT: %12.sub3:sgpr_128 = V_READFIRSTLANE_B32 %0.sub3, implicit $exec
+ ; CHECK-NEXT: [[BUFFER_LOAD_DWORD_OFFSET1:%[0-9]+]]:vgpr_32 = BUFFER_LOAD_DWORD_OFFSET %12, 0, 0, 0, 0, 0, implicit $exec
+ ; CHECK-NEXT: [[V_MUL_LO_U32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[BUFFER_LOAD_DWORD_OFFSET]], [[BUFFER_LOAD_DWORD_OFFSET]], implicit $exec
+ ; CHECK-NEXT: [[V_MUL_LO_U32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_LO_U32_e64 [[BUFFER_LOAD_DWORD_OFFSET1]], [[BUFFER_LOAD_DWORD_OFFSET1]], implicit $exec
+ ; CHECK-NEXT: [[V_ADD_U32_e32_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e32 [[V_MUL_LO_U32_e64_]], [[V_MUL_LO_U32_e64_1]], implicit $exec
+ ; CHECK-NEXT: GLOBAL_STORE_DWORD %8, [[V_ADD_U32_e32_]], 0, 0, implicit $exec
+ ; CHECK-NEXT: S_ENDPGM 0
undef %43.sub3:vreg_128 = COPY $vgpr9
undef %42.sub2:vreg_128 = COPY $vgpr8
undef %41.sub1:vreg_128 = COPY $vgpr7