[llvm] 2df37d5 - [NFC][Codegen] Harden a few tests so they don't rely on a volatile store to null not being erased
Roman Lebedev via llvm-commits
llvm-commits at lists.llvm.org
Fri Jul 9 03:32:41 PDT 2021
Author: Roman Lebedev
Date: 2021-07-09T13:30:42+03:00
New Revision: 2df37d5ddd38091aafbb7d338660e58836f4ac80
URL: https://github.com/llvm/llvm-project/commit/2df37d5ddd38091aafbb7d338660e58836f4ac80
DIFF: https://github.com/llvm/llvm-project/commit/2df37d5ddd38091aafbb7d338660e58836f4ac80.diff
LOG: [NFC][Codegen] Harden a few tests so they don't rely on a volatile store to null not being erased
Added:
Modified:
llvm/test/CodeGen/AArch64/branch-relax-alignment.ll
llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
llvm/test/CodeGen/AMDGPU/early-inline.ll
Removed:
################################################################################
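In a nutshell, the hardening pattern: these tests used a volatile store to undef as their observable side effect, and the optimizer may now treat such a store as immediate undefined behavior and erase it, so each test instead threads a real destination pointer through as a function argument. A minimal sketch of the before/after shape (the function names and %dst are illustrative, not taken from the tests):

  ; Before: the only observable side effect is a volatile store to undef,
  ; which the optimizer may fold away as undefined behavior.
  define i32 @harden_before() {
    store volatile i32 42, i32* undef
    ret i32 0
  }

  ; After: the store goes through a pointer argument, giving it a
  ; well-defined destination that must be preserved.
  define i32 @harden_after(i32* %dst) {
    store volatile i32 42, i32* %dst
    ret i32 0
  }

The diffs below apply exactly this transformation, updating the autogenerated CHECK lines where the generated code changed.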
diff --git a/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll b/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll
index c1d824b9b79e..9c1e569d6640 100644
--- a/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll
+++ b/llvm/test/CodeGen/AArch64/branch-relax-alignment.ll
@@ -4,7 +4,7 @@
; Long branch is assumed because the block has a higher alignment
; requirement than the function.
-define i32 @invert_bcc_block_align_higher_func(i32 %x, i32 %y) align 4 #0 {
+define i32 @invert_bcc_block_align_higher_func(i32 %x, i32 %y, i32* %dst) align 4 #0 {
; CHECK-LABEL: invert_bcc_block_align_higher_func:
; CHECK: ; %bb.0: ; %common.ret
; CHECK-NEXT: cmp w0, w1
@@ -12,17 +12,17 @@ define i32 @invert_bcc_block_align_higher_func(i32 %x, i32 %y) align 4 #0 {
; CHECK-NEXT: mov w9, #42
; CHECK-NEXT: cset w0, ne
; CHECK-NEXT: csel w8, w9, w8, eq
-; CHECK-NEXT: str w8, [x8]
+; CHECK-NEXT: str w8, [x2]
; CHECK-NEXT: ret
%1 = icmp eq i32 %x, %y
br i1 %1, label %bb1, label %bb2
bb2:
- store volatile i32 9, i32* undef
+ store volatile i32 9, i32* %dst
ret i32 1
bb1:
- store volatile i32 42, i32* undef
+ store volatile i32 42, i32* %dst
ret i32 0
}
diff --git a/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll b/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
index 3f9be3a28301..377cb2c55696 100644
--- a/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
+++ b/llvm/test/CodeGen/AArch64/branch-relax-bcc.ll
@@ -1,27 +1,28 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-apple-darwin -aarch64-bcc-offset-bits=3 < %s | FileCheck %s
-define i32 @invert_bcc(float %x, float %y) #0 {
+define i32 @invert_bcc(float %x, float %y, i32* %dst0, i32* %dst1) #0 {
; CHECK-LABEL: invert_bcc:
; CHECK: ; %bb.0:
-; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: fcmp s0, s1
-; CHECK-NEXT: mov w8, #42
-; CHECK-NEXT: b.pl LBB0_3
+; CHECK-NEXT: b.ne LBB0_3
; CHECK-NEXT: b LBB0_2
; CHECK-NEXT: LBB0_3:
-; CHECK-NEXT: b.gt LBB0_2
-; CHECK-NEXT: ; %bb.1: ; %common.ret
-; CHECK-NEXT: str w8, [x8]
-; CHECK-NEXT: ret
-; CHECK-NEXT: LBB0_2: ; %bb2
-; CHECK-NEXT: mov w0, #1
+; CHECK-NEXT: b.vc LBB0_1
+; CHECK-NEXT: b LBB0_2
+; CHECK-NEXT: LBB0_1: ; %bb2
; CHECK-NEXT: mov w8, #9
; CHECK-NEXT: ; InlineAsm Start
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: ; InlineAsm End
-; CHECK-NEXT: str w8, [x8]
+; CHECK-NEXT: str w8, [x0]
+; CHECK-NEXT: mov w0, #1
+; CHECK-NEXT: ret
+; CHECK-NEXT: LBB0_2: ; %bb1
+; CHECK-NEXT: mov w0, wzr
+; CHECK-NEXT: mov w8, #42
+; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: ret
%1 = fcmp ueq float %x, %y
br i1 %1, label %bb1, label %bb2
@@ -31,11 +32,11 @@ bb2:
"nop
nop",
""() #0
- store volatile i32 9, i32* undef
+ store volatile i32 9, i32* %dst0
ret i32 1
bb1:
- store volatile i32 42, i32* undef
+ store volatile i32 42, i32* %dst1
ret i32 0
}
diff --git a/llvm/test/CodeGen/AMDGPU/early-inline.ll b/llvm/test/CodeGen/AMDGPU/early-inline.ll
index eb533048e8d2..4a3731bf4689 100644
--- a/llvm/test/CodeGen/AMDGPU/early-inline.ll
+++ b/llvm/test/CodeGen/AMDGPU/early-inline.ll
@@ -16,18 +16,18 @@ entry:
; CHECK: mul i32
; CHECK-NOT: call i32
-define amdgpu_kernel void @caller(i32 %x) {
+define amdgpu_kernel void @caller(i32 %x, i32 addrspace(1)* %dst) {
entry:
%res = call i32 @callee(i32 %x)
- store volatile i32 %res, i32 addrspace(1)* undef
+ store volatile i32 %res, i32 addrspace(1)* %dst
ret void
}
; CHECK-LABEL: @alias_caller(
; CHECK-NOT: call
-define amdgpu_kernel void @alias_caller(i32 %x) {
+define amdgpu_kernel void @alias_caller(i32 %x, i32 addrspace(1)* %dst) {
entry:
%res = call i32 @c_alias(i32 %x)
- store volatile i32 %res, i32 addrspace(1)* undef
+ store volatile i32 %res, i32 addrspace(1)* %dst
ret void
}