[llvm] MIR: Replace undef with poison in some MIR tests (PR #131282)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 13 22:53:14 PDT 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-backend-powerpc
Author: Matt Arsenault (arsenm)
<details>
<summary>Changes</summary>
The IR doesn't matter so much in these.
---
Full diff: https://github.com/llvm/llvm-project/pull/131282.diff
8 Files Affected:
- (modified) llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir (+18-18)
- (modified) llvm/test/CodeGen/MIR/PowerPC/unordered-implicit-registers.mir (+1-1)
- (modified) llvm/test/CodeGen/MIR/X86/dbg-value-list.mir (+1-1)
- (modified) llvm/test/CodeGen/MIR/X86/instr-heap-alloc-operands.mir (+1-1)
- (modified) llvm/test/CodeGen/MIR/X86/instr-symbols-and-mcsymbol-operands.mir (+1-1)
- (modified) llvm/test/CodeGen/MIR/X86/machine-verifier-address.mir (+4-4)
- (modified) llvm/test/CodeGen/MIR/X86/memory-operands.mir (+3-3)
- (modified) llvm/test/CodeGen/MIR/X86/unreachable_block.ll (+1-1)
```diff
diff --git a/llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir b/llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir
index a6f2154100d55..c1bd781b8cbc0 100644
--- a/llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir
+++ b/llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir
@@ -6,23 +6,23 @@ body: |
bb.0:
; CHECK-LABEL: name: aligned_memoperands
; CHECK: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`, align 2)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef`, align 8)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12, align 2)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12, align 2)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12)
- ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr undef` + 12, basealign 8)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr poison`)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr poison`, align 2)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr poison`)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr poison`, align 8)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr poison` + 12, align 2)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr poison` + 12)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr poison` + 12, align 2)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr poison` + 12)
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `ptr poison` + 12, basealign 8)
%0:_(p0) = IMPLICIT_DEF
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`)
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`, align 2)
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`, align 4) ; redundant
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef`, align 8)
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, align 2)
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, align 4) ; redundant
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, basealign 2) ; printed as "align"
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, basealign 4) ; redundant
- %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr undef` + 12, basealign 8)
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr poison`)
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr poison`, align 2)
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr poison`, align 4) ; redundant
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr poison`, align 8)
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr poison` + 12, align 2)
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr poison` + 12, align 4) ; redundant
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr poison` + 12, basealign 2) ; printed as "align"
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr poison` + 12, basealign 4) ; redundant
+ %1:_(s32) = G_LOAD %0 :: (load (s32) from `ptr poison` + 12, basealign 8)
...
diff --git a/llvm/test/CodeGen/MIR/PowerPC/unordered-implicit-registers.mir b/llvm/test/CodeGen/MIR/PowerPC/unordered-implicit-registers.mir
index c39b506667788..5c74db1a00353 100644
--- a/llvm/test/CodeGen/MIR/PowerPC/unordered-implicit-registers.mir
+++ b/llvm/test/CodeGen/MIR/PowerPC/unordered-implicit-registers.mir
@@ -7,7 +7,7 @@
%0 = load i32, ptr %p, align 4
%or = or i32 0, %0
store i32 %or, ptr %p, align 4
- %lnot.1 = icmp eq i32 undef, 0
+ %lnot.1 = icmp eq i32 poison, 0
%lnot.ext.1 = zext i1 %lnot.1 to i32
%shr.i.1 = lshr i32 2072, %lnot.ext.1
%call.lobit.1 = lshr i32 %shr.i.1, 7
diff --git a/llvm/test/CodeGen/MIR/X86/dbg-value-list.mir b/llvm/test/CodeGen/MIR/X86/dbg-value-list.mir
index c384a815e5f90..1707ba9b243a8 100644
--- a/llvm/test/CodeGen/MIR/X86/dbg-value-list.mir
+++ b/llvm/test/CodeGen/MIR/X86/dbg-value-list.mir
@@ -14,7 +14,7 @@
entry:
call void @llvm.dbg.value(metadata i32 %a, metadata !12, metadata !DIExpression()), !dbg !15
call void @llvm.dbg.value(metadata i32 %b, metadata !13, metadata !DIExpression()), !dbg !15
- call void @llvm.dbg.value(metadata i32 undef, metadata !14, metadata !DIExpression()), !dbg !15
+ call void @llvm.dbg.value(metadata i32 poison, metadata !14, metadata !DIExpression()), !dbg !15
%mul = mul nsw i32 %b, %a, !dbg !16
ret i32 %mul, !dbg !17
}
diff --git a/llvm/test/CodeGen/MIR/X86/instr-heap-alloc-operands.mir b/llvm/test/CodeGen/MIR/X86/instr-heap-alloc-operands.mir
index f776e38b448ba..ca59e5fe7b8fd 100644
--- a/llvm/test/CodeGen/MIR/X86/instr-heap-alloc-operands.mir
+++ b/llvm/test/CodeGen/MIR/X86/instr-heap-alloc-operands.mir
@@ -7,7 +7,7 @@
define i32 @test(i32 %x) nounwind {
entry:
call ptr @f(i32 %x), !heapallocsite !2
- ret i32 undef
+ ret i32 poison
}
!llvm.dbg.cu = !{!0}
diff --git a/llvm/test/CodeGen/MIR/X86/instr-symbols-and-mcsymbol-operands.mir b/llvm/test/CodeGen/MIR/X86/instr-symbols-and-mcsymbol-operands.mir
index 761f4fe43bd9a..c1d912488c2db 100644
--- a/llvm/test/CodeGen/MIR/X86/instr-symbols-and-mcsymbol-operands.mir
+++ b/llvm/test/CodeGen/MIR/X86/instr-symbols-and-mcsymbol-operands.mir
@@ -14,7 +14,7 @@
call void @f(i32 %x)
call void @g(i32 %x)
call void @h(i32 %x), !dbg !9
- ret i64 undef
+ ret i64 poison
}
!llvm.dbg.cu = !{!0}
diff --git a/llvm/test/CodeGen/MIR/X86/machine-verifier-address.mir b/llvm/test/CodeGen/MIR/X86/machine-verifier-address.mir
index 6479bdd400a47..284e9288ca812 100644
--- a/llvm/test/CodeGen/MIR/X86/machine-verifier-address.mir
+++ b/llvm/test/CodeGen/MIR/X86/machine-verifier-address.mir
@@ -8,14 +8,14 @@ body: |
bb.0:
successors: %bb.1(0x80000000)
liveins: $rdi, $xmm0
-
+
%1:vr128 = COPY $xmm0
%0:gr64 = COPY $rdi
%2:vr128 = COPY %1
-
+
bb.1:
successors: %bb.1(0x80000000)
-
+
%3:vr256 = AVX_SET0
%4:vr128 = VPSLLDri %2, 31
%5:vr256 = VPMOVSXDQYrr killed %4
@@ -24,7 +24,7 @@ body: |
%6:vr256, %7:vr256 = VGATHERQPDYrm %3, %0, 16, killed %8, 0, $noreg, %5 :: (load unknown-size, align 8)
%9:vr128 = COPY %6.sub_xmm
; CHECK: *** Bad machine code: Displacement in address must fit into 32-bit signed integer ***
- VMOVLPDmr $noreg, 1, $noreg, 1111111111111, $noreg, killed %9 :: (store (s64) into `ptr undef`)
+ VMOVLPDmr $noreg, 1, $noreg, 1111111111111, $noreg, killed %9 :: (store (s64) into `ptr poison`)
JMP_1 %bb.1
; CHECK: LLVM ERROR: Found 2 machine code errors
diff --git a/llvm/test/CodeGen/MIR/X86/memory-operands.mir b/llvm/test/CodeGen/MIR/X86/memory-operands.mir
index 70353cfb01979..6c2c7e36f1fb6 100644
--- a/llvm/test/CodeGen/MIR/X86/memory-operands.mir
+++ b/llvm/test/CodeGen/MIR/X86/memory-operands.mir
@@ -183,7 +183,7 @@
define ptr @undef_value() {
entry:
- %0 = load ptr, ptr undef, align 8
+ %0 = load ptr, ptr poison, align 8
ret ptr %0
}
@@ -515,8 +515,8 @@ tracksRegLiveness: true
body: |
bb.0.entry:
; CHECK-LABEL: name: undef_value
- ; CHECK: $rax = MOV64rm undef $rax, 1, $noreg, 0, $noreg :: (load (s64) from `ptr undef`)
- $rax = MOV64rm undef $rax, 1, _, 0, _ :: (load (s64) from `ptr undef`)
+ ; CHECK: $rax = MOV64rm undef $rax, 1, $noreg, 0, $noreg :: (load (s64) from `ptr poison`)
+ $rax = MOV64rm undef $rax, 1, _, 0, _ :: (load (s64) from `ptr poison`)
RET64 $rax
...
---
diff --git a/llvm/test/CodeGen/MIR/X86/unreachable_block.ll b/llvm/test/CodeGen/MIR/X86/unreachable_block.ll
index 396fef0db95bf..21ada7a200da3 100644
--- a/llvm/test/CodeGen/MIR/X86/unreachable_block.ll
+++ b/llvm/test/CodeGen/MIR/X86/unreachable_block.ll
@@ -35,7 +35,7 @@
; CHECK-NOT: %bb.{{[0-9]+}}.split.true
; CHECK-LABEL: bb.{{[0-9]+}}.split.true:
define void @foo(ptr %bar) {
- br i1 undef, label %true, label %false
+ br i1 poison, label %true, label %false
true:
%v = load i32, ptr %bar
br label %split.true
```
</details>
https://github.com/llvm/llvm-project/pull/131282
More information about the llvm-commits
mailing list