[llvm] 35e73e7 - [X86] Regenerate memcpy-scoped-aa.ll
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu May 2 04:29:22 PDT 2024
Author: Simon Pilgrim
Date: 2024-05-02T12:29:05+01:00
New Revision: 35e73e7cc8c4078ea25151da3958a114591bf700
URL: https://github.com/llvm/llvm-project/commit/35e73e7cc8c4078ea25151da3958a114591bf700
DIFF: https://github.com/llvm/llvm-project/commit/35e73e7cc8c4078ea25151da3958a114591bf700.diff
LOG: [X86] Regenerate memcpy-scoped-aa.ll
Added:
Modified:
llvm/test/CodeGen/X86/memcpy-scoped-aa.ll
Removed:
################################################################################
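For reference, regenerating the MIR checks in this test is a single script invocation. A minimal sketch, assuming the tree is checked out at the repo root and a built llc is reachable on PATH (paths are illustrative):

  llvm/utils/update_mir_test_checks.py llvm/test/CodeGen/X86/memcpy-scoped-aa.ll

If llc is not on PATH, the script can be pointed at a specific binary (e.g. via its --llc-binary option).
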
diff --git a/llvm/test/CodeGen/X86/memcpy-scoped-aa.ll b/llvm/test/CodeGen/X86/memcpy-scoped-aa.ll
index 7765297dc673fc..d3b86786a630c1 100644
--- a/llvm/test/CodeGen/X86/memcpy-scoped-aa.ll
+++ b/llvm/test/CodeGen/X86/memcpy-scoped-aa.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=x86_64-linux-gnu -stop-after=finalize-isel -o - %s | FileCheck --check-prefix=MIR %s
; Ensure that the scoped AA is attached on loads/stores lowered from mem ops.
@@ -10,12 +11,21 @@
; MIR-DAG: ![[SET0:[0-9]+]] = !{![[SCOPE0]]}
; MIR-DAG: ![[SET1:[0-9]+]] = !{![[SCOPE1]]}
-; MIR-LABEL: name: test_memcpy
-; MIR: %2:gr64 = MOV64rm %0, 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-; MIR-NEXT: %3:gr64 = MOV64rm %0, 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, killed %3 :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, killed %2 :: (store (s64) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
define i32 @test_memcpy(ptr nocapture %p, ptr nocapture readonly %q) {
+ ; MIR-LABEL: name: test_memcpy
+ ; MIR: bb.0 (%ir-block.0):
+ ; MIR-NEXT: liveins: $rdi, $rsi
+ ; MIR-NEXT: {{ $}}
+ ; MIR-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rsi
+ ; MIR-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; MIR-NEXT: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 4, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: MOV64mr [[COPY1]], 1, $noreg, 8, $noreg, killed [[MOV64rm1]] :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, killed [[MOV64rm]] :: (store (s64) into %ir.p0, align 4, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load (s32) from %ir.q, !alias.scope !3, !noalias !0)
+ ; MIR-NEXT: [[ADD32rm:%[0-9]+]]:gr32 = ADD32rm [[MOV32rm]], [[COPY]], 1, $noreg, 4, $noreg, implicit-def dead $eflags :: (load (s32) from %ir.q1, !alias.scope !3, !noalias !0)
+ ; MIR-NEXT: $eax = COPY [[ADD32rm]]
+ ; MIR-NEXT: RET 0, $eax
%p0 = bitcast ptr %p to ptr
%add.ptr = getelementptr inbounds i32, ptr %p, i64 4
%p1 = bitcast ptr %add.ptr to ptr
@@ -27,12 +37,21 @@ define i32 @test_memcpy(ptr nocapture %p, ptr nocapture readonly %q) {
ret i32 %add
}
-; MIR-LABEL: name: test_memcpy_inline
-; MIR: %2:gr64 = MOV64rm %0, 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-; MIR-NEXT: %3:gr64 = MOV64rm %0, 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, killed %3 :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, killed %2 :: (store (s64) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
define i32 @test_memcpy_inline(ptr nocapture %p, ptr nocapture readonly %q) {
+ ; MIR-LABEL: name: test_memcpy_inline
+ ; MIR: bb.0 (%ir-block.0):
+ ; MIR-NEXT: liveins: $rdi, $rsi
+ ; MIR-NEXT: {{ $}}
+ ; MIR-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rsi
+ ; MIR-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; MIR-NEXT: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 4, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: MOV64mr [[COPY1]], 1, $noreg, 8, $noreg, killed [[MOV64rm1]] :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, killed [[MOV64rm]] :: (store (s64) into %ir.p0, align 4, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load (s32) from %ir.q, !alias.scope !3, !noalias !0)
+ ; MIR-NEXT: [[ADD32rm:%[0-9]+]]:gr32 = ADD32rm [[MOV32rm]], [[COPY]], 1, $noreg, 4, $noreg, implicit-def dead $eflags :: (load (s32) from %ir.q1, !alias.scope !3, !noalias !0)
+ ; MIR-NEXT: $eax = COPY [[ADD32rm]]
+ ; MIR-NEXT: RET 0, $eax
%p0 = bitcast ptr %p to ptr
%add.ptr = getelementptr inbounds i32, ptr %p, i64 4
%p1 = bitcast ptr %add.ptr to ptr
@@ -44,12 +63,21 @@ define i32 @test_memcpy_inline(ptr nocapture %p, ptr nocapture readonly %q) {
ret i32 %add
}
-; MIR-LABEL: name: test_memmove
-; MIR: %2:gr64 = MOV64rm %0, 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-; MIR-NEXT: %3:gr64 = MOV64rm %0, 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, killed %2 :: (store (s64) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, killed %3 :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
define i32 @test_memmove(ptr nocapture %p, ptr nocapture readonly %q) {
+ ; MIR-LABEL: name: test_memmove
+ ; MIR: bb.0 (%ir-block.0):
+ ; MIR-NEXT: liveins: $rdi, $rsi
+ ; MIR-NEXT: {{ $}}
+ ; MIR-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rsi
+ ; MIR-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; MIR-NEXT: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 4, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 4, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, killed [[MOV64rm]] :: (store (s64) into %ir.p0, align 4, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: MOV64mr [[COPY1]], 1, $noreg, 8, $noreg, killed [[MOV64rm1]] :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load (s32) from %ir.q, !alias.scope !3, !noalias !0)
+ ; MIR-NEXT: [[ADD32rm:%[0-9]+]]:gr32 = ADD32rm [[MOV32rm]], [[COPY]], 1, $noreg, 4, $noreg, implicit-def dead $eflags :: (load (s32) from %ir.q1, !alias.scope !3, !noalias !0)
+ ; MIR-NEXT: $eax = COPY [[ADD32rm]]
+ ; MIR-NEXT: RET 0, $eax
%p0 = bitcast ptr %p to ptr
%add.ptr = getelementptr inbounds i32, ptr %p, i64 4
%p1 = bitcast ptr %add.ptr to ptr
@@ -61,11 +89,20 @@ define i32 @test_memmove(ptr nocapture %p, ptr nocapture readonly %q) {
ret i32 %add
}
-; MIR-LABEL: name: test_memset
-; MIR: %2:gr64 = MOV64ri -6148914691236517206
-; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, %2 :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
-; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, %2 :: (store (s64) into %ir.p0, align 4, !alias.scope ![[SET0]], !noalias ![[SET1]])
define i32 @test_memset(ptr nocapture %p, ptr nocapture readonly %q) {
+ ; MIR-LABEL: name: test_memset
+ ; MIR: bb.0 (%ir-block.0):
+ ; MIR-NEXT: liveins: $rdi, $rsi
+ ; MIR-NEXT: {{ $}}
+ ; MIR-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rsi
+ ; MIR-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; MIR-NEXT: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri -6148914691236517206
+ ; MIR-NEXT: MOV64mr [[COPY1]], 1, $noreg, 8, $noreg, [[MOV64ri]] :: (store (s64) into %ir.p0 + 8, align 4, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[MOV64ri]] :: (store (s64) into %ir.p0, align 4, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load (s32) from %ir.q, !alias.scope !3, !noalias !0)
+ ; MIR-NEXT: [[ADD32rm:%[0-9]+]]:gr32 = ADD32rm [[MOV32rm]], [[COPY]], 1, $noreg, 4, $noreg, implicit-def dead $eflags :: (load (s32) from %ir.q1, !alias.scope !3, !noalias !0)
+ ; MIR-NEXT: $eax = COPY [[ADD32rm]]
+ ; MIR-NEXT: RET 0, $eax
%p0 = bitcast ptr %p to ptr
tail call void @llvm.memset.p0.i64(ptr noundef nonnull align 4 dereferenceable(16) %p0, i8 170, i64 16, i1 false), !alias.scope !2, !noalias !4
%v0 = load i32, ptr %q, align 4, !alias.scope !4, !noalias !2
@@ -75,12 +112,21 @@ define i32 @test_memset(ptr nocapture %p, ptr nocapture readonly %q) {
ret i32 %add
}
-; MIR-LABEL: name: test_mempcpy
-; MIR: %2:gr64 = MOV64rm %0, 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 1, !alias.scope ![[SET0]], !noalias ![[SET1]])
-; MIR-NEXT: %3:gr64 = MOV64rm %0, 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 1, !alias.scope ![[SET0]], !noalias ![[SET1]])
-; MIR-NEXT: MOV64mr %0, 1, $noreg, 8, $noreg, killed %3 :: (store (s64) into %ir.p0 + 8, align 1, !alias.scope ![[SET0]], !noalias ![[SET1]])
-; MIR-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, killed %2 :: (store (s64) into %ir.p0, align 1, !alias.scope ![[SET0]], !noalias ![[SET1]])
define i32 @test_mempcpy(ptr nocapture %p, ptr nocapture readonly %q) {
+ ; MIR-LABEL: name: test_mempcpy
+ ; MIR: bb.0 (%ir-block.0):
+ ; MIR-NEXT: liveins: $rdi, $rsi
+ ; MIR-NEXT: {{ $}}
+ ; MIR-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rsi
+ ; MIR-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; MIR-NEXT: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 16, $noreg :: (load (s64) from %ir.p1, align 1, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: [[MOV64rm1:%[0-9]+]]:gr64 = MOV64rm [[COPY1]], 1, $noreg, 24, $noreg :: (load (s64) from %ir.p1 + 8, align 1, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: MOV64mr [[COPY1]], 1, $noreg, 8, $noreg, killed [[MOV64rm1]] :: (store (s64) into %ir.p0 + 8, align 1, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, killed [[MOV64rm]] :: (store (s64) into %ir.p0, align 1, !alias.scope !0, !noalias !3)
+ ; MIR-NEXT: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load (s32) from %ir.q, !alias.scope !3, !noalias !0)
+ ; MIR-NEXT: [[ADD32rm:%[0-9]+]]:gr32 = ADD32rm [[MOV32rm]], [[COPY]], 1, $noreg, 4, $noreg, implicit-def dead $eflags :: (load (s32) from %ir.q1, !alias.scope !3, !noalias !0)
+ ; MIR-NEXT: $eax = COPY [[ADD32rm]]
+ ; MIR-NEXT: RET 0, $eax
%p0 = bitcast ptr %p to ptr
%add.ptr = getelementptr inbounds i32, ptr %p, i64 4
%p1 = bitcast ptr %add.ptr to ptr
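Beyond the quoted diff above, the following is a minimal, self-contained IR sketch of what the regenerated checks are verifying: scoped-AA metadata attached to a memcpy, which finalize-isel is expected to propagate onto the lowered MOV64rm/MOV64mr memory operands as !alias.scope/!noalias. Function names, scope names, and metadata numbering here are illustrative assumptions, not copied from the test.

  ; Illustrative sketch: a memcpy annotated with scoped-AA metadata.
  declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)

  define void @copy16(ptr %dst, ptr %src) {
    ; These attachments are what should survive onto the lowered loads/stores.
    tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 %dst, ptr align 4 %src, i64 16, i1 false), !alias.scope !1, !noalias !3
    ret void
  }

  !0 = distinct !{!0, !"domain"}        ; alias-scope domain
  !1 = !{!2}                            ; !alias.scope list for the copy
  !2 = distinct !{!2, !0, !"dst scope"} ; scope node in that domain
  !3 = !{!4}                            ; !noalias list (the other scope)
  !4 = distinct !{!4, !0, !"src scope"} ; scope node in that domain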