[llvm] [MSan] Copy tests for 32-bit architectures (PR #111835)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Oct 10 06:30:19 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: None (k-kashapov)
<details>
<summary>Changes</summary>
As discussed in https://github.com/llvm/llvm-project/pull/109284, this copies MSan tests from 64-bit platforms to the following 32-bit platforms:
* MIPS
* ARM
* RISCV
* PowerPC
* i386
Most of the tests have been copied from mips64.
Target triples and test contents have not been changed; that is deferred to the next PR (a sketch of the intended change follows).
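As a rough illustration (not part of this patch), the follow-up retargeting of, for example, the ARM32 copy would replace the inherited mips64 lines with a 32-bit ARM target. The triple and especially the datalayout string below are assumptions and would need to match what clang actually emits for the chosen target:

```llvm
; Hypothetical follow-up edit (next PR), shown for illustration only:
; retarget the copied ARM32 test away from the inherited mips64 lines.
; The exact datalayout string is an assumption, not taken from this patch.
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "armv7--linux-gnueabi"
```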
---
Patch is 681.40 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/111835.diff
19 Files Affected:
- (added) llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll (+92)
- (added) llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll (+92)
- (added) llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll (+82)
- (added) llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll (+149)
- (added) llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll (+125)
- (added) llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppcle.ll (+123)
- (added) llvm/test/Instrumentation/MemorySanitizer/RISCV32/vararg-riscv32.ll (+92)
- (added) llvm/test/Instrumentation/MemorySanitizer/i386/avx-intrinsics-x86.ll (+1457)
- (added) llvm/test/Instrumentation/MemorySanitizer/i386/avx2-intrinsics-x86.ll (+2154)
- (added) llvm/test/Instrumentation/MemorySanitizer/i386/mmx-intrinsics.ll (+3626)
- (added) llvm/test/Instrumentation/MemorySanitizer/i386/msan_x86_bts_asm.ll (+89)
- (added) llvm/test/Instrumentation/MemorySanitizer/i386/msan_x86intrinsics.ll (+72)
- (added) llvm/test/Instrumentation/MemorySanitizer/i386/sse-intrinsics-x86.ll (+519)
- (added) llvm/test/Instrumentation/MemorySanitizer/i386/sse2-intrinsics-x86.ll (+1381)
- (added) llvm/test/Instrumentation/MemorySanitizer/i386/sse41-intrinsics-x86.ll (+431)
- (added) llvm/test/Instrumentation/MemorySanitizer/i386/vararg-too-large.ll (+34)
- (added) llvm/test/Instrumentation/MemorySanitizer/i386/vararg.ll (+15)
- (added) llvm/test/Instrumentation/MemorySanitizer/i386/vararg_call.ll (+117)
- (added) llvm/test/Instrumentation/MemorySanitizer/i386/vararg_shadow.ll (+1315)
``````````diff
diff --git a/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll b/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
new file mode 100644
index 00000000000000..8c23d954224264
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/ARM32/vararg-arm32.ll
@@ -0,0 +1,92 @@
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
+
+target datalayout = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
+target triple = "mips64--linux"
+
+define i32 @foo(i32 %guard, ...) {
+ %vl = alloca ptr, align 8
+ call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.va_start(ptr %vl)
+ call void @llvm.va_end(ptr %vl)
+ call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ ret i32 0
+}
+
+; First, check allocation of the save area.
+
+; CHECK-LABEL: @foo
+; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
+; CHECK: [[B:%.*]] = add i64 0, [[A]]
+; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
+
+; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[B]], i1 false)
+
+; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[B]], i64 800)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
+
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.va_start(ptr) #2
+declare void @llvm.va_end(ptr) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+
+define i32 @bar() {
+ %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
+ ret i32 %1
+}
+
+; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
+; array. The first argument is stored at position 4, since it's right
+; justified.
+; CHECK-LABEL: @bar
+; CHECK: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 4) to ptr), align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
+
+; Check multiple fixed arguments.
+declare i32 @foo2(i32 %g1, i32 %g2, ...)
+define i32 @bar2() {
+ %1 = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
+ ret i32 %1
+}
+
+; CHECK-LABEL: @bar2
+; CHECK: store i64 0, ptr @__msan_va_arg_tls, align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
+
+; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
+; passed to a variadic function.
+define dso_local i64 @many_args() {
+entry:
+ %ret = call i64 (i64, ...) @sum(i64 120,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1
+ )
+ ret i64 %ret
+}
+
+; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed.
+; CHECK-LABEL: @many_args
+; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
+; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
+declare i64 @sum(i64 %n, ...)
+
+; CHECK: declare void @__msan_maybe_warning_1(i8 signext, i32 signext)
+; CHECK: declare void @__msan_maybe_store_origin_1(i8 signext, ptr, i32 signext)
+; CHECK: declare void @__msan_maybe_warning_2(i16 signext, i32 signext)
+; CHECK: declare void @__msan_maybe_store_origin_2(i16 signext, ptr, i32 signext)
+; CHECK: declare void @__msan_maybe_warning_4(i32 signext, i32 signext)
+; CHECK: declare void @__msan_maybe_store_origin_4(i32 signext, ptr, i32 signext)
+; CHECK: declare void @__msan_maybe_warning_8(i64 signext, i32 signext)
+; CHECK: declare void @__msan_maybe_store_origin_8(i64 signext, ptr, i32 signext)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
new file mode 100644
index 00000000000000..8c23d954224264
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mips.ll
@@ -0,0 +1,92 @@
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
+
+target datalayout = "E-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
+target triple = "mips64--linux"
+
+define i32 @foo(i32 %guard, ...) {
+ %vl = alloca ptr, align 8
+ call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.va_start(ptr %vl)
+ call void @llvm.va_end(ptr %vl)
+ call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ ret i32 0
+}
+
+; First, check allocation of the save area.
+
+; CHECK-LABEL: @foo
+; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
+; CHECK: [[B:%.*]] = add i64 0, [[A]]
+; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
+
+; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[B]], i1 false)
+
+; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[B]], i64 800)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
+
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.va_start(ptr) #2
+declare void @llvm.va_end(ptr) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+
+define i32 @bar() {
+ %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
+ ret i32 %1
+}
+
+; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
+; array. The first argument is stored at position 4, since it's right
+; justified.
+; CHECK-LABEL: @bar
+; CHECK: store i32 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 4) to ptr), align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
+
+; Check multiple fixed arguments.
+declare i32 @foo2(i32 %g1, i32 %g2, ...)
+define i32 @bar2() {
+ %1 = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
+ ret i32 %1
+}
+
+; CHECK-LABEL: @bar2
+; CHECK: store i64 0, ptr @__msan_va_arg_tls, align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
+
+; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
+; passed to a variadic function.
+define dso_local i64 @many_args() {
+entry:
+ %ret = call i64 (i64, ...) @sum(i64 120,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1
+ )
+ ret i64 %ret
+}
+
+; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed.
+; CHECK-LABEL: @many_args
+; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
+; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
+declare i64 @sum(i64 %n, ...)
+
+; CHECK: declare void @__msan_maybe_warning_1(i8 signext, i32 signext)
+; CHECK: declare void @__msan_maybe_store_origin_1(i8 signext, ptr, i32 signext)
+; CHECK: declare void @__msan_maybe_warning_2(i16 signext, i32 signext)
+; CHECK: declare void @__msan_maybe_store_origin_2(i16 signext, ptr, i32 signext)
+; CHECK: declare void @__msan_maybe_warning_4(i32 signext, i32 signext)
+; CHECK: declare void @__msan_maybe_store_origin_4(i32 signext, ptr, i32 signext)
+; CHECK: declare void @__msan_maybe_warning_8(i64 signext, i32 signext)
+; CHECK: declare void @__msan_maybe_store_origin_8(i64 signext, ptr, i32 signext)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
new file mode 100644
index 00000000000000..17f4b826be0bee
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/Mips32/vararg-mipsel.ll
@@ -0,0 +1,82 @@
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
+
+target datalayout = "e-m:m-i8:8:32-i16:16:32-i64:64-n32:64-S128"
+target triple = "mips64el--linux"
+
+define i32 @foo(i32 %guard, ...) {
+ %vl = alloca ptr, align 8
+ call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.va_start(ptr %vl)
+ call void @llvm.va_end(ptr %vl)
+ call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ ret i32 0
+}
+
+; First, check allocation of the save area.
+
+; CHECK-LABEL: @foo
+; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
+; CHECK: [[B:%.*]] = add i64 0, [[A]]
+; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
+
+; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[B]], i1 false)
+
+; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[B]], i64 800)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
+
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.va_start(ptr) #2
+declare void @llvm.va_end(ptr) #2
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
+
+define i32 @bar() {
+ %1 = call i32 (i32, ...) @foo(i32 0, i32 1, i64 2, double 3.000000e+00)
+ ret i32 %1
+}
+
+; Save the incoming shadow value from the arguments in the __msan_va_arg_tls
+; array.
+; CHECK-LABEL: @bar
+; CHECK: store i32 0, ptr @__msan_va_arg_tls, align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 16) to ptr), align 8
+; CHECK: store {{.*}} 24, {{.*}} @__msan_va_arg_overflow_size_tls
+
+; Check multiple fixed arguments.
+declare i32 @foo2(i32 %g1, i32 %g2, ...)
+define i32 @bar2() {
+ %1 = call i32 (i32, i32, ...) @foo2(i32 0, i32 1, i64 2, double 3.000000e+00)
+ ret i32 %1
+}
+
+; CHECK-LABEL: @bar2
+; CHECK: store i64 0, ptr @__msan_va_arg_tls, align 8
+; CHECK: store i64 0, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 8) to ptr), align 8
+; CHECK: store {{.*}} 16, {{.*}} @__msan_va_arg_overflow_size_tls
+
+; Test that MSan doesn't generate code overflowing __msan_va_arg_tls when too many arguments are
+; passed to a variadic function.
+define dso_local i64 @many_args() {
+entry:
+ %ret = call i64 (i64, ...) @sum(i64 120,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1,
+ i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1
+ )
+ ret i64 %ret
+}
+
+; If the size of __msan_va_arg_tls changes the second argument of `add` must also be changed.
+; CHECK-LABEL: @many_args
+; CHECK: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 792)
+; CHECK-NOT: i64 add (i64 ptrtoint (ptr @__msan_va_arg_tls to i64), i64 800)
+declare i64 @sum(i64 %n, ...)
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll
new file mode 100644
index 00000000000000..b4e472a134abd9
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/kernel-ppcle.ll
@@ -0,0 +1,149 @@
+; RUN: opt < %s -S -msan-kernel=1 -passes=msan 2>&1 | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le--linux"
+
+define void @Store1(ptr %p, i8 %x) sanitize_memory {
+entry:
+ store i8 %x, ptr %p
+ ret void
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Store1(
+; CHECK: [[META:%[a-z0-9_]+]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_1(ptr %p)
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: store i8 {{.+}}, ptr [[SHADOW]]
+; CHECK: ret void
+
+define void @Store2(ptr %p, i16 %x) sanitize_memory {
+entry:
+ store i16 %x, ptr %p
+ ret void
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Store2(
+; CHECK: [[META:%[a-z0-9_]+]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_2(ptr %p)
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: store i16 {{.+}}, ptr [[SHADOW]]
+; CHECK: ret void
+
+define void @Store4(ptr %p, i32 %x) sanitize_memory {
+entry:
+ store i32 %x, ptr %p
+ ret void
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Store4(
+; CHECK: [[META:%[a-z0-9_]+]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_4(ptr %p)
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: store i32 {{.+}}, ptr [[SHADOW]]
+; CHECK: ret void
+
+define void @Store8(ptr %p, i64 %x) sanitize_memory {
+entry:
+ store i64 %x, ptr %p
+ ret void
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Store8(
+; CHECK: [[META:%[a-z0-9_]+]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_8(ptr %p)
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: store i64 {{.+}}, ptr [[SHADOW]]
+; CHECK: ret void
+
+define void @Store16(ptr %p, i128 %x) sanitize_memory {
+entry:
+ store i128 %x, ptr %p
+ ret void
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Store16(
+; CHECK: [[META:%[a-z0-9_]+]] = call { ptr, ptr } @__msan_metadata_ptr_for_store_n(ptr %p, i64 16)
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: store i128 {{.+}}, ptr [[SHADOW]]
+; CHECK: ret void
+
+define i8 @Load1(ptr %p) sanitize_memory {
+entry:
+ %0 = load i8, ptr %p
+ ret i8 %0
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Load1(
+; CHECK: [[META:%[a-z0-9_]+]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_1(ptr %p)
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: [[SHADOW_VAL:%[a-z0-9_]+]] = load i8, ptr [[SHADOW]]
+; CHECK: [[ORIGIN_VAL:%[a-z0-9_]+]] = load i32, ptr [[ORIGIN]]
+; CHECK: store i8 [[SHADOW_VAL]], ptr %retval_shadow
+; CHECK: store i32 [[ORIGIN_VAL]], ptr %retval_origin
+; CHECK: ret i8 {{.+}}
+
+define i16 @Load2(ptr %p) sanitize_memory {
+entry:
+ %0 = load i16, ptr %p
+ ret i16 %0
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Load2(
+; CHECK: [[META:%[a-z0-9_]+]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_2(ptr %p)
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: [[SHADOW_VAL:%[a-z0-9_]+]] = load i16, ptr [[SHADOW]]
+; CHECK: [[ORIGIN_VAL:%[a-z0-9_]+]] = load i32, ptr [[ORIGIN]]
+; CHECK: store i16 [[SHADOW_VAL]], ptr %retval_shadow
+; CHECK: store i32 [[ORIGIN_VAL]], ptr %retval_origin
+; CHECK: ret i16 {{.+}}
+
+define i32 @Load4(ptr %p) sanitize_memory {
+entry:
+ %0 = load i32, ptr %p
+ ret i32 %0
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Load4(
+; CHECK: [[META:%[a-z0-9_]+]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_4(ptr %p)
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: [[SHADOW_VAL:%[a-z0-9_]+]] = load i32, ptr [[SHADOW]]
+; CHECK: [[ORIGIN_VAL:%[a-z0-9_]+]] = load i32, ptr [[ORIGIN]]
+; CHECK: store i32 [[SHADOW_VAL]], ptr %retval_shadow
+; CHECK: store i32 [[ORIGIN_VAL]], ptr %retval_origin
+; CHECK: ret i32 {{.+}}
+
+define i64 @Load8(ptr %p) sanitize_memory {
+entry:
+ %0 = load i64, ptr %p
+ ret i64 %0
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Load8(
+; CHECK: [[META:%[a-z0-9_]+]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_8(ptr %p)
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: [[SHADOW_VAL:%[a-z0-9_]+]] = load i64, ptr [[SHADOW]]
+; CHECK: [[ORIGIN_VAL:%[a-z0-9_]+]] = load i32, ptr [[ORIGIN]]
+; CHECK: store i64 [[SHADOW_VAL]], ptr %retval_shadow
+; CHECK: store i32 [[ORIGIN_VAL]], ptr %retval_origin
+; CHECK: ret i64 {{.+}}
+
+define i128 @Load16(ptr %p) sanitize_memory {
+entry:
+ %0 = load i128, ptr %p
+ ret i128 %0
+}
+
+; CHECK-LABEL: define {{[^@]+}}@Load16(
+; CHECK: [[META:%[a-z0-9_]+]] = call { ptr, ptr } @__msan_metadata_ptr_for_load_n(ptr %p, i64 16)
+; CHECK: [[SHADOW:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 0
+; CHECK: [[ORIGIN:%[a-z0-9_]+]] = extractvalue { ptr, ptr } [[META]], 1
+; CHECK: [[SHADOW_VAL:%[a-z0-9_]+]] = load i128, ptr [[SHADOW]]
+; CHECK: [[ORIGIN_VAL:%[a-z0-9_]+]] = load i32, ptr [[ORIGIN]]
+; CHECK: store i128 [[SHADOW_VAL]], ptr %retval_shadow
+; CHECK: store i32 [[ORIGIN_VAL]], ptr %retval_origin
+; CHECK: ret i128 {{.+}}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
new file mode 100644
index 00000000000000..db09c5a4771868
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/PowerPC32/vararg-ppc.ll
@@ -0,0 +1,125 @@
+; RUN: opt < %s -S -passes=msan 2>&1 | FileCheck %s
+
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64--linux"
+
+define i32 @foo(i32 %guard, ...) {
+ %vl = alloca ptr, align 8
+ call void @llvm.lifetime.start.p0(i64 32, ptr %vl)
+ call void @llvm.va_start(ptr %vl)
+ call void @llvm.va_end(ptr %vl)
+ call void @llvm.lifetime.end.p0(i64 32, ptr %vl)
+ ret i32 0
+}
+
+; First, check allocation of the save area.
+
+; CHECK-LABEL: @foo
+; CHECK: [[A:%.*]] = load {{.*}} @__msan_va_arg_overflow_size_tls
+; CHECK: [[B:%.*]] = add i64 0, [[A]]
+; CHECK: [[C:%.*]] = alloca {{.*}} [[B]]
+
+; CHECK: call void @llvm.memset.p0.i64(ptr align 8 [[C]], i8 0, i64 [[B]], i1 false)
+
+; CHECK: [[D:%.*]] = call i64 @llvm.umin.i64(i64 [[B]], i64 800)
+; CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[C]], ptr align 8 @__msan_va_arg_tls, i64 [[D]], i1 false)
+
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
+declare void @llvm.va_start(ptr) #2
+declare void @llvm.va_end(ptr) #2
+declare void @llvm.l...
[truncated]
``````````
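For readers skimming the truncated diff: the vararg tests above all check the same instrumentation shape in the variadic callee. Below is a hand-written sketch of that shape; it is not output produced by this patch, and the function name is made up. The 800-byte cap matches the fixed size of `__msan_va_arg_tls` visible in the CHECK lines:

```llvm
; Sketch only: MSan sizes a shadow save area from __msan_va_arg_overflow_size_tls,
; zero-initializes it, and copies at most 800 bytes of argument shadow out of
; __msan_va_arg_tls into it.
@__msan_va_arg_tls = external thread_local global [100 x i64]
@__msan_va_arg_overflow_size_tls = external thread_local global i64

define void @copy_va_arg_shadow_sketch() {
  %overflow = load i64, ptr @__msan_va_arg_overflow_size_tls
  %size = add i64 0, %overflow
  %save = alloca i8, i64 %size, align 8
  call void @llvm.memset.p0.i64(ptr align 8 %save, i8 0, i64 %size, i1 false)
  %n = call i64 @llvm.umin.i64(i64 %size, i64 800)
  call void @llvm.memcpy.p0.p0.i64(ptr align 8 %save, ptr align 8 @__msan_va_arg_tls, i64 %n, i1 false)
  ret void
}

declare void @llvm.memset.p0.i64(ptr, i8, i64, i1)
declare void @llvm.memcpy.p0.p0.i64(ptr, ptr, i64, i1)
declare i64 @llvm.umin.i64(i64, i64)
```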
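The PowerPC32/kernel-ppcle.ll test exercises a different pattern: with -msan-kernel=1, shadow and origin addresses come from the __msan_metadata_ptr_for_{load,store}_N callbacks rather than from address arithmetic. A hypothetical, hand-written illustration follows; the function name and the %x_shadow parameter are made up (real instrumentation derives the shadow of %x from parameter shadow TLS):

```llvm
; Sketch of the kernel-mode pattern the test checks for a 4-byte store.
declare { ptr, ptr } @__msan_metadata_ptr_for_store_4(ptr)

define void @store4_sketch(ptr %p, i32 %x, i32 %x_shadow) {
  ; The callback returns { shadow ptr, origin ptr } for the application address.
  %meta = call { ptr, ptr } @__msan_metadata_ptr_for_store_4(ptr %p)
  %shadow_ptr = extractvalue { ptr, ptr } %meta, 0
  store i32 %x_shadow, ptr %shadow_ptr
  store i32 %x, ptr %p
  ret void
}
```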
</details>
https://github.com/llvm/llvm-project/pull/111835
More information about the llvm-commits mailing list