[llvm] 27a8501 - [msan][NFC] Add f16c-intrinsics.ll tests (#129807)

via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 4 19:53:05 PST 2025


Author: Thurston Dang
Date: 2025-03-04T19:53:03-08:00
New Revision: 27a8501acc38f9802ec0d4b2e7a50d3ed1721b97

URL: https://github.com/llvm/llvm-project/commit/27a8501acc38f9802ec0d4b2e7a50d3ed1721b97
DIFF: https://github.com/llvm/llvm-project/commit/27a8501acc38f9802ec0d4b2e7a50d3ed1721b97.diff

LOG: [msan][NFC] Add f16c-intrinsics.ll tests (#129807)

Forked from llvm/test/CodeGen/X86/f16c-intrinsics.ll

Handled by visitInstruction:
- llvm.x86.vcvtps2ph.128/256
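
For context, "handled by visitInstruction" means that MemorySanitizer has no
dedicated handler for these intrinsics, so they fall through to the default
strict policy: the shadow of every argument is checked (branching to
__msan_warning_noreturn if any bit may be uninitialized) and the result shadow
is set to fully initialized. The CHECK lines in the test below show exactly
that pattern. As a rough, standalone illustration of the policy (hypothetical
names and types, not the MemorySanitizer.cpp implementation):

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  // Illustrative model of the strict fallback applied by visitInstruction.
  // A set shadow bit means "this bit may be uninitialized".
  struct ShadowedValue {
    uint64_t value;
    uint64_t shadow;
  };

  // If any operand shadow is non-zero, report an error (this corresponds to
  // the icmp ne + branch to __msan_warning_noreturn in the instrumented IR);
  // the result is then treated as fully initialized (the zeroinitializer
  // stored to __msan_retval_tls).
  ShadowedValue strictFallback(const std::vector<ShadowedValue> &operands) {
    for (const ShadowedValue &op : operands)
      if (op.shadow != 0) {
        std::puts("use-of-uninitialized-value");
        break;
      }
    return {0, 0}; // clean result shadow
  }

  int main() {
    strictFallback({{42, 0}});    // clean operand: no report
    strictFallback({{42, 0xFF}}); // poisoned operand: report emitted
    return 0;
  }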

Added: 
    llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics.ll
new file mode 100644
index 0000000000000..0868e0c836e80
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics.ll
@@ -0,0 +1,204 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=msan -S | FileCheck %s
+;
+; Forked from llvm/test/CodeGen/X86/f16c-intrinsics.ll
+;
+; Handled by visitInstruction:
+; - llvm.x86.vcvtps2ph.128/256
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define <8 x i16> @test_x86_vcvtps2ph_128(<4 x float> %a0) #0 {
+; CHECK-LABEL: define <8 x i16> @test_x86_vcvtps2ph_128(
+; CHECK-SAME: <4 x float> [[A0:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1:![0-9]+]]
+; CHECK:       [[BB3]]:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
+; CHECK-NEXT:    unreachable
+; CHECK:       [[BB4]]:
+; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> [[A0]], i32 0)
+; CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    ret <8 x i16> [[RES]]
+;
+  %res = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1]
+  ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float>, i32) nounwind readonly
+
+define <8 x i16> @test_x86_vcvtps2ph_256(<8 x float> %a0) #0 {
+; CHECK-LABEL: define <8 x i16> @test_x86_vcvtps2ph_256(
+; CHECK-SAME: <8 x float> [[A0:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP2]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
+; CHECK:       [[BB3]]:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       [[BB4]]:
+; CHECK-NEXT:    [[RES:%.*]] = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> [[A0]], i32 0)
+; CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT:    ret <8 x i16> [[RES]]
+;
+  %res = call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a0, i32 0) ; <<8 x i16>> [#uses=1]
+  ret <8 x i16> %res
+}
+declare <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float>, i32) nounwind readonly
+
+
+define void @test_x86_vcvtps2ph_256_m(ptr nocapture %d, <8 x float> %a) nounwind #0 {
+; CHECK-LABEL: define void @test_x86_vcvtps2ph_256_m(
+; CHECK-SAME: ptr captures(none) [[D:%.*]], <8 x float> [[A:%.*]]) #[[ATTR2:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP17:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP18:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i32> [[TMP17]] to i256
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
+; CHECK:       [[BB3]]:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       [[BB4]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = tail call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> [[A]], i32 3)
+; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP18]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP1]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
+; CHECK:       [[BB6]]:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       [[BB7]]:
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[D]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
+; CHECK-NEXT:    store <8 x i16> zeroinitializer, ptr [[TMP3]], align 16
+; CHECK-NEXT:    store <8 x i16> [[TMP0]], ptr [[D]], align 16
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.256(<8 x float> %a, i32 3)
+  store <8 x i16> %0, ptr %d, align 16
+  ret void
+}
+
+define void @test_x86_vcvtps2ph_128_m(ptr nocapture %d, <4 x float> %a) nounwind #0 {
+; CHECK-LABEL: define void @test_x86_vcvtps2ph_128_m(
+; CHECK-SAME: ptr captures(none) [[D:%.*]], <4 x float> [[A:%.*]]) #[[ATTR2]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP10:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP9]] to i128
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
+; CHECK:       [[BB3]]:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       [[BB4]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> [[A]], i32 3)
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x i16> [[TMP0]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP10]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP1]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]]
+; CHECK:       [[BB7]]:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       [[BB8]]:
+; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[D]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
+; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT:    store <4 x i16> zeroinitializer, ptr [[TMP4]], align 8
+; CHECK-NEXT:    store <4 x i16> [[TMP1]], ptr [[D]], align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %a, i32 3)
+  %1 = shufflevector <8 x i16> %0, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  store <4 x i16> %1, ptr %d, align 8
+  ret void
+}
+
+define void @test_x86_vcvtps2ph_128_m2(ptr nocapture %hf4x16, <4 x float> %f4X86) #0 {
+; CHECK-LABEL: define void @test_x86_vcvtps2ph_128_m2(
+; CHECK-SAME: ptr captures(none) [[HF4X16:%.*]], <4 x float> [[F4X86:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to i128
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
+; CHECK:       [[BB3]]:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       [[BB4]]:
+; CHECK-NEXT:    [[TMP11:%.*]] = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> [[F4X86]], i32 3)
+; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <8 x i16> [[TMP11]] to <2 x double>
+; CHECK-NEXT:    [[VECEXT:%.*]] = extractelement <2 x double> [[TMP12]], i32 0
+; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP1]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]]
+; CHECK:       [[BB7]]:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       [[BB8]]:
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[HF4X16]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = xor i64 [[TMP15]], 87960930222080
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store i64 0, ptr [[TMP17]], align 8
+; CHECK-NEXT:    store double [[VECEXT]], ptr [[HF4X16]], align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %f4X86, i32 3)
+  %1 = bitcast <8 x i16> %0 to <2 x double>
+  %vecext = extractelement <2 x double> %1, i32 0
+  store double %vecext, ptr %hf4x16, align 8
+  ret void
+}
+
+define void @test_x86_vcvtps2ph_128_m3(ptr nocapture %hf4x16, <4 x float> %f4X86) #0 {
+; CHECK-LABEL: define void @test_x86_vcvtps2ph_128_m3(
+; CHECK-SAME: ptr captures(none) [[HF4X16:%.*]], <4 x float> [[F4X86:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP0]] to i128
+; CHECK-NEXT:    [[_MSCMP:%.*]] = icmp ne i128 [[TMP2]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP]], label %[[BB3:.*]], label %[[BB4:.*]], !prof [[PROF1]]
+; CHECK:       [[BB3]]:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       [[BB4]]:
+; CHECK-NEXT:    [[TMP11:%.*]] = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> [[F4X86]], i32 3)
+; CHECK-NEXT:    [[TMP12:%.*]] = bitcast <8 x i16> [[TMP11]] to <2 x i64>
+; CHECK-NEXT:    [[VECEXT:%.*]] = extractelement <2 x i64> [[TMP12]], i32 0
+; CHECK-NEXT:    [[_MSCMP1:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT:    br i1 [[_MSCMP1]], label %[[BB7:.*]], label %[[BB8:.*]], !prof [[PROF1]]
+; CHECK:       [[BB7]]:
+; CHECK-NEXT:    call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT:    unreachable
+; CHECK:       [[BB8]]:
+; CHECK-NEXT:    [[TMP15:%.*]] = ptrtoint ptr [[HF4X16]] to i64
+; CHECK-NEXT:    [[TMP16:%.*]] = xor i64 [[TMP15]], 87960930222080
+; CHECK-NEXT:    [[TMP17:%.*]] = inttoptr i64 [[TMP16]] to ptr
+; CHECK-NEXT:    store i64 0, ptr [[TMP17]], align 8
+; CHECK-NEXT:    store i64 [[VECEXT]], ptr [[HF4X16]], align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = tail call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %f4X86, i32 3)
+  %1 = bitcast <8 x i16> %0 to <2 x i64>
+  %vecext = extractelement <2 x i64> %1, i32 0
+  store i64 %vecext, ptr %hf4x16, align 8
+  ret void
+}
+
+attributes #0 = { sanitize_memory }
+;.
+; CHECK: [[PROF1]] = !{!"branch_weights", i32 1, i32 1048575}
+;.
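
A note on the ptrtoint/xor/inttoptr sequence in the stores above: on Linux
x86-64, MSan derives the shadow address for an application address by XOR-ing
it with 0x500000000000 (the 87960930222080 constant in the IR), and the
instrumentation writes the value's shadow there just before the application
store. A minimal standalone sketch of that mapping (shadowAddress is an
illustrative helper, not an LLVM or compiler-rt API):

  #include <cstdint>
  #include <cstdio>

  // 87960930222080 == 0x500000000000, the xor constant in the CHECK lines.
  // XOR-ing an application address with it yields the shadow address that the
  // instrumented code writes the (clean) shadow to before the real store.
  static uint64_t shadowAddress(uint64_t appAddr) {
    return appAddr ^ 0x500000000000ULL;
  }

  int main() {
    uint64_t d = 0x7ffd12345678ULL; // an example application address
    std::printf("app 0x%llx -> shadow 0x%llx\n",
                (unsigned long long)d, (unsigned long long)shadowAddress(d));
    return 0;
  }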

More information about the llvm-commits mailing list