[llvm] [msan][NFCI] Add tests for dot product intrinsics on Arm (PR #174042)
Vitaly Buka via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 30 17:44:21 PST 2025
================
@@ -0,0 +1,892 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -mattr=+dotprod < %s -passes=msan -S | FileCheck %s
+;
+; Forked from llvm/test/CodeGen/AArch64/neon-dot-product.ll
+;
+; Strictly handled:
+; - llvm.aarch64.neon.sdot.v2i32.v8i8
+; - llvm.aarch64.neon.sdot.v4i32.v16i8
+; - llvm.aarch64.neon.udot.v2i32.v8i8
+; - llvm.aarch64.neon.udot.v4i32.v16i8
+;
+; Heuristically handled: (none)
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-android9001"
+
+declare <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>)
+declare <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32>, <8 x i8>, <8 x i8>)
+declare <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32>, <16 x i8>, <16 x i8>)
+
+define <2 x i32> @test_vdot_u32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 sanitize_memory {
+; CHECK-LABEL: define <2 x i32> @test_vdot_u32(
+; CHECK-SAME: <2 x i32> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP0]] to i64
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
+; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i64 [[TMP4]], 0
+; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP2]] to i64
+; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
+; CHECK-NEXT: br i1 [[_MSOR3]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1:![0-9]+]]
+; CHECK: [[BB6]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB7]]:
+; CHECK-NEXT: [[VDOT1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> [[A]], <8 x i8> [[B]], <8 x i8> [[C]])
+; CHECK-NEXT: store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <2 x i32> [[VDOT1_I]]
+;
+entry:
+ %vdot1.i = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #2
+ ret <2 x i32> %vdot1.i
+}
+
+define <4 x i32> @test_vdotq_u32(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 sanitize_memory {
+; CHECK-LABEL: define <4 x i32> @test_vdotq_u32(
+; CHECK-SAME: <4 x i32> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP0]] to i128
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
+; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP4]], 0
+; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
+; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i128 [[TMP5]], 0
+; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
+; CHECK-NEXT: br i1 [[_MSOR3]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
+; CHECK: [[BB6]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB7]]:
+; CHECK-NEXT: [[VDOT1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> [[A]], <16 x i8> [[B]], <16 x i8> [[C]])
+; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <4 x i32> [[VDOT1_I]]
+;
+entry:
+ %vdot1.i = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #2
+ ret <4 x i32> %vdot1.i
+}
+
+define <2 x i32> @test_vdot_s32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 sanitize_memory {
+; CHECK-LABEL: define <2 x i32> @test_vdot_s32(
+; CHECK-SAME: <2 x i32> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP0]] to i64
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
+; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i64 [[TMP4]], 0
+; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i8> [[TMP2]] to i64
+; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
+; CHECK-NEXT: br i1 [[_MSOR3]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
+; CHECK: [[BB6]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB7]]:
+; CHECK-NEXT: [[VDOT1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> [[A]], <8 x i8> [[B]], <8 x i8> [[C]])
+; CHECK-NEXT: store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <2 x i32> [[VDOT1_I]]
+;
+entry:
+ %vdot1.i = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #2
+ ret <2 x i32> %vdot1.i
+}
+
+define <4 x i32> @test_vdotq_s32(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 sanitize_memory {
+; CHECK-LABEL: define <4 x i32> @test_vdotq_s32(
+; CHECK-SAME: <4 x i32> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP0]] to i128
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
+; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP4]], 0
+; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
+; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i128 [[TMP5]], 0
+; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
+; CHECK-NEXT: br i1 [[_MSOR3]], label %[[BB6:.*]], label %[[BB7:.*]], !prof [[PROF1]]
+; CHECK: [[BB6]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB7]]:
+; CHECK-NEXT: [[VDOT1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> [[A]], <16 x i8> [[B]], <16 x i8> [[C]])
+; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <4 x i32> [[VDOT1_I]]
+;
+entry:
+ %vdot1.i = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #2
+ ret <4 x i32> %vdot1.i
+}
+
+
+define <2 x i32> @test_vdot_u32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 sanitize_memory {
+; CHECK-LABEL: define <2 x i32> @test_vdot_u32_zero(
+; CHECK-SAME: <2 x i32> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to i64
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
+; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i64 [[TMP4]], 0
+; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; CHECK-NEXT: br i1 [[_MSOR]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
+; CHECK: [[BB5]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB6]]:
+; CHECK-NEXT: [[VDOT1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> [[B]], <8 x i8> [[C]])
+; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i32> zeroinitializer, [[TMP2]]
+; CHECK-NEXT: [[RET:%.*]] = add <2 x i32> [[VDOT1_I]], [[A]]
+; CHECK-NEXT: store <2 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <2 x i32> [[RET]]
+;
+entry:
+ %vdot1.i = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %c) #2
+ %ret = add <2 x i32> %vdot1.i, %a
+ ret <2 x i32> %ret
+}
+
+define <4 x i32> @test_vdotq_u32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 sanitize_memory {
+; CHECK-LABEL: define <4 x i32> @test_vdotq_u32_zero(
+; CHECK-SAME: <4 x i32> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to i128
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
+; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP4]], 0
+; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; CHECK-NEXT: br i1 [[_MSOR]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
+; CHECK: [[BB5]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB6]]:
+; CHECK-NEXT: [[VDOT1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> [[B]], <16 x i8> [[C]])
+; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> zeroinitializer, [[TMP2]]
+; CHECK-NEXT: [[RET:%.*]] = add <4 x i32> [[VDOT1_I]], [[A]]
+; CHECK-NEXT: store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <4 x i32> [[RET]]
+;
+entry:
+ %vdot1.i = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %c) #2
+ %ret = add <4 x i32> %vdot1.i, %a
+ ret <4 x i32> %ret
+}
+
+define <2 x i32> @test_vdot_s32_zero(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) #0 sanitize_memory {
+; CHECK-LABEL: define <2 x i32> @test_vdot_s32_zero(
+; CHECK-SAME: <2 x i32> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to i64
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP3]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i8> [[TMP1]] to i64
+; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i64 [[TMP4]], 0
+; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; CHECK-NEXT: br i1 [[_MSOR]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
+; CHECK: [[BB5]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB6]]:
+; CHECK-NEXT: [[VDOT1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> [[B]], <8 x i8> [[C]])
+; CHECK-NEXT: [[_MSPROP:%.*]] = or <2 x i32> zeroinitializer, [[TMP2]]
+; CHECK-NEXT: [[RET:%.*]] = add <2 x i32> [[VDOT1_I]], [[A]]
+; CHECK-NEXT: store <2 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <2 x i32> [[RET]]
+;
+entry:
+ %vdot1.i = call <2 x i32> @llvm.aarch64.neon.sdot.v2i32.v8i8(<2 x i32> zeroinitializer, <8 x i8> %b, <8 x i8> %c) #2
+ %ret = add <2 x i32> %vdot1.i, %a
+ ret <2 x i32> %ret
+}
+
+define <4 x i32> @test_vdotq_s32_zero(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) #0 sanitize_memory {
+; CHECK-LABEL: define <4 x i32> @test_vdotq_s32_zero(
+; CHECK-SAME: <4 x i32> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to i128
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP3]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i8> [[TMP1]] to i128
+; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP4]], 0
+; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; CHECK-NEXT: br i1 [[_MSOR]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
+; CHECK: [[BB5]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB6]]:
+; CHECK-NEXT: [[VDOT1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> [[B]], <16 x i8> [[C]])
+; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> zeroinitializer, [[TMP2]]
+; CHECK-NEXT: [[RET:%.*]] = add <4 x i32> [[VDOT1_I]], [[A]]
+; CHECK-NEXT: store <4 x i32> [[_MSPROP]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <4 x i32> [[RET]]
+;
+entry:
+ %vdot1.i = call <4 x i32> @llvm.aarch64.neon.sdot.v4i32.v16i8(<4 x i32> zeroinitializer, <16 x i8> %b, <16 x i8> %c) #2
+ %ret = add <4 x i32> %vdot1.i, %a
+ ret <4 x i32> %ret
+}
+
+
+define <2 x i32> @test_vdot_lane_u32(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c) sanitize_memory {
+; CHECK-LABEL: define <2 x i32> @test_vdot_lane_u32(
+; CHECK-SAME: <2 x i32> [[A:%.*]], <8 x i8> [[B:%.*]], <8 x i8> [[C:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+; CHECK-NEXT: [[DOTCAST:%.*]] = bitcast <8 x i8> [[C]] to <2 x i32>
+; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP3]], <2 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
+; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <2 x i32> [[DOTCAST]], <2 x i32> poison, <2 x i32> <i32 1, i32 1>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i32> [[_MSPROP]] to <8 x i8>
+; CHECK-NEXT: [[DOTCAST5:%.*]] = bitcast <2 x i32> [[SHUFFLE]] to <8 x i8>
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i32> [[TMP1]] to i64
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP2]] to i64
+; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i64 [[TMP6]], 0
+; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; CHECK-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to i64
+; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
+; CHECK-NEXT: br i1 [[_MSOR3]], label %[[BB8:.*]], label %[[BB9:.*]], !prof [[PROF1]]
+; CHECK: [[BB8]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB9]]:
+; CHECK-NEXT: [[VDOT1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> [[A]], <8 x i8> [[B]], <8 x i8> [[DOTCAST5]])
+; CHECK-NEXT: store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <2 x i32> [[VDOT1_I]]
+;
+entry:
+ %.cast = bitcast <8 x i8> %c to <2 x i32>
+ %shuffle = shufflevector <2 x i32> %.cast, <2 x i32> poison, <2 x i32> <i32 1, i32 1>
+ %.cast5 = bitcast <2 x i32> %shuffle to <8 x i8>
+ %vdot1.i = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %.cast5) #2
+ ret <2 x i32> %vdot1.i
+}
+
+define <4 x i32> @test_vdotq_lane_u32(<4 x i32> %a, <16 x i8> %b, <8 x i8> %c) sanitize_memory {
+; CHECK-LABEL: define <4 x i32> @test_vdotq_lane_u32(
+; CHECK-SAME: <4 x i32> [[A:%.*]], <16 x i8> [[B:%.*]], <8 x i8> [[C:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
+; CHECK-NEXT: [[DOTCAST:%.*]] = bitcast <8 x i8> [[C]] to <2 x i32>
+; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <2 x i32> [[TMP3]], <2 x i32> splat (i32 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <2 x i32> [[DOTCAST]], <2 x i32> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[_MSPROP]] to <16 x i8>
+; CHECK-NEXT: [[DOTCAST3:%.*]] = bitcast <4 x i32> [[SHUFFLE]] to <16 x i8>
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
+; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP6]], 0
+; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; CHECK-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to i128
+; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i128 [[TMP7]], 0
+; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
+; CHECK-NEXT: br i1 [[_MSOR3]], label %[[BB8:.*]], label %[[BB9:.*]], !prof [[PROF1]]
+; CHECK: [[BB8]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB9]]:
+; CHECK-NEXT: [[VDOT1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> [[A]], <16 x i8> [[B]], <16 x i8> [[DOTCAST3]])
+; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <4 x i32> [[VDOT1_I]]
+;
+entry:
+ %.cast = bitcast <8 x i8> %c to <2 x i32>
+ %shuffle = shufflevector <2 x i32> %.cast, <2 x i32> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ %.cast3 = bitcast <4 x i32> %shuffle to <16 x i8>
+ %vdot1.i = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> %a, <16 x i8> %b, <16 x i8> %.cast3) #2
+ ret <4 x i32> %vdot1.i
+}
+
+define <2 x i32> @test_vdot_laneq_u32(<2 x i32> %a, <8 x i8> %b, <16 x i8> %c) sanitize_memory {
+; CHECK-LABEL: define <2 x i32> @test_vdot_laneq_u32(
+; CHECK-SAME: <2 x i32> [[A:%.*]], <8 x i8> [[B:%.*]], <16 x i8> [[C:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 8), align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
+; CHECK-NEXT: [[DOTCAST:%.*]] = bitcast <16 x i8> [[C]] to <4 x i32>
+; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> splat (i32 -1), <2 x i32> <i32 1, i32 1>
+; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x i32> [[DOTCAST]], <4 x i32> poison, <2 x i32> <i32 1, i32 1>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <2 x i32> [[_MSPROP]] to <8 x i8>
+; CHECK-NEXT: [[DOTCAST5:%.*]] = bitcast <2 x i32> [[SHUFFLE]] to <8 x i8>
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <2 x i32> [[TMP1]] to i64
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP5]], 0
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i8> [[TMP2]] to i64
+; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i64 [[TMP6]], 0
+; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; CHECK-NEXT: [[TMP7:%.*]] = bitcast <8 x i8> [[TMP4]] to i64
+; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i64 [[TMP7]], 0
+; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
+; CHECK-NEXT: br i1 [[_MSOR3]], label %[[BB8:.*]], label %[[BB9:.*]], !prof [[PROF1]]
+; CHECK: [[BB8]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB9]]:
+; CHECK-NEXT: [[VDOT1_I:%.*]] = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> [[A]], <8 x i8> [[B]], <8 x i8> [[DOTCAST5]])
+; CHECK-NEXT: store <2 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <2 x i32> [[VDOT1_I]]
+;
+entry:
+ %.cast = bitcast <16 x i8> %c to <4 x i32>
+ %shuffle = shufflevector <4 x i32> %.cast, <4 x i32> poison, <2 x i32> <i32 1, i32 1>
+ %.cast5 = bitcast <2 x i32> %shuffle to <8 x i8>
+ %vdot1.i = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %.cast5) #2
+ ret <2 x i32> %vdot1.i
+}
+
+define <4 x i32> @test_vdotq_laneq_u32(<4 x i32> %a, <16 x i8> %b, <16 x i8> %c) sanitize_memory {
+; CHECK-LABEL: define <4 x i32> @test_vdotq_laneq_u32(
+; CHECK-SAME: <4 x i32> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i8> [[C:%.*]]) #[[ATTR1]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 32), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr getelementptr (i8, ptr @__msan_param_tls, i64 16), align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
+; CHECK-NEXT: [[DOTCAST:%.*]] = bitcast <16 x i8> [[C]] to <4 x i32>
+; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> splat (i32 -1), <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x i32> [[DOTCAST]], <4 x i32> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x i32> [[_MSPROP]] to <16 x i8>
+; CHECK-NEXT: [[DOTCAST3:%.*]] = bitcast <4 x i32> [[SHUFFLE]] to <16 x i8>
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP1]] to i128
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i128 [[TMP5]], 0
+; CHECK-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP2]] to i128
+; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i128 [[TMP6]], 0
+; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; CHECK-NEXT: [[TMP7:%.*]] = bitcast <16 x i8> [[TMP4]] to i128
+; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i128 [[TMP7]], 0
+; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
+; CHECK-NEXT: br i1 [[_MSOR3]], label %[[BB8:.*]], label %[[BB9:.*]], !prof [[PROF1]]
+; CHECK: [[BB8]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB9]]:
+; CHECK-NEXT: [[VDOT1_I:%.*]] = call <4 x i32> @llvm.aarch64.neon.udot.v4i32.v16i8(<4 x i32> [[A]], <16 x i8> [[B]], <16 x i8> [[DOTCAST3]])
+; CHECK-NEXT: store <4 x i32> zeroinitializer, ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <4 x i32> [[VDOT1_I]]
+;
+entry:
+ %.cast = bitcast <16 x i8> %c to <4 x i32>
+ %shuffle = shufflevector <4 x i32> %.cast, <4 x i32> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
----------------
vitalybuka wrote:
Can we pass the shuffled/cast values in as function parameters, instead of building them with shufflevector and bitcast inside each test?
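
A minimal sketch of that suggestion (the function name, signature, and comment below are hypothetical, not part of the PR): take the already-shuffled lane operand directly as a parameter, so the test exercises MSan's shadow propagation for the intrinsic itself rather than the shufflevector/bitcast preamble.

define <2 x i32> @test_vdot_lane_u32_param(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c_lane) sanitize_memory {
entry:
  ; %c_lane stands in for the value the current test builds via
  ; bitcast + shufflevector + bitcast from %c.
  %vdot1.i = call <2 x i32> @llvm.aarch64.neon.udot.v2i32.v8i8(<2 x i32> %a, <8 x i8> %b, <8 x i8> %c_lane)
  ret <2 x i32> %vdot1.i
}

One trade-off to weigh: the forked CodeGen test keeps the shuffle so the lane form is exercised end to end, whereas passing the operand in drops that preamble from the checks and isolates the intrinsic's instrumentation.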
https://github.com/llvm/llvm-project/pull/174042