[llvm] [X86] Add missing vNbf16 handling in X86CallingConv.td file (PR #127102)
Mikołaj Piróg via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 17 06:04:28 PST 2025
https://github.com/mikolaj-pirog updated https://github.com/llvm/llvm-project/pull/127102
From bb19191f64740a78f601b93cfe0064a4c65ce70c Mon Sep 17 00:00:00 2001
From: "Pirog, Mikolaj Maciej" <mikolaj.maciej.pirog at intel.com>
Date: Thu, 13 Feb 2025 18:49:32 +0100
Subject: [PATCH 1/4] Add missing v16bf16 handling
---
llvm/lib/Target/X86/X86CallingConv.td | 48 +++++++++++++--------------
1 file changed, 24 insertions(+), 24 deletions(-)
diff --git a/llvm/lib/Target/X86/X86CallingConv.td b/llvm/lib/Target/X86/X86CallingConv.td
index 72b103b0bb0c5..cf164acba9ec0 100644
--- a/llvm/lib/Target/X86/X86CallingConv.td
+++ b/llvm/lib/Target/X86/X86CallingConv.td
@@ -267,19 +267,19 @@ def RetCC_X86Common : CallingConv<[
// Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
// can only be used by ABI non-compliant code. If the target doesn't have XMM
// registers, it won't have vector types.
- CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+ CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
// 256-bit vectors are returned in YMM0 and YMM1, when they fit. YMM2 and YMM3
// can only be used by ABI non-compliant code. This vector type is only
// supported while using the AVX target feature.
- CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,
// 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3
// can only be used by ABI non-compliant code. This vector type is only
// supported while using the AVX-512 target feature.
- CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+ CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,
// Long double types are always returned in FP0 (even with SSE),
@@ -565,7 +565,7 @@ def CC_X86_64_C : CallingConv<[
CCIfType<[v64i1], CCPromoteToType<v64i8>>,
// The first 8 FP/Vector arguments are passed in XMM registers.
- CCIfType<[f16, f32, f64, f128, v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+ CCIfType<[f16, f32, f64, f128, v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
CCIfSubtarget<"hasSSE1()",
CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
@@ -574,13 +574,13 @@ def CC_X86_64_C : CallingConv<[
// FIXME: This isn't precisely correct; the x86-64 ABI document says that
// fixed arguments to vararg functions are supposed to be passed in
// registers. Actually modeling that would be a lot of work, though.
- CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+ CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
CCIfSubtarget<"hasAVX()",
CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
YMM4, YMM5, YMM6, YMM7]>>>>,
// The first 8 512-bit vector arguments are passed in ZMM registers.
- CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+ CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
CCIfSubtarget<"hasAVX512()",
CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>,
@@ -593,14 +593,14 @@ def CC_X86_64_C : CallingConv<[
CCIfType<[f80, f128], CCAssignToStack<0, 0>>,
// Vectors get 16-byte stack slots that are 16-byte aligned.
- CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64], CCAssignToStack<16, 16>>,
+ CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64], CCAssignToStack<16, 16>>,
// 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
- CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
CCAssignToStack<32, 32>>,
// 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
- CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+ CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
CCAssignToStack<64, 64>>
]>;
@@ -631,13 +631,13 @@ def CC_X86_Win64_C : CallingConv<[
CCIfCFGuardTarget<CCAssignToReg<[RAX]>>,
// 128 bit vectors are passed by pointer
- CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64], CCPassIndirect<i64>>,
+ CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64], CCPassIndirect<i64>>,
// 256 bit vectors are passed by pointer
- CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64], CCPassIndirect<i64>>,
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64], CCPassIndirect<i64>>,
// 512 bit vectors are passed by pointer
- CCIfType<[v64i8, v32i16, v16i32, v32f16, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,
+ CCIfType<[v64i8, v32i16, v16i32, v32f16, v32bf16, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,
// Long doubles are passed by pointer
CCIfType<[f80], CCPassIndirect<i64>>,
@@ -734,15 +734,15 @@ def CC_X86_64_AnyReg : CallingConv<[
/// values are spilled on the stack.
def CC_X86_32_Vector_Common : CallingConv<[
// Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
- CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+ CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
CCAssignToStack<16, 16>>,
// 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
- CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
CCAssignToStack<32, 32>>,
// 512-bit AVX-512 vectors get 64-byte stack slots that are 64-byte aligned.
- CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+ CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
CCAssignToStack<64, 64>>
]>;
@@ -750,15 +750,15 @@ def CC_X86_32_Vector_Common : CallingConv<[
/// values are spilled on the stack.
def CC_X86_Win32_Vector : CallingConv<[
// Other SSE vectors get 16-byte stack slots that are 4-byte aligned.
- CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+ CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
CCAssignToStack<16, 4>>,
// 256-bit AVX vectors get 32-byte stack slots that are 4-byte aligned.
- CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
CCAssignToStack<32, 4>>,
// 512-bit AVX-512 vectors get 64-byte stack slots that are 4-byte aligned.
- CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+ CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
CCAssignToStack<64, 4>>
]>;
@@ -766,16 +766,16 @@ def CC_X86_Win32_Vector : CallingConv<[
// vector registers
def CC_X86_32_Vector_Standard : CallingConv<[
// SSE vector arguments are passed in XMM registers.
- CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+ CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
CCAssignToReg<[XMM0, XMM1, XMM2]>>>,
// AVX 256-bit vector arguments are passed in YMM registers.
- CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+ CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
CCIfSubtarget<"hasAVX()",
CCAssignToReg<[YMM0, YMM1, YMM2]>>>>,
// AVX 512-bit vector arguments are passed in ZMM registers.
- CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+ CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
CCAssignToReg<[ZMM0, ZMM1, ZMM2]>>>,
CCIfIsVarArgOnWin<CCDelegateTo<CC_X86_Win32_Vector>>,
@@ -786,16 +786,16 @@ def CC_X86_32_Vector_Standard : CallingConv<[
// vector registers.
def CC_X86_32_Vector_Darwin : CallingConv<[
// SSE vector arguments are passed in XMM registers.
- CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+ CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,
// AVX 256-bit vector arguments are passed in YMM registers.
- CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+ CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
CCIfSubtarget<"hasAVX()",
CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,
// AVX 512-bit vector arguments are passed in ZMM registers.
- CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+ CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>>,
CCDelegateTo<CC_X86_32_Vector_Common>
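For illustration only (not part of the patch): with the v8bf16/v16bf16/v32bf16 entries added above, the calling-convention tables can assign a bf16 vector return value to a vector register directly, e.g. YMM0 for a 256-bit value under AVX. A minimal IR sketch of that case:

define <16 x bfloat> @ret_v16bf16(<16 x bfloat> %x) nounwind {
  ret <16 x bfloat> %x
}

With the patch applied this is expected to lower to a plain retq, which is also what the autogenerated test below checks for return_arg_v16bf16.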
From 57c5ca422910c99a11556e6646432f46819fb8de Mon Sep 17 00:00:00 2001
From: "Pirog, Mikolaj Maciej" <mikolaj.maciej.pirog at intel.com>
Date: Fri, 14 Feb 2025 13:57:45 +0100
Subject: [PATCH 2/4] Remove custom bf16 lowering from the X86ISelLoweringCall
---
llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 9 ---------
1 file changed, 9 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index ee4bb758102f4..86474d094399e 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -123,15 +123,6 @@ MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
!Subtarget.hasX87())
return MVT::i32;
- if (isTypeLegal(MVT::f16)) {
- if (VT.isVector() && VT.getVectorElementType() == MVT::bf16)
- return getRegisterTypeForCallingConv(
- Context, CC, VT.changeVectorElementType(MVT::f16));
-
- if (VT == MVT::bf16)
- return MVT::f16;
- }
-
return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}
From 4da7807501c2e8dbdf7a2ff2086b40474d46aea0 Mon Sep 17 00:00:00 2001
From: "Pirog, Mikolaj Maciej" <mikolaj.maciej.pirog at intel.com>
Date: Fri, 14 Feb 2025 15:54:43 +0100
Subject: [PATCH 3/4] Revert "Remove custom bf16 lowering from the
X86ISelLoweringCall"
This reverts commit 57c5ca422910c99a11556e6646432f46819fb8de.
---
llvm/lib/Target/X86/X86ISelLoweringCall.cpp | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index 86474d094399e..ee4bb758102f4 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -123,6 +123,15 @@ MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
!Subtarget.hasX87())
return MVT::i32;
+ if (isTypeLegal(MVT::f16)) {
+ if (VT.isVector() && VT.getVectorElementType() == MVT::bf16)
+ return getRegisterTypeForCallingConv(
+ Context, CC, VT.changeVectorElementType(MVT::f16));
+
+ if (VT == MVT::bf16)
+ return MVT::f16;
+ }
+
return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}
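To keep the intent of the restored block visible: when f16 is legal on the subtarget (for example with +avx512fp16, a configuration the new test does not cover), getRegisterTypeForCallingConv maps scalar bf16 to the f16 register type and bf16 vectors to the matching f16 vector types. A hedged IR sketch of the scalar case, assuming such a subtarget:

define bfloat @ret_bf16_f16_legal(bfloat %x) nounwind {
  ret bfloat %x
}

Here the bfloat argument and return value are expected to stay in the low word of an XMM register, the same register path the f16 type would take.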
From f0344135a751d9f7d14a55580a31e7f3f223471d Mon Sep 17 00:00:00 2001
From: "Pirog, Mikolaj Maciej" <mikolaj.maciej.pirog at intel.com>
Date: Mon, 17 Feb 2025 15:04:09 +0100
Subject: [PATCH 4/4] Create test
---
llvm/test/CodeGen/X86/bfloat-calling-conv.ll | 1035 ++++++++++++++++++
1 file changed, 1035 insertions(+)
create mode 100644 llvm/test/CodeGen/X86/bfloat-calling-conv.ll
diff --git a/llvm/test/CodeGen/X86/bfloat-calling-conv.ll b/llvm/test/CodeGen/X86/bfloat-calling-conv.ll
new file mode 100644
index 0000000000000..d79297d7f012b
--- /dev/null
+++ b/llvm/test/CodeGen/X86/bfloat-calling-conv.ll
@@ -0,0 +1,1035 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -fast-isel=false -mtriple=x86_64-linux-unknown -mattr=+sse2 < %s | FileCheck -check-prefixes=SSE2 %s
+; RUN: llc -fast-isel -mtriple=x86_64-linux-unknown -mattr=+sse2 < %s | FileCheck -check-prefixes=FAST_ISEL_SSE2 %s
+; RUN: llc -fast-isel=false -mtriple=x86_64-linux-unknown -mattr=+avx512bf16,avx512vl < %s | FileCheck -check-prefixes=AVX512BF16 %s
+; RUN: llc -fast-isel -mtriple=x86_64-linux-unknown -mattr=+avx512bf16,avx512vl < %s | FileCheck -check-prefixes=FAST_ISEL_AVX512BF16 %s
+; RUN: llc -fast-isel=false -mtriple=x86_64-linux-unknown -mattr=+avxneconvert < %s | FileCheck -check-prefixes=AVXNECONVERT %s
+; RUN: llc -fast-isel -mtriple=x86_64-linux-unknown -mattr=+avxneconvert < %s | FileCheck -check-prefixes=FAST_ISEL_AVXNECONVERT %s
+
+define bfloat @return_arg_bf16(bfloat %x) #0 {
+; SSE2-LABEL: return_arg_bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: return_arg_bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %rax
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: popq %rax
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: return_arg_bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: return_arg_bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: vpextrw $0, %xmm0, %eax
+; FAST_ISEL_AVX512BF16-NEXT: shll $16, %eax
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %eax, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: vcvtneps2bf16 %xmm0, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: return_arg_bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: return_arg_bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: vpextrw $0, %xmm0, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: shll $16, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %eax, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: {vex} vcvtneps2bf16 %xmm0, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ ret bfloat %x
+}
+
+define <2 x bfloat> @return_arg_v2bf16(<2 x bfloat> %x) #0 {
+; SSE2-LABEL: return_arg_v2bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: return_arg_v2bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: subq $40, %rsp
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, %xmm0
+; FAST_ISEL_SSE2-NEXT: addq $40, %rsp
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: return_arg_v2bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: return_arg_v2bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: return_arg_v2bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: return_arg_v2bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ ret <2 x bfloat> %x
+}
+
+define <3 x bfloat> @return_arg_v3bf16(<3 x bfloat> %x) #0 {
+; SSE2-LABEL: return_arg_v3bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: return_arg_v3bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: subq $40, %rsp
+; FAST_ISEL_SSE2-NEXT: pextrw $2, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; FAST_ISEL_SSE2-NEXT: movaps %xmm1, %xmm0
+; FAST_ISEL_SSE2-NEXT: addq $40, %rsp
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: return_arg_v3bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: return_arg_v3bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: vpextrw $2, %xmm0, %eax
+; FAST_ISEL_AVX512BF16-NEXT: shll $16, %eax
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %eax, %xmm1
+; FAST_ISEL_AVX512BF16-NEXT: vpextrw $1, %xmm0, %eax
+; FAST_ISEL_AVX512BF16-NEXT: shll $16, %eax
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %eax, %xmm2
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %xmm0, %eax
+; FAST_ISEL_AVX512BF16-NEXT: shll $16, %eax
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %eax, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: vcvtneps2bf16 %xmm1, %xmm1
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %xmm1, %eax
+; FAST_ISEL_AVX512BF16-NEXT: vcvtneps2bf16 %xmm0, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: vcvtneps2bf16 %xmm2, %xmm1
+; FAST_ISEL_AVX512BF16-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; FAST_ISEL_AVX512BF16-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: return_arg_v3bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: return_arg_v3bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: vpextrw $2, %xmm0, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: shll $16, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %eax, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vpextrw $1, %xmm0, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: shll $16, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %eax, %xmm2
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %xmm0, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: shll $16, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %eax, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: {vex} vcvtneps2bf16 %xmm1, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %xmm1, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: {vex} vcvtneps2bf16 %xmm0, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: {vex} vcvtneps2bf16 %xmm2, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; FAST_ISEL_AVXNECONVERT-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovq %xmm1, %rax
+; FAST_ISEL_AVXNECONVERT-NEXT: movl %eax, %ecx
+; FAST_ISEL_AVXNECONVERT-NEXT: shrl $16, %ecx
+; FAST_ISEL_AVXNECONVERT-NEXT: vpinsrw $0, %ecx, %xmm0, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; FAST_ISEL_AVXNECONVERT-NEXT: shrq $32, %rax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %eax, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vpbroadcastw %xmm1, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ ret <3 x bfloat> %x
+}
+
+define <4 x bfloat> @return_arg_v4bf16(<4 x bfloat> %x) #0 {
+; SSE2-LABEL: return_arg_v4bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: return_arg_v4bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: subq $56, %rsp
+; FAST_ISEL_SSE2-NEXT: pextrw $3, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $2, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; FAST_ISEL_SSE2-NEXT: addq $56, %rsp
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: return_arg_v4bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: return_arg_v4bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: return_arg_v4bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: return_arg_v4bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ ret <4 x bfloat> %x
+}
+
+define <8 x bfloat> @return_arg_v8bf16(<8 x bfloat> %x) #0 {
+; SSE2-LABEL: return_arg_v8bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: return_arg_v8bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %r14
+; FAST_ISEL_SSE2-NEXT: pushq %rbx
+; FAST_ISEL_SSE2-NEXT: subq $56, %rsp
+; FAST_ISEL_SSE2-NEXT: pextrw $7, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $6, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $5, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $4, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $3, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $2, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm1
+; FAST_ISEL_SSE2-NEXT: movd %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm1
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; FAST_ISEL_SSE2-NEXT: addq $56, %rsp
+; FAST_ISEL_SSE2-NEXT: popq %rbx
+; FAST_ISEL_SSE2-NEXT: popq %r14
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: return_arg_v8bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: return_arg_v8bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: return_arg_v8bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: return_arg_v8bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ ret <8 x bfloat> %x
+}
+
+define <16 x bfloat> @return_arg_v16bf16(<16 x bfloat> %x) #0 {
+;
+; SSE2-LABEL: return_arg_v16bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: return_arg_v16bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %r14
+; FAST_ISEL_SSE2-NEXT: pushq %rbx
+; FAST_ISEL_SSE2-NEXT: subq $104, %rsp
+; FAST_ISEL_SSE2-NEXT: pextrw $7, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $6, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $5, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $4, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $3, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $2, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $7, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $6, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $5, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $4, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $3, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $2, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm1
+; FAST_ISEL_SSE2-NEXT: movd %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; FAST_ISEL_SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: addq $104, %rsp
+; FAST_ISEL_SSE2-NEXT: popq %rbx
+; FAST_ISEL_SSE2-NEXT: popq %r14
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: return_arg_v16bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: return_arg_v16bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: return_arg_v16bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: return_arg_v16bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ ret <16 x bfloat> %x
+}
+
+declare bfloat @returns_bf16(bfloat)
+declare <2 x bfloat> @returns_v2bf16(<2 x bfloat>)
+declare <3 x bfloat> @returns_v3bf16(<3 x bfloat>)
+declare <4 x bfloat> @returns_v4bf16(<4 x bfloat>)
+declare <8 x bfloat> @returns_v8bf16(<8 x bfloat>)
+declare <16 x bfloat> @returns_v16bf16(<16 x bfloat>)
+
+define void @call_ret_bf16(ptr %ptr) #0 {
+;
+; SSE2-LABEL: call_ret_bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movq %rdi, %rbx
+; SSE2-NEXT: pinsrw $0, (%rdi), %xmm0
+; SSE2-NEXT: callq returns_bf16 at PLT
+; SSE2-NEXT: pextrw $0, %xmm0, %eax
+; SSE2-NEXT: movw %ax, (%rbx)
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: call_ret_bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %rbx
+; FAST_ISEL_SSE2-NEXT: movq %rdi, %rbx
+; FAST_ISEL_SSE2-NEXT: pinsrw $0, (%rdi), %xmm0
+; FAST_ISEL_SSE2-NEXT: callq returns_bf16 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movw %ax, (%rbx)
+; FAST_ISEL_SSE2-NEXT: popq %rbx
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: call_ret_bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: pushq %rbx
+; AVX512BF16-NEXT: movq %rdi, %rbx
+; AVX512BF16-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX512BF16-NEXT: callq returns_bf16 at PLT
+; AVX512BF16-NEXT: vpextrw $0, %xmm0, (%rbx)
+; AVX512BF16-NEXT: popq %rbx
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: call_ret_bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: pushq %rbx
+; FAST_ISEL_AVX512BF16-NEXT: movq %rdi, %rbx
+; FAST_ISEL_AVX512BF16-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: callq returns_bf16 at PLT
+; FAST_ISEL_AVX512BF16-NEXT: vpextrw $0, %xmm0, (%rbx)
+; FAST_ISEL_AVX512BF16-NEXT: popq %rbx
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: call_ret_bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: pushq %rbx
+; AVXNECONVERT-NEXT: movq %rdi, %rbx
+; AVXNECONVERT-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVXNECONVERT-NEXT: callq returns_bf16 at PLT
+; AVXNECONVERT-NEXT: vpextrw $0, %xmm0, (%rbx)
+; AVXNECONVERT-NEXT: popq %rbx
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: call_ret_bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: pushq %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: movq %rdi, %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: callq returns_bf16 at PLT
+; FAST_ISEL_AVXNECONVERT-NEXT: vpextrw $0, %xmm0, (%rbx)
+; FAST_ISEL_AVXNECONVERT-NEXT: popq %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ %val = load bfloat, ptr %ptr
+ %bf16 = call bfloat @returns_bf16(bfloat %val)
+ store bfloat %bf16, ptr %ptr
+ ret void
+}
+
+define void @call_ret_v2bf16(ptr %ptr) #0 {
+;
+; SSE2-LABEL: call_ret_v2bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movq %rdi, %rbx
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: callq returns_v2bf16 at PLT
+; SSE2-NEXT: pextrw $0, %xmm0, %eax
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: movw %ax, (%rbx)
+; SSE2-NEXT: pextrw $0, %xmm0, %eax
+; SSE2-NEXT: movw %ax, 2(%rbx)
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: call_ret_v2bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %rbx
+; FAST_ISEL_SSE2-NEXT: movq %rdi, %rbx
+; FAST_ISEL_SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq returns_v2bf16 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: psrld $16, %xmm0
+; FAST_ISEL_SSE2-NEXT: movw %ax, (%rbx)
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movw %ax, 2(%rbx)
+; FAST_ISEL_SSE2-NEXT: popq %rbx
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: call_ret_v2bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: pushq %rbx
+; AVX512BF16-NEXT: movq %rdi, %rbx
+; AVX512BF16-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512BF16-NEXT: callq returns_v2bf16 at PLT
+; AVX512BF16-NEXT: vmovss %xmm0, (%rbx)
+; AVX512BF16-NEXT: popq %rbx
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: call_ret_v2bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: pushq %rbx
+; FAST_ISEL_AVX512BF16-NEXT: movq %rdi, %rbx
+; FAST_ISEL_AVX512BF16-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_AVX512BF16-NEXT: callq returns_v2bf16 at PLT
+; FAST_ISEL_AVX512BF16-NEXT: vmovss %xmm0, (%rbx)
+; FAST_ISEL_AVX512BF16-NEXT: popq %rbx
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: call_ret_v2bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: pushq %rbx
+; AVXNECONVERT-NEXT: movq %rdi, %rbx
+; AVXNECONVERT-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVXNECONVERT-NEXT: callq returns_v2bf16 at PLT
+; AVXNECONVERT-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVXNECONVERT-NEXT: vpextrw $0, %xmm0, (%rbx)
+; AVXNECONVERT-NEXT: vpextrw $0, %xmm1, 2(%rbx)
+; AVXNECONVERT-NEXT: popq %rbx
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: call_ret_v2bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: pushq %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: movq %rdi, %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_AVXNECONVERT-NEXT: callq returns_v2bf16 at PLT
+; FAST_ISEL_AVXNECONVERT-NEXT: vpsrld $16, %xmm0, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vpextrw $0, %xmm0, (%rbx)
+; FAST_ISEL_AVXNECONVERT-NEXT: vpextrw $0, %xmm1, 2(%rbx)
+; FAST_ISEL_AVXNECONVERT-NEXT: popq %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ %val = load <2 x bfloat>, ptr %ptr
+ %bf16 = call <2 x bfloat> @returns_v2bf16(<2 x bfloat> %val)
+ store <2 x bfloat> %bf16, ptr %ptr
+ ret void
+}
+
+define void @call_ret_v3bf16(ptr %ptr) #0 {
+;
+; SSE2-LABEL: call_ret_v3bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movq %rdi, %rbx
+; SSE2-NEXT: movl 4(%rdi), %eax
+; SSE2-NEXT: pinsrw $0, %eax, %xmm1
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: callq returns_v3bf16 at PLT
+; SSE2-NEXT: movd %xmm0, (%rbx)
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; SSE2-NEXT: pextrw $0, %xmm0, %eax
+; SSE2-NEXT: movw %ax, 4(%rbx)
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: call_ret_v3bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %rbx
+; FAST_ISEL_SSE2-NEXT: movq %rdi, %rbx
+; FAST_ISEL_SSE2-NEXT: movl 4(%rdi), %eax
+; FAST_ISEL_SSE2-NEXT: pinsrw $0, %eax, %xmm1
+; FAST_ISEL_SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; FAST_ISEL_SSE2-NEXT: callq returns_v3bf16 at PLT
+; FAST_ISEL_SSE2-NEXT: movd %xmm0, (%rbx)
+; FAST_ISEL_SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movw %ax, 4(%rbx)
+; FAST_ISEL_SSE2-NEXT: popq %rbx
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: call_ret_v3bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: pushq %rbx
+; AVX512BF16-NEXT: movq %rdi, %rbx
+; AVX512BF16-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512BF16-NEXT: callq returns_v3bf16 at PLT
+; AVX512BF16-NEXT: vmovss %xmm0, (%rbx)
+; AVX512BF16-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX512BF16-NEXT: vpextrw $0, %xmm0, 4(%rbx)
+; AVX512BF16-NEXT: popq %rbx
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: call_ret_v3bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: pushq %rbx
+; FAST_ISEL_AVX512BF16-NEXT: movq %rdi, %rbx
+; FAST_ISEL_AVX512BF16-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FAST_ISEL_AVX512BF16-NEXT: callq returns_v3bf16 at PLT
+; FAST_ISEL_AVX512BF16-NEXT: vmovss %xmm0, (%rbx)
+; FAST_ISEL_AVX512BF16-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; FAST_ISEL_AVX512BF16-NEXT: vpextrw $0, %xmm0, 4(%rbx)
+; FAST_ISEL_AVX512BF16-NEXT: popq %rbx
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: call_ret_v3bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: pushq %rbx
+; AVXNECONVERT-NEXT: movq %rdi, %rbx
+; AVXNECONVERT-NEXT: movl 4(%rdi), %eax
+; AVXNECONVERT-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; AVXNECONVERT-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVXNECONVERT-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
+; AVXNECONVERT-NEXT: callq returns_v3bf16 at PLT
+; AVXNECONVERT-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVXNECONVERT-NEXT: vmovss %xmm0, (%rbx)
+; AVXNECONVERT-NEXT: vpextrw $0, %xmm1, 4(%rbx)
+; AVXNECONVERT-NEXT: popq %rbx
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: call_ret_v3bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: pushq %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: movq %rdi, %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: movl 4(%rdi), %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; FAST_ISEL_AVXNECONVERT-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
+; FAST_ISEL_AVXNECONVERT-NEXT: callq returns_v3bf16 at PLT
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovss %xmm0, (%rbx)
+; FAST_ISEL_AVXNECONVERT-NEXT: vpextrw $0, %xmm1, 4(%rbx)
+; FAST_ISEL_AVXNECONVERT-NEXT: popq %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ %val = load <3 x bfloat>, ptr %ptr
+ %bf16 = call <3 x bfloat> @returns_v3bf16(<3 x bfloat> %val)
+ store <3 x bfloat> %bf16, ptr %ptr
+ ret void
+}
+
+define void @call_ret_v4bf16(ptr %ptr) #0 {
+;
+; SSE2-LABEL: call_ret_v4bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movq %rdi, %rbx
+; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: callq returns_v4bf16 at PLT
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pextrw $0, %xmm0, %eax
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; SSE2-NEXT: psrlq $48, %xmm2
+; SSE2-NEXT: movw %ax, (%rbx)
+; SSE2-NEXT: pextrw $0, %xmm2, %eax
+; SSE2-NEXT: movw %ax, 6(%rbx)
+; SSE2-NEXT: pextrw $0, %xmm1, %eax
+; SSE2-NEXT: movw %ax, 4(%rbx)
+; SSE2-NEXT: pextrw $0, %xmm0, %eax
+; SSE2-NEXT: movw %ax, 2(%rbx)
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: call_ret_v4bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %rbx
+; FAST_ISEL_SSE2-NEXT: movq %rdi, %rbx
+; FAST_ISEL_SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; FAST_ISEL_SSE2-NEXT: callq returns_v4bf16 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, %xmm1
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, %xmm2
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: psrld $16, %xmm0
+; FAST_ISEL_SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; FAST_ISEL_SSE2-NEXT: psrlq $48, %xmm2
+; FAST_ISEL_SSE2-NEXT: movw %ax, (%rbx)
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm2, %eax
+; FAST_ISEL_SSE2-NEXT: movw %ax, 6(%rbx)
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: movw %ax, 4(%rbx)
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movw %ax, 2(%rbx)
+; FAST_ISEL_SSE2-NEXT: popq %rbx
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: call_ret_v4bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: pushq %rbx
+; AVX512BF16-NEXT: movq %rdi, %rbx
+; AVX512BF16-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512BF16-NEXT: callq returns_v4bf16 at PLT
+; AVX512BF16-NEXT: vmovlps %xmm0, (%rbx)
+; AVX512BF16-NEXT: popq %rbx
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: call_ret_v4bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: pushq %rbx
+; FAST_ISEL_AVX512BF16-NEXT: movq %rdi, %rbx
+; FAST_ISEL_AVX512BF16-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FAST_ISEL_AVX512BF16-NEXT: callq returns_v4bf16 at PLT
+; FAST_ISEL_AVX512BF16-NEXT: vmovlps %xmm0, (%rbx)
+; FAST_ISEL_AVX512BF16-NEXT: popq %rbx
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: call_ret_v4bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: pushq %rbx
+; AVXNECONVERT-NEXT: movq %rdi, %rbx
+; AVXNECONVERT-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVXNECONVERT-NEXT: callq returns_v4bf16 at PLT
+; AVXNECONVERT-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVXNECONVERT-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVXNECONVERT-NEXT: vpsrlq $48, %xmm0, %xmm3
+; AVXNECONVERT-NEXT: vpextrw $0, %xmm0, (%rbx)
+; AVXNECONVERT-NEXT: vpextrw $0, %xmm3, 6(%rbx)
+; AVXNECONVERT-NEXT: vpextrw $0, %xmm2, 4(%rbx)
+; AVXNECONVERT-NEXT: vpextrw $0, %xmm1, 2(%rbx)
+; AVXNECONVERT-NEXT: popq %rbx
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: call_ret_v4bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: pushq %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: movq %rdi, %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; FAST_ISEL_AVXNECONVERT-NEXT: callq returns_v4bf16 at PLT
+; FAST_ISEL_AVXNECONVERT-NEXT: vpsrld $16, %xmm0, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; FAST_ISEL_AVXNECONVERT-NEXT: vpsrlq $48, %xmm0, %xmm3
+; FAST_ISEL_AVXNECONVERT-NEXT: vpextrw $0, %xmm0, (%rbx)
+; FAST_ISEL_AVXNECONVERT-NEXT: vpextrw $0, %xmm3, 6(%rbx)
+; FAST_ISEL_AVXNECONVERT-NEXT: vpextrw $0, %xmm2, 4(%rbx)
+; FAST_ISEL_AVXNECONVERT-NEXT: vpextrw $0, %xmm1, 2(%rbx)
+; FAST_ISEL_AVXNECONVERT-NEXT: popq %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ %val = load <4 x bfloat>, ptr %ptr
+ %bf16 = call <4 x bfloat> @returns_v4bf16(<4 x bfloat> %val)
+ store <4 x bfloat> %bf16, ptr %ptr
+ ret void
+}
+
+define void @call_ret_v8bf16(ptr %ptr) #0 {
+;
+; SSE2-LABEL: call_ret_v8bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movq %rdi, %rbx
+; SSE2-NEXT: movaps (%rdi), %xmm0
+; SSE2-NEXT: callq returns_v8bf16 at PLT
+; SSE2-NEXT: movaps %xmm0, (%rbx)
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: call_ret_v8bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %rbx
+; FAST_ISEL_SSE2-NEXT: movq %rdi, %rbx
+; FAST_ISEL_SSE2-NEXT: movaps (%rdi), %xmm0
+; FAST_ISEL_SSE2-NEXT: callq returns_v8bf16 at PLT
+; FAST_ISEL_SSE2-NEXT: movaps %xmm0, (%rbx)
+; FAST_ISEL_SSE2-NEXT: popq %rbx
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: call_ret_v8bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: pushq %rbx
+; AVX512BF16-NEXT: movq %rdi, %rbx
+; AVX512BF16-NEXT: vmovaps (%rdi), %xmm0
+; AVX512BF16-NEXT: callq returns_v8bf16 at PLT
+; AVX512BF16-NEXT: vmovaps %xmm0, (%rbx)
+; AVX512BF16-NEXT: popq %rbx
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: call_ret_v8bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: pushq %rbx
+; FAST_ISEL_AVX512BF16-NEXT: movq %rdi, %rbx
+; FAST_ISEL_AVX512BF16-NEXT: vmovaps (%rdi), %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: callq returns_v8bf16 at PLT
+; FAST_ISEL_AVX512BF16-NEXT: vmovaps %xmm0, (%rbx)
+; FAST_ISEL_AVX512BF16-NEXT: popq %rbx
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: call_ret_v8bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: pushq %rbx
+; AVXNECONVERT-NEXT: movq %rdi, %rbx
+; AVXNECONVERT-NEXT: vmovaps (%rdi), %xmm0
+; AVXNECONVERT-NEXT: callq returns_v8bf16 at PLT
+; AVXNECONVERT-NEXT: vmovaps %xmm0, (%rbx)
+; AVXNECONVERT-NEXT: popq %rbx
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: call_ret_v8bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: pushq %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: movq %rdi, %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovaps (%rdi), %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: callq returns_v8bf16 at PLT
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovaps %xmm0, (%rbx)
+; FAST_ISEL_AVXNECONVERT-NEXT: popq %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ %val = load <8 x bfloat>, ptr %ptr
+ %bf16 = call <8 x bfloat> @returns_v8bf16(<8 x bfloat> %val)
+ store <8 x bfloat> %bf16, ptr %ptr
+ ret void
+}
+
+define void @call_ret_v16bf16(ptr %ptr) #0 {
+;
+; SSE2-LABEL: call_ret_v16bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rbx
+; SSE2-NEXT: movq %rdi, %rbx
+; SSE2-NEXT: movaps (%rdi), %xmm0
+; SSE2-NEXT: movaps 16(%rdi), %xmm1
+; SSE2-NEXT: callq returns_v16bf16 at PLT
+; SSE2-NEXT: movaps %xmm1, 16(%rbx)
+; SSE2-NEXT: movaps %xmm0, (%rbx)
+; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: call_ret_v16bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %rbx
+; FAST_ISEL_SSE2-NEXT: movq %rdi, %rbx
+; FAST_ISEL_SSE2-NEXT: movaps (%rdi), %xmm0
+; FAST_ISEL_SSE2-NEXT: movaps 16(%rdi), %xmm1
+; FAST_ISEL_SSE2-NEXT: callq returns_v16bf16 at PLT
+; FAST_ISEL_SSE2-NEXT: movaps %xmm1, 16(%rbx)
+; FAST_ISEL_SSE2-NEXT: movaps %xmm0, (%rbx)
+; FAST_ISEL_SSE2-NEXT: popq %rbx
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: call_ret_v16bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: pushq %rbx
+; AVX512BF16-NEXT: movq %rdi, %rbx
+; AVX512BF16-NEXT: vmovaps (%rdi), %ymm0
+; AVX512BF16-NEXT: callq returns_v16bf16 at PLT
+; AVX512BF16-NEXT: vmovaps %ymm0, (%rbx)
+; AVX512BF16-NEXT: popq %rbx
+; AVX512BF16-NEXT: vzeroupper
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: call_ret_v16bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: pushq %rbx
+; FAST_ISEL_AVX512BF16-NEXT: movq %rdi, %rbx
+; FAST_ISEL_AVX512BF16-NEXT: vmovaps (%rdi), %ymm0
+; FAST_ISEL_AVX512BF16-NEXT: callq returns_v16bf16 at PLT
+; FAST_ISEL_AVX512BF16-NEXT: vmovaps %ymm0, (%rbx)
+; FAST_ISEL_AVX512BF16-NEXT: popq %rbx
+; FAST_ISEL_AVX512BF16-NEXT: vzeroupper
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: call_ret_v16bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: pushq %rbx
+; AVXNECONVERT-NEXT: movq %rdi, %rbx
+; AVXNECONVERT-NEXT: vmovaps (%rdi), %ymm0
+; AVXNECONVERT-NEXT: callq returns_v16bf16 at PLT
+; AVXNECONVERT-NEXT: vmovaps %ymm0, (%rbx)
+; AVXNECONVERT-NEXT: popq %rbx
+; AVXNECONVERT-NEXT: vzeroupper
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: call_ret_v16bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: pushq %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: movq %rdi, %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovaps (%rdi), %ymm0
+; FAST_ISEL_AVXNECONVERT-NEXT: callq returns_v16bf16 at PLT
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovaps %ymm0, (%rbx)
+; FAST_ISEL_AVXNECONVERT-NEXT: popq %rbx
+; FAST_ISEL_AVXNECONVERT-NEXT: vzeroupper
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ %val = load <16 x bfloat>, ptr %ptr
+ %bf16 = call <16 x bfloat> @returns_v16bf16(<16 x bfloat> %val)
+ store <16 x bfloat> %bf16, ptr %ptr
+ ret void
+}
+
+attributes #0 = { nounwind }