[llvm] 6662fe3 - [X86] Add missing vNbf16 handling in X86CallingConv.td file (#127102)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 18 19:04:13 PST 2025
Author: Mikołaj Piróg
Date: 2025-02-19T11:04:10+08:00
New Revision: 6662fe393cab2c4e550002c276813a89d9ab4443
URL: https://github.com/llvm/llvm-project/commit/6662fe393cab2c4e550002c276813a89d9ab4443
DIFF: https://github.com/llvm/llvm-project/commit/6662fe393cab2c4e550002c276813a89d9ab4443.diff
LOG: [X86] Add missing vNbf16 handling in X86CallingConv.td file (#127102)
The lack of these entries caused clang to crash on the following code:
```c
__m256bh fun(__m256bh arg) {
  return arg;
}

__m256bh run() {
  __m256bh arg = {0};
  fun(arg);
}
```
This caused FastISel to fail, since it handles call lowering based
on the X86CallingConv table.
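At the IR level, the failing pattern boils down to a call that takes and returns a vNbf16 vector. This is a rough sketch (the function names here are made up, but it mirrors the cases in the test added below):
```llvm
declare <16 x bfloat> @callee(<16 x bfloat>)

; With -fast-isel, lowering this call goes through the X86CallingConv tables,
; which previously had no entries for the vNbf16 types.
define <16 x bfloat> @caller(<16 x bfloat> %x) {
  %r = call <16 x bfloat> @callee(<16 x bfloat> %x)
  ret <16 x bfloat> %r
}
```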
Curiously, if FastISel fails somewhere further down the line and
SelectionDAGISel takes over as the fallback, the crash does not occur. The
following code _does not_ crash:
```c
__m256bh fun(__m256bh arg) {
  return arg;
}

__m256bh run() {
  __m256bh arg = {0};
  return fun(arg);
}
```
This is puzzling to me. Obviously, if FastISel fails, the compiler falls
back to something else to lower these calls -- but since the
X86CallingConv table _doesn't_ have entries for vNbf16, how does this
other path manage not to crash? It has to use some other mechanism, one
which doesn't use the table. This raises the following questions:
- How is this lowering accomplished without, presumably, using the
CallingConv entries?
- Why is the table not used? This points to some logic duplication (the
FastISel way vs. the other, bug-free way).
- How can this be tested properly? There is an existing test for vNbf16
values, but it presumably doesn't exercise the FastISel path. This
duplication of logic makes it hard to test, since we don't have direct
control over whether the FastISel path or the other one is used.
Nonetheless, this PR fixes the crash, though I didn't create a test for
it, since I am not yet sure what it should look like. I would like to learn
how the working non-FastISel mechanism works; I tried looking for it,
but haven't managed to find anything yet.
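For what it's worth, the test added below forces each path explicitly through llc's -fast-isel flag. A trimmed sketch of its setup (function names mirroring the new test; CHECK lines omitted):
```llvm
; Both isel paths can be exercised directly via llc's -fast-isel flag,
; as the RUN lines of the new test below do.
; RUN: llc -fast-isel=false -mtriple=x86_64-linux-unknown -mattr=+sse2 < %s
; RUN: llc -fast-isel -mtriple=x86_64-linux-unknown -mattr=+sse2 < %s

declare <8 x bfloat> @returns_v8bf16(<8 x bfloat>)

define <8 x bfloat> @call_ret_v8bf16(ptr %ptr) {
  %val = load <8 x bfloat>, ptr %ptr
  %r = call <8 x bfloat> @returns_v8bf16(<8 x bfloat> %val)
  ret <8 x bfloat> %r
}
```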
Added:
llvm/test/CodeGen/X86/bfloat-calling-conv.ll
Modified:
llvm/lib/Target/X86/X86CallingConv.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86CallingConv.td b/llvm/lib/Target/X86/X86CallingConv.td
index 72b103b0bb0c5..cf164acba9ec0 100644
--- a/llvm/lib/Target/X86/X86CallingConv.td
+++ b/llvm/lib/Target/X86/X86CallingConv.td
@@ -267,19 +267,19 @@ def RetCC_X86Common : CallingConv<[
// Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3
// can only be used by ABI non-compliant code. If the target doesn't have XMM
// registers, it won't have vector types.
- CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+ CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,
// 256-bit vectors are returned in YMM0 and XMM1, when they fit. YMM2 and YMM3
// can only be used by ABI non-compliant code. This vector type is only
// supported while using the AVX target feature.
- CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,
// 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3
// can only be used by ABI non-compliant code. This vector type is only
// supported while using the AVX-512 target feature.
- CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+ CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,
// Long double types are always returned in FP0 (even with SSE),
@@ -565,7 +565,7 @@ def CC_X86_64_C : CallingConv<[
CCIfType<[v64i1], CCPromoteToType<v64i8>>,
// The first 8 FP/Vector arguments are passed in XMM registers.
- CCIfType<[f16, f32, f64, f128, v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+ CCIfType<[f16, f32, f64, f128, v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
CCIfSubtarget<"hasSSE1()",
CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,
@@ -574,13 +574,13 @@ def CC_X86_64_C : CallingConv<[
// FIXME: This isn't precisely correct; the x86-64 ABI document says that
// fixed arguments to vararg functions are supposed to be passed in
// registers. Actually modeling that would be a lot of work, though.
- CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+ CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
CCIfSubtarget<"hasAVX()",
CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
YMM4, YMM5, YMM6, YMM7]>>>>,
// The first 8 512-bit vector arguments are passed in ZMM registers.
- CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+ CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
CCIfSubtarget<"hasAVX512()",
CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>,
@@ -593,14 +593,14 @@ def CC_X86_64_C : CallingConv<[
CCIfType<[f80, f128], CCAssignToStack<0, 0>>,
// Vectors get 16-byte stack slots that are 16-byte aligned.
- CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64], CCAssignToStack<16, 16>>,
+ CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64], CCAssignToStack<16, 16>>,
// 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
- CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
CCAssignToStack<32, 32>>,
// 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
- CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+ CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
CCAssignToStack<64, 64>>
]>;
@@ -631,13 +631,13 @@ def CC_X86_Win64_C : CallingConv<[
CCIfCFGuardTarget<CCAssignToReg<[RAX]>>,
// 128 bit vectors are passed by pointer
- CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64], CCPassIndirect<i64>>,
+ CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64], CCPassIndirect<i64>>,
// 256 bit vectors are passed by pointer
- CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64], CCPassIndirect<i64>>,
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64], CCPassIndirect<i64>>,
// 512 bit vectors are passed by pointer
- CCIfType<[v64i8, v32i16, v16i32, v32f16, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,
+ CCIfType<[v64i8, v32i16, v16i32, v32f16, v32bf16, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,
// Long doubles are passed by pointer
CCIfType<[f80], CCPassIndirect<i64>>,
@@ -734,15 +734,15 @@ def CC_X86_64_AnyReg : CallingConv<[
/// values are spilled on the stack.
def CC_X86_32_Vector_Common : CallingConv<[
// Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
- CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+ CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
CCAssignToStack<16, 16>>,
// 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
- CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
CCAssignToStack<32, 32>>,
// 512-bit AVX 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
- CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+ CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
CCAssignToStack<64, 64>>
]>;
@@ -750,15 +750,15 @@ def CC_X86_32_Vector_Common : CallingConv<[
/// values are spilled on the stack.
def CC_X86_Win32_Vector : CallingConv<[
// Other SSE vectors get 16-byte stack slots that are 4-byte aligned.
- CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+ CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
CCAssignToStack<16, 4>>,
// 256-bit AVX vectors get 32-byte stack slots that are 4-byte aligned.
- CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+ CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
CCAssignToStack<32, 4>>,
// 512-bit AVX 512-bit vectors get 64-byte stack slots that are 4-byte aligned.
- CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+ CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
CCAssignToStack<64, 4>>
]>;
@@ -766,16 +766,16 @@ def CC_X86_Win32_Vector : CallingConv<[
// vector registers
def CC_X86_32_Vector_Standard : CallingConv<[
// SSE vector arguments are passed in XMM registers.
- CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+ CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
CCAssignToReg<[XMM0, XMM1, XMM2]>>>,
// AVX 256-bit vector arguments are passed in YMM registers.
- CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+ CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
CCIfSubtarget<"hasAVX()",
CCAssignToReg<[YMM0, YMM1, YMM2]>>>>,
// AVX 512-bit vector arguments are passed in ZMM registers.
- CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+ CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
CCAssignToReg<[ZMM0, ZMM1, ZMM2]>>>,
CCIfIsVarArgOnWin<CCDelegateTo<CC_X86_Win32_Vector>>,
@@ -786,16 +786,16 @@ def CC_X86_32_Vector_Standard : CallingConv<[
// vector registers.
def CC_X86_32_Vector_Darwin : CallingConv<[
// SSE vector arguments are passed in XMM registers.
- CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v4f32, v2f64],
+ CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v8f16, v8bf16, v4f32, v2f64],
CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,
// AVX 256-bit vector arguments are passed in YMM registers.
- CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v8f32, v4f64],
+ CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v16f16, v16bf16, v8f32, v4f64],
CCIfSubtarget<"hasAVX()",
CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,
// AVX 512-bit vector arguments are passed in ZMM registers.
- CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v16f32, v8f64],
+ CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v32f16, v32bf16, v16f32, v8f64],
CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>>,
CCDelegateTo<CC_X86_32_Vector_Common>
diff --git a/llvm/test/CodeGen/X86/bfloat-calling-conv.ll b/llvm/test/CodeGen/X86/bfloat-calling-conv.ll
new file mode 100644
index 0000000000000..ea4d32bae9ccb
--- /dev/null
+++ b/llvm/test/CodeGen/X86/bfloat-calling-conv.ll
@@ -0,0 +1,1162 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -fast-isel=false -mtriple=x86_64-linux-unknown -mattr=+sse2 < %s | FileCheck -check-prefixes=SSE2 %s
+; RUN: llc -fast-isel -mtriple=x86_64-linux-unknown -mattr=+sse2 < %s | FileCheck -check-prefixes=FAST_ISEL_SSE2 %s
+; RUN: llc -fast-isel=false -mtriple=x86_64-linux-unknown -mattr=+avx512bf16,avx512vl < %s | FileCheck -check-prefixes=AVX512BF16 %s
+; RUN: llc -fast-isel -mtriple=x86_64-linux-unknown -mattr=+avx512bf16,avx512vl < %s | FileCheck -check-prefixes=FAST_ISEL_AVX512BF16 %s
+; RUN: llc -fast-isel=false -mtriple=x86_64-linux-unknown -mattr=+avxneconvert < %s | FileCheck -check-prefixes=AVXNECONVERT %s
+; RUN: llc -fast-isel -mtriple=x86_64-linux-unknown -mattr=+avxneconvert < %s | FileCheck -check-prefixes=FAST_ISEL_AVXNECONVERT %s
+
+define bfloat @return_arg_bf16(bfloat %x) #0 {
+; SSE2-LABEL: return_arg_bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: return_arg_bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %rax
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: popq %rax
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: return_arg_bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: return_arg_bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: vpextrw $0, %xmm0, %eax
+; FAST_ISEL_AVX512BF16-NEXT: shll $16, %eax
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %eax, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: vcvtneps2bf16 %xmm0, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: return_arg_bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: return_arg_bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: vpextrw $0, %xmm0, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: shll $16, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %eax, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: {vex} vcvtneps2bf16 %xmm0, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ ret bfloat %x
+}
+
+define <2 x bfloat> @return_arg_v2bf16(<2 x bfloat> %x) #0 {
+; SSE2-LABEL: return_arg_v2bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: return_arg_v2bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: subq $40, %rsp
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, %xmm0
+; FAST_ISEL_SSE2-NEXT: addq $40, %rsp
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: return_arg_v2bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: return_arg_v2bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: return_arg_v2bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: return_arg_v2bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ ret <2 x bfloat> %x
+}
+
+define <3 x bfloat> @return_arg_v3bf16(<3 x bfloat> %x) #0 {
+; SSE2-LABEL: return_arg_v3bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: return_arg_v3bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: subq $40, %rsp
+; FAST_ISEL_SSE2-NEXT: pextrw $2, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; FAST_ISEL_SSE2-NEXT: movaps %xmm1, %xmm0
+; FAST_ISEL_SSE2-NEXT: addq $40, %rsp
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: return_arg_v3bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: return_arg_v3bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: vpextrw $2, %xmm0, %eax
+; FAST_ISEL_AVX512BF16-NEXT: shll $16, %eax
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %eax, %xmm1
+; FAST_ISEL_AVX512BF16-NEXT: vpextrw $1, %xmm0, %eax
+; FAST_ISEL_AVX512BF16-NEXT: shll $16, %eax
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %eax, %xmm2
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %xmm0, %eax
+; FAST_ISEL_AVX512BF16-NEXT: shll $16, %eax
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %eax, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: vcvtneps2bf16 %xmm1, %xmm1
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %xmm1, %eax
+; FAST_ISEL_AVX512BF16-NEXT: vcvtneps2bf16 %xmm0, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: vcvtneps2bf16 %xmm2, %xmm1
+; FAST_ISEL_AVX512BF16-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; FAST_ISEL_AVX512BF16-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: return_arg_v3bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: return_arg_v3bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: vpextrw $2, %xmm0, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: shll $16, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %eax, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vpextrw $1, %xmm0, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: shll $16, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %eax, %xmm2
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %xmm0, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: shll $16, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %eax, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: {vex} vcvtneps2bf16 %xmm1, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %xmm1, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: {vex} vcvtneps2bf16 %xmm0, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: {vex} vcvtneps2bf16 %xmm2, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; FAST_ISEL_AVXNECONVERT-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovq %xmm1, %rax
+; FAST_ISEL_AVXNECONVERT-NEXT: movl %eax, %ecx
+; FAST_ISEL_AVXNECONVERT-NEXT: shrl $16, %ecx
+; FAST_ISEL_AVXNECONVERT-NEXT: vpinsrw $0, %ecx, %xmm0, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; FAST_ISEL_AVXNECONVERT-NEXT: shrq $32, %rax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %eax, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vpbroadcastw %xmm1, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ ret <3 x bfloat> %x
+}
+
+define <4 x bfloat> @return_arg_v4bf16(<4 x bfloat> %x) #0 {
+; SSE2-LABEL: return_arg_v4bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: return_arg_v4bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: subq $56, %rsp
+; FAST_ISEL_SSE2-NEXT: pextrw $3, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $2, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; FAST_ISEL_SSE2-NEXT: addq $56, %rsp
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: return_arg_v4bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: return_arg_v4bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: return_arg_v4bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: return_arg_v4bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ ret <4 x bfloat> %x
+}
+
+define <8 x bfloat> @return_arg_v8bf16(<8 x bfloat> %x) #0 {
+; SSE2-LABEL: return_arg_v8bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: return_arg_v8bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %r14
+; FAST_ISEL_SSE2-NEXT: pushq %rbx
+; FAST_ISEL_SSE2-NEXT: subq $56, %rsp
+; FAST_ISEL_SSE2-NEXT: pextrw $7, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $6, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $5, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $4, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $3, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $2, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm1
+; FAST_ISEL_SSE2-NEXT: movd %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm1
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; FAST_ISEL_SSE2-NEXT: addq $56, %rsp
+; FAST_ISEL_SSE2-NEXT: popq %rbx
+; FAST_ISEL_SSE2-NEXT: popq %r14
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: return_arg_v8bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: return_arg_v8bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: return_arg_v8bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: return_arg_v8bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ ret <8 x bfloat> %x
+}
+
+define <16 x bfloat> @return_arg_v16bf16(<16 x bfloat> %x) #0 {
+;
+; SSE2-LABEL: return_arg_v16bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: retq
+;
+; FAST_ISEL_SSE2-LABEL: return_arg_v16bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %r14
+; FAST_ISEL_SSE2-NEXT: pushq %rbx
+; FAST_ISEL_SSE2-NEXT: subq $104, %rsp
+; FAST_ISEL_SSE2-NEXT: pextrw $7, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $6, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $5, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $4, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $3, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $2, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $7, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $6, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $5, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $4, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $3, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $2, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm1
+; FAST_ISEL_SSE2-NEXT: movd %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; FAST_ISEL_SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: addq $104, %rsp
+; FAST_ISEL_SSE2-NEXT: popq %rbx
+; FAST_ISEL_SSE2-NEXT: popq %r14
+; FAST_ISEL_SSE2-NEXT: retq
+;
+; AVX512BF16-LABEL: return_arg_v16bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: retq
+;
+; FAST_ISEL_AVX512BF16-LABEL: return_arg_v16bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: retq
+;
+; AVXNECONVERT-LABEL: return_arg_v16bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: retq
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: return_arg_v16bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: retq
+ ret <16 x bfloat> %x
+}
+
+declare bfloat @returns_bf16(bfloat)
+declare <2 x bfloat> @returns_v2bf16(<2 x bfloat>)
+declare <3 x bfloat> @returns_v3bf16(<3 x bfloat>)
+declare <4 x bfloat> @returns_v4bf16(<4 x bfloat>)
+declare <8 x bfloat> @returns_v8bf16(<8 x bfloat>)
+declare <16 x bfloat> @returns_v16bf16(<16 x bfloat>)
+
+define bfloat @call_ret_bf16(ptr %ptr) #0 {
+;
+; SSE2-LABEL: call_ret_bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rax
+; SSE2-NEXT: pinsrw $0, (%rdi), %xmm0
+; SSE2-NEXT: callq returns_bf16 at PLT
+;
+; FAST_ISEL_SSE2-LABEL: call_ret_bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %rax
+; FAST_ISEL_SSE2-NEXT: movzwl (%rdi), %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: callq returns_bf16 at PLT
+;
+; AVX512BF16-LABEL: call_ret_bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: pushq %rax
+; AVX512BF16-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX512BF16-NEXT: callq returns_bf16 at PLT
+;
+; FAST_ISEL_AVX512BF16-LABEL: call_ret_bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: pushq %rax
+; FAST_ISEL_AVX512BF16-NEXT: movzwl (%rdi), %eax
+; FAST_ISEL_AVX512BF16-NEXT: shll $16, %eax
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %eax, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: vcvtneps2bf16 %xmm0, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: callq returns_bf16 at PLT
+;
+; AVXNECONVERT-LABEL: call_ret_bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: pushq %rax
+; AVXNECONVERT-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVXNECONVERT-NEXT: callq returns_bf16 at PLT
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: call_ret_bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: pushq %rax
+; FAST_ISEL_AVXNECONVERT-NEXT: movzwl (%rdi), %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: shll $16, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %eax, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: {vex} vcvtneps2bf16 %xmm0, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: callq returns_bf16 at PLT
+ %val = load bfloat, ptr %ptr
+ call bfloat @returns_bf16(bfloat %val)
+ unreachable
+}
+
+define <2 x bfloat> @call_ret_v2bf16(ptr %ptr) #0 {
+;
+; SSE2-LABEL: call_ret_v2bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rax
+; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: callq returns_v2bf16 at PLT
+;
+; FAST_ISEL_SSE2-LABEL: call_ret_v2bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: subq $40, %rsp
+; FAST_ISEL_SSE2-NEXT: movl (%rdi), %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, (%rsp)
+; FAST_ISEL_SSE2-NEXT: movdqa (%rsp), %xmm0
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq returns_v2bf16 at PLT
+;
+; AVX512BF16-LABEL: call_ret_v2bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: pushq %rax
+; AVX512BF16-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512BF16-NEXT: callq returns_v2bf16 at PLT
+;
+; FAST_ISEL_AVX512BF16-LABEL: call_ret_v2bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: pushq %rax
+; FAST_ISEL_AVX512BF16-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_AVX512BF16-NEXT: callq returns_v2bf16 at PLT
+;
+; AVXNECONVERT-LABEL: call_ret_v2bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: pushq %rax
+; AVXNECONVERT-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVXNECONVERT-NEXT: callq returns_v2bf16 at PLT
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: call_ret_v2bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: pushq %rax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_AVXNECONVERT-NEXT: callq returns_v2bf16 at PLT
+ %val = load <2 x bfloat>, ptr %ptr
+ call <2 x bfloat> @returns_v2bf16(<2 x bfloat> %val)
+ unreachable
+}
+
+define <3 x bfloat> @call_ret_v3bf16(ptr %ptr) #0 {
+;
+; SSE2-LABEL: call_ret_v3bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rax
+; SSE2-NEXT: movl 4(%rdi), %eax
+; SSE2-NEXT: pinsrw $0, %eax, %xmm1
+; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: callq returns_v3bf16 at PLT
+;
+; FAST_ISEL_SSE2-LABEL: call_ret_v3bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: subq $40, %rsp
+; FAST_ISEL_SSE2-NEXT: movq (%rdi), %rax
+; FAST_ISEL_SSE2-NEXT: movl %eax, %ecx
+; FAST_ISEL_SSE2-NEXT: andl $-65536, %ecx # imm = 0xFFFF0000
+; FAST_ISEL_SSE2-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movl %eax, %ecx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ecx
+; FAST_ISEL_SSE2-NEXT: movd %ecx, %xmm0
+; FAST_ISEL_SSE2-NEXT: shrq $32, %rax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; FAST_ISEL_SSE2-NEXT: movaps %xmm1, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq returns_v3bf16 at PLT
+;
+; AVX512BF16-LABEL: call_ret_v3bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: pushq %rax
+; AVX512BF16-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512BF16-NEXT: callq returns_v3bf16 at PLT
+;
+; FAST_ISEL_AVX512BF16-LABEL: call_ret_v3bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: pushq %rax
+; FAST_ISEL_AVX512BF16-NEXT: movq (%rdi), %rax
+; FAST_ISEL_AVX512BF16-NEXT: movl %eax, %ecx
+; FAST_ISEL_AVX512BF16-NEXT: andl $-65536, %ecx # imm = 0xFFFF0000
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %ecx, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: movl %eax, %ecx
+; FAST_ISEL_AVX512BF16-NEXT: shll $16, %ecx
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %ecx, %xmm1
+; FAST_ISEL_AVX512BF16-NEXT: shrq $32, %rax
+; FAST_ISEL_AVX512BF16-NEXT: shll $16, %eax
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %eax, %xmm2
+; FAST_ISEL_AVX512BF16-NEXT: vcvtneps2bf16 %xmm2, %xmm2
+; FAST_ISEL_AVX512BF16-NEXT: vmovd %xmm2, %eax
+; FAST_ISEL_AVX512BF16-NEXT: vcvtneps2bf16 %xmm1, %xmm1
+; FAST_ISEL_AVX512BF16-NEXT: vcvtneps2bf16 %xmm0, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_AVX512BF16-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: callq returns_v3bf16 at PLT
+;
+; AVXNECONVERT-LABEL: call_ret_v3bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: pushq %rax
+; AVXNECONVERT-NEXT: movl 4(%rdi), %eax
+; AVXNECONVERT-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; AVXNECONVERT-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVXNECONVERT-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
+; AVXNECONVERT-NEXT: callq returns_v3bf16 at PLT
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: call_ret_v3bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: pushq %rax
+; FAST_ISEL_AVXNECONVERT-NEXT: movq (%rdi), %rax
+; FAST_ISEL_AVXNECONVERT-NEXT: movl %eax, %ecx
+; FAST_ISEL_AVXNECONVERT-NEXT: andl $-65536, %ecx # imm = 0xFFFF0000
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %ecx, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: movl %eax, %ecx
+; FAST_ISEL_AVXNECONVERT-NEXT: shll $16, %ecx
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %ecx, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: shrq $32, %rax
+; FAST_ISEL_AVXNECONVERT-NEXT: shll $16, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %eax, %xmm2
+; FAST_ISEL_AVXNECONVERT-NEXT: {vex} vcvtneps2bf16 %xmm2, %xmm2
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %xmm2, %eax
+; FAST_ISEL_AVXNECONVERT-NEXT: {vex} vcvtneps2bf16 %xmm1, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: {vex} vcvtneps2bf16 %xmm0, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_AVXNECONVERT-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovq %xmm0, %rax
+; FAST_ISEL_AVXNECONVERT-NEXT: movl %eax, %ecx
+; FAST_ISEL_AVXNECONVERT-NEXT: shrl $16, %ecx
+; FAST_ISEL_AVXNECONVERT-NEXT: vpinsrw $0, %ecx, %xmm0, %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_AVXNECONVERT-NEXT: shrq $32, %rax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovd %eax, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vpbroadcastw %xmm1, %xmm1
+; FAST_ISEL_AVXNECONVERT-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3,4,5,6,7]
+; FAST_ISEL_AVXNECONVERT-NEXT: callq returns_v3bf16 at PLT
+ %val = load <3 x bfloat>, ptr %ptr
+ call <3 x bfloat> @returns_v3bf16(<3 x bfloat> %val)
+ unreachable
+}
+
+define <4 x bfloat> @call_ret_v4bf16(ptr %ptr) #0 {
+;
+; SSE2-LABEL: call_ret_v4bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rax
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: callq returns_v4bf16 at PLT
+;
+; FAST_ISEL_SSE2-LABEL: call_ret_v4bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: subq $56, %rsp
+; FAST_ISEL_SSE2-NEXT: movq (%rdi), %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, {{[0-9]+}}(%rsp)
+; FAST_ISEL_SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm0
+; FAST_ISEL_SSE2-NEXT: pextrw $3, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $2, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm0
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; FAST_ISEL_SSE2-NEXT: callq returns_v4bf16 at PLT
+;
+; AVX512BF16-LABEL: call_ret_v4bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: pushq %rax
+; AVX512BF16-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512BF16-NEXT: callq returns_v4bf16 at PLT
+;
+; FAST_ISEL_AVX512BF16-LABEL: call_ret_v4bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: pushq %rax
+; FAST_ISEL_AVX512BF16-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FAST_ISEL_AVX512BF16-NEXT: callq returns_v4bf16 at PLT
+;
+; AVXNECONVERT-LABEL: call_ret_v4bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: pushq %rax
+; AVXNECONVERT-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVXNECONVERT-NEXT: callq returns_v4bf16 at PLT
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: call_ret_v4bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: pushq %rax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; FAST_ISEL_AVXNECONVERT-NEXT: callq returns_v4bf16 at PLT
+ %val = load <4 x bfloat>, ptr %ptr
+ call <4 x bfloat> @returns_v4bf16(<4 x bfloat> %val)
+ unreachable
+}
+
+define <8 x bfloat> @call_ret_v8bf16(ptr %ptr) #0 {
+;
+; SSE2-LABEL: call_ret_v8bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rax
+; SSE2-NEXT: movaps (%rdi), %xmm0
+; SSE2-NEXT: callq returns_v8bf16 at PLT
+;
+; FAST_ISEL_SSE2-LABEL: call_ret_v8bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %r14
+; FAST_ISEL_SSE2-NEXT: pushq %rbx
+; FAST_ISEL_SSE2-NEXT: subq $56, %rsp
+; FAST_ISEL_SSE2-NEXT: movdqa (%rdi), %xmm1
+; FAST_ISEL_SSE2-NEXT: pextrw $7, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $6, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $5, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $4, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $3, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $2, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movd %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2 at PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm1
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; FAST_ISEL_SSE2-NEXT: callq returns_v8bf16 at PLT
+;
+; AVX512BF16-LABEL: call_ret_v8bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: pushq %rax
+; AVX512BF16-NEXT: vmovaps (%rdi), %xmm0
+; AVX512BF16-NEXT: callq returns_v8bf16 at PLT
+;
+; FAST_ISEL_AVX512BF16-LABEL: call_ret_v8bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: pushq %rax
+; FAST_ISEL_AVX512BF16-NEXT: vmovaps (%rdi), %xmm0
+; FAST_ISEL_AVX512BF16-NEXT: callq returns_v8bf16 at PLT
+;
+; AVXNECONVERT-LABEL: call_ret_v8bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: pushq %rax
+; AVXNECONVERT-NEXT: vmovaps (%rdi), %xmm0
+; AVXNECONVERT-NEXT: callq returns_v8bf16 at PLT
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: call_ret_v8bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: pushq %rax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovaps (%rdi), %xmm0
+; FAST_ISEL_AVXNECONVERT-NEXT: callq returns_v8bf16 at PLT
+ %val = load <8 x bfloat>, ptr %ptr
+ call <8 x bfloat> @returns_v8bf16(<8 x bfloat> %val)
+ unreachable
+}
+
+define <16 x bfloat> @call_ret_v16bf16(ptr %ptr) #0 {
+;
+; SSE2-LABEL: call_ret_v16bf16:
+; SSE2: # %bb.0:
+; SSE2-NEXT: pushq %rax
+; SSE2-NEXT: movaps (%rdi), %xmm0
+; SSE2-NEXT: movaps 16(%rdi), %xmm1
+; SSE2-NEXT: callq returns_v16bf16 at PLT
+;
+; FAST_ISEL_SSE2-LABEL: call_ret_v16bf16:
+; FAST_ISEL_SSE2: # %bb.0:
+; FAST_ISEL_SSE2-NEXT: pushq %r14
+; FAST_ISEL_SSE2-NEXT: pushq %rbx
+; FAST_ISEL_SSE2-NEXT: subq $104, %rsp
+; FAST_ISEL_SSE2-NEXT: movdqa (%rdi), %xmm1
+; FAST_ISEL_SSE2-NEXT: movdqa 16(%rdi), %xmm0
+; FAST_ISEL_SSE2-NEXT: pextrw $7, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $6, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $5, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $4, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $3, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $2, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $7, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $6, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $5, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $4, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $3, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $2, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: pextrw $1, %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movd %eax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movd %xmm1, %eax
+; FAST_ISEL_SSE2-NEXT: shll $16, %eax
+; FAST_ISEL_SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %r14d
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %r14d
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %ebx
+; FAST_ISEL_SSE2-NEXT: shll $16, %ebx
+; FAST_ISEL_SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; FAST_ISEL_SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
+; FAST_ISEL_SSE2-NEXT: callq __truncsfbf2@PLT
+; FAST_ISEL_SSE2-NEXT: pextrw $0, %xmm0, %eax
+; FAST_ISEL_SSE2-NEXT: movzwl %ax, %eax
+; FAST_ISEL_SSE2-NEXT: orl %ebx, %eax
+; FAST_ISEL_SSE2-NEXT: shlq $32, %rax
+; FAST_ISEL_SSE2-NEXT: orq %r14, %rax
+; FAST_ISEL_SSE2-NEXT: movq %rax, %xmm0
+; FAST_ISEL_SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
+; FAST_ISEL_SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; FAST_ISEL_SSE2-NEXT: callq returns_v16bf16@PLT
+;
+; AVX512BF16-LABEL: call_ret_v16bf16:
+; AVX512BF16: # %bb.0:
+; AVX512BF16-NEXT: pushq %rax
+; AVX512BF16-NEXT: vmovaps (%rdi), %ymm0
+; AVX512BF16-NEXT: callq returns_v16bf16@PLT
+;
+; FAST_ISEL_AVX512BF16-LABEL: call_ret_v16bf16:
+; FAST_ISEL_AVX512BF16: # %bb.0:
+; FAST_ISEL_AVX512BF16-NEXT: pushq %rax
+; FAST_ISEL_AVX512BF16-NEXT: vmovaps (%rdi), %ymm0
+; FAST_ISEL_AVX512BF16-NEXT: callq returns_v16bf16@PLT
+;
+; AVXNECONVERT-LABEL: call_ret_v16bf16:
+; AVXNECONVERT: # %bb.0:
+; AVXNECONVERT-NEXT: pushq %rax
+; AVXNECONVERT-NEXT: vmovaps (%rdi), %ymm0
+; AVXNECONVERT-NEXT: callq returns_v16bf16@PLT
+;
+; FAST_ISEL_AVXNECONVERT-LABEL: call_ret_v16bf16:
+; FAST_ISEL_AVXNECONVERT: # %bb.0:
+; FAST_ISEL_AVXNECONVERT-NEXT: pushq %rax
+; FAST_ISEL_AVXNECONVERT-NEXT: vmovaps (%rdi), %ymm0
+; FAST_ISEL_AVXNECONVERT-NEXT: callq returns_v16bf16@PLT
+ %val = load <16 x bfloat>, ptr %ptr
+ call <16 x bfloat> @returns_v16bf16(<16 x bfloat> %val)
+ unreachable
+}
+
+attributes #0 = { nounwind }