[llvm] 8cc8fda - [AArch64] Also promote vector bf16 INT_TO_FP to f32

Benjamin Kramer via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 4 14:35:41 PST 2024


Author: Benjamin Kramer
Date: 2024-03-04T23:34:56+01:00
New Revision: 8cc8fdaf5c2e799bc758919365bae601e59c03fc

URL: https://github.com/llvm/llvm-project/commit/8cc8fdaf5c2e799bc758919365bae601e59c03fc
DIFF: https://github.com/llvm/llvm-project/commit/8cc8fdaf5c2e799bc758919365bae601e59c03fc.diff

LOG: [AArch64] Also promote vector bf16 INT_TO_FP to f32

This mirrors the scalar version.

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/itofp.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 63725f840b6fcb..8d6e92a00b5d0d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4421,6 +4421,21 @@ SDValue AArch64TargetLowering::LowerVectorINT_TO_FP(SDValue Op,
       useSVEForFixedLengthVectorVT(InVT, !Subtarget->isNeonAvailable()))
     return LowerFixedLengthIntToFPToSVE(Op, DAG);
 
+  // Promote bf16 conversions to f32.
+  if (VT.getVectorElementType() == MVT::bf16) {
+    EVT F32 = VT.changeElementType(MVT::f32);
+    if (IsStrict) {
+      SDValue Val = DAG.getNode(Op.getOpcode(), dl, {F32, MVT::Other},
+                                {Op.getOperand(0), In});
+      return DAG.getNode(
+          ISD::STRICT_FP_ROUND, dl, {Op.getValueType(), MVT::Other},
+          {Val.getValue(1), Val.getValue(0), DAG.getIntPtrConstant(0, dl)});
+    }
+    return DAG.getNode(ISD::FP_ROUND, dl, Op.getValueType(),
+                       DAG.getNode(Op.getOpcode(), dl, F32, In),
+                       DAG.getIntPtrConstant(0, dl));
+  }
+
   uint64_t VTSize = VT.getFixedSizeInBits();
   uint64_t InVTSize = InVT.getFixedSizeInBits();
   if (VTSize < InVTSize) {

diff  --git a/llvm/test/CodeGen/AArch64/itofp.ll b/llvm/test/CodeGen/AArch64/itofp.ll
index c40867ff73920c..be57e1e26a9263 100644
--- a/llvm/test/CodeGen/AArch64/itofp.ll
+++ b/llvm/test/CodeGen/AArch64/itofp.ll
@@ -6202,3 +6202,1743 @@ entry:
   %c = uitofp <32 x i8> %a to <32 x half>
   ret <32 x half> %c
 }
+
+define bfloat @stofp_i64_bf16(i64 %a) {
+; CHECK-LABEL: stofp_i64_bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    scvtf s0, x0
+; CHECK-NEXT:    mov w8, #32767 // =0x7fff
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    ubfx w10, w9, #16, #1
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    lsr w8, w8, #16
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp i64 %a to bfloat
+  ret bfloat %c
+}
+
+define bfloat @utofp_i64_bf16(i64 %a) {
+; CHECK-LABEL: utofp_i64_bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ucvtf s0, x0
+; CHECK-NEXT:    mov w8, #32767 // =0x7fff
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    ubfx w10, w9, #16, #1
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    lsr w8, w8, #16
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp i64 %a to bfloat
+  ret bfloat %c
+}
+
+define bfloat @stofp_i32_bf16(i32 %a) {
+; CHECK-LABEL: stofp_i32_bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    scvtf s0, w0
+; CHECK-NEXT:    mov w8, #32767 // =0x7fff
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    ubfx w10, w9, #16, #1
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    lsr w8, w8, #16
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp i32 %a to bfloat
+  ret bfloat %c
+}
+
+define bfloat @utofp_i32_bf16(i32 %a) {
+; CHECK-LABEL: utofp_i32_bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ucvtf s0, w0
+; CHECK-NEXT:    mov w8, #32767 // =0x7fff
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    ubfx w10, w9, #16, #1
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    lsr w8, w8, #16
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp i32 %a to bfloat
+  ret bfloat %c
+}
+
+define bfloat @stofp_i16_bf16(i16 %a) {
+; CHECK-LABEL: stofp_i16_bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sxth w9, w0
+; CHECK-NEXT:    mov w8, #32767 // =0x7fff
+; CHECK-NEXT:    scvtf s0, w9
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    ubfx w10, w9, #16, #1
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    lsr w8, w8, #16
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp i16 %a to bfloat
+  ret bfloat %c
+}
+
+define bfloat @utofp_i16_bf16(i16 %a) {
+; CHECK-LABEL: utofp_i16_bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    and w9, w0, #0xffff
+; CHECK-NEXT:    mov w8, #32767 // =0x7fff
+; CHECK-NEXT:    ucvtf s0, w9
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    ubfx w10, w9, #16, #1
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    lsr w8, w8, #16
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp i16 %a to bfloat
+  ret bfloat %c
+}
+
+define bfloat @stofp_i8_bf16(i8 %a) {
+; CHECK-LABEL: stofp_i8_bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sxtb w9, w0
+; CHECK-NEXT:    mov w8, #32767 // =0x7fff
+; CHECK-NEXT:    scvtf s0, w9
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    ubfx w10, w9, #16, #1
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    lsr w8, w8, #16
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp i8 %a to bfloat
+  ret bfloat %c
+}
+
+define bfloat @utofp_i8_bf16(i8 %a) {
+; CHECK-LABEL: utofp_i8_bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    and w9, w0, #0xff
+; CHECK-NEXT:    mov w8, #32767 // =0x7fff
+; CHECK-NEXT:    ucvtf s0, w9
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    ubfx w10, w9, #16, #1
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    lsr w8, w8, #16
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp i8 %a to bfloat
+  ret bfloat %c
+}
+
+define <2 x bfloat> @stofp_v2i64_v2bf16(<2 x i64> %a) {
+; CHECK-LABEL: stofp_v2i64_v2bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov x9, v0.d[1]
+; CHECK-NEXT:    fmov x10, d0
+; CHECK-NEXT:    mov w8, #32767 // =0x7fff
+; CHECK-NEXT:    scvtf s1, x10
+; CHECK-NEXT:    scvtf s0, x9
+; CHECK-NEXT:    fmov w10, s1
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    ubfx w12, w10, #16, #1
+; CHECK-NEXT:    ubfx w11, w9, #16, #1
+; CHECK-NEXT:    add w9, w9, w8
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    add w8, w12, w8
+; CHECK-NEXT:    add w9, w11, w9
+; CHECK-NEXT:    lsr w8, w8, #16
+; CHECK-NEXT:    lsr w9, w9, #16
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    fmov s1, w9
+; CHECK-NEXT:    mov v0.h[1], v1.h[0]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <2 x i64> %a to <2 x bfloat>
+  ret <2 x bfloat> %c
+}
+
+define <2 x bfloat> @utofp_v2i64_v2bf16(<2 x i64> %a) {
+; CHECK-LABEL: utofp_v2i64_v2bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    mov x9, v0.d[1]
+; CHECK-NEXT:    fmov x10, d0
+; CHECK-NEXT:    mov w8, #32767 // =0x7fff
+; CHECK-NEXT:    ucvtf s1, x10
+; CHECK-NEXT:    ucvtf s0, x9
+; CHECK-NEXT:    fmov w10, s1
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    ubfx w12, w10, #16, #1
+; CHECK-NEXT:    ubfx w11, w9, #16, #1
+; CHECK-NEXT:    add w9, w9, w8
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    add w8, w12, w8
+; CHECK-NEXT:    add w9, w11, w9
+; CHECK-NEXT:    lsr w8, w8, #16
+; CHECK-NEXT:    lsr w9, w9, #16
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    fmov s1, w9
+; CHECK-NEXT:    mov v0.h[1], v1.h[0]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <2 x i64> %a to <2 x bfloat>
+  ret <2 x bfloat> %c
+}
+
+define <3 x bfloat> @stofp_v3i64_v3bf16(<3 x i64> %a) {
+; CHECK-LABEL: stofp_v3i64_v3bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    scvtf v1.2d, v2.2d
+; CHECK-NEXT:    movi v2.4s, #1
+; CHECK-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    ushr v3.4s, v0.4s, #16
+; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
+; CHECK-NEXT:    and v2.16b, v3.16b, v2.16b
+; CHECK-NEXT:    add v1.4s, v2.4s, v1.4s
+; CHECK-NEXT:    fcmeq v2.4s, v0.4s, v0.4s
+; CHECK-NEXT:    orr v0.4s, #64, lsl #16
+; CHECK-NEXT:    bit v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    shrn v0.4h, v0.4s, #16
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <3 x i64> %a to <3 x bfloat>
+  ret <3 x bfloat> %c
+}
+
+define <3 x bfloat> @utofp_v3i64_v3bf16(<3 x i64> %a) {
+; CHECK-LABEL: utofp_v3i64_v3bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ucvtf v1.2d, v2.2d
+; CHECK-NEXT:    movi v2.4s, #1
+; CHECK-NEXT:    ucvtf v0.2d, v0.2d
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    ushr v3.4s, v0.4s, #16
+; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
+; CHECK-NEXT:    and v2.16b, v3.16b, v2.16b
+; CHECK-NEXT:    add v1.4s, v2.4s, v1.4s
+; CHECK-NEXT:    fcmeq v2.4s, v0.4s, v0.4s
+; CHECK-NEXT:    orr v0.4s, #64, lsl #16
+; CHECK-NEXT:    bit v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    shrn v0.4h, v0.4s, #16
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <3 x i64> %a to <3 x bfloat>
+  ret <3 x bfloat> %c
+}
+
+define <4 x bfloat> @stofp_v4i64_v4bf16(<4 x i64> %a) {
+; CHECK-LABEL: stofp_v4i64_v4bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-NEXT:    scvtf v1.2d, v1.2d
+; CHECK-NEXT:    movi v2.4s, #1
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    ushr v3.4s, v0.4s, #16
+; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
+; CHECK-NEXT:    and v2.16b, v3.16b, v2.16b
+; CHECK-NEXT:    add v1.4s, v2.4s, v1.4s
+; CHECK-NEXT:    fcmeq v2.4s, v0.4s, v0.4s
+; CHECK-NEXT:    orr v0.4s, #64, lsl #16
+; CHECK-NEXT:    bit v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    shrn v0.4h, v0.4s, #16
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <4 x i64> %a to <4 x bfloat>
+  ret <4 x bfloat> %c
+}
+
+define <4 x bfloat> @utofp_v4i64_v4bf16(<4 x i64> %a) {
+; CHECK-LABEL: utofp_v4i64_v4bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ucvtf v0.2d, v0.2d
+; CHECK-NEXT:    ucvtf v1.2d, v1.2d
+; CHECK-NEXT:    movi v2.4s, #1
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    ushr v3.4s, v0.4s, #16
+; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
+; CHECK-NEXT:    and v2.16b, v3.16b, v2.16b
+; CHECK-NEXT:    add v1.4s, v2.4s, v1.4s
+; CHECK-NEXT:    fcmeq v2.4s, v0.4s, v0.4s
+; CHECK-NEXT:    orr v0.4s, #64, lsl #16
+; CHECK-NEXT:    bit v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    shrn v0.4h, v0.4s, #16
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <4 x i64> %a to <4 x bfloat>
+  ret <4 x bfloat> %c
+}
+
+define <8 x bfloat> @stofp_v8i64_v8bf16(<8 x i64> %a) {
+; CHECK-LABEL: stofp_v8i64_v8bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    scvtf v2.2d, v2.2d
+; CHECK-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-NEXT:    scvtf v3.2d, v3.2d
+; CHECK-NEXT:    scvtf v1.2d, v1.2d
+; CHECK-NEXT:    fcvtn v2.2s, v2.2d
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    fcvtn2 v2.4s, v3.2d
+; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    movi v3.4s, #1
+; CHECK-NEXT:    ushr v4.4s, v2.4s, #16
+; CHECK-NEXT:    ushr v5.4s, v0.4s, #16
+; CHECK-NEXT:    add v6.4s, v2.4s, v1.4s
+; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
+; CHECK-NEXT:    and v4.16b, v4.16b, v3.16b
+; CHECK-NEXT:    and v3.16b, v5.16b, v3.16b
+; CHECK-NEXT:    fcmeq v5.4s, v2.4s, v2.4s
+; CHECK-NEXT:    orr v2.4s, #64, lsl #16
+; CHECK-NEXT:    add v4.4s, v4.4s, v6.4s
+; CHECK-NEXT:    add v1.4s, v3.4s, v1.4s
+; CHECK-NEXT:    fcmeq v3.4s, v0.4s, v0.4s
+; CHECK-NEXT:    orr v0.4s, #64, lsl #16
+; CHECK-NEXT:    bit v2.16b, v4.16b, v5.16b
+; CHECK-NEXT:    bit v0.16b, v1.16b, v3.16b
+; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <8 x i64> %a to <8 x bfloat>
+  ret <8 x bfloat> %c
+}
+
+define <8 x bfloat> @utofp_v8i64_v8bf16(<8 x i64> %a) {
+; CHECK-LABEL: utofp_v8i64_v8bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ucvtf v2.2d, v2.2d
+; CHECK-NEXT:    ucvtf v0.2d, v0.2d
+; CHECK-NEXT:    ucvtf v3.2d, v3.2d
+; CHECK-NEXT:    ucvtf v1.2d, v1.2d
+; CHECK-NEXT:    fcvtn v2.2s, v2.2d
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    fcvtn2 v2.4s, v3.2d
+; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    movi v3.4s, #1
+; CHECK-NEXT:    ushr v4.4s, v2.4s, #16
+; CHECK-NEXT:    ushr v5.4s, v0.4s, #16
+; CHECK-NEXT:    add v6.4s, v2.4s, v1.4s
+; CHECK-NEXT:    add v1.4s, v0.4s, v1.4s
+; CHECK-NEXT:    and v4.16b, v4.16b, v3.16b
+; CHECK-NEXT:    and v3.16b, v5.16b, v3.16b
+; CHECK-NEXT:    fcmeq v5.4s, v2.4s, v2.4s
+; CHECK-NEXT:    orr v2.4s, #64, lsl #16
+; CHECK-NEXT:    add v4.4s, v4.4s, v6.4s
+; CHECK-NEXT:    add v1.4s, v3.4s, v1.4s
+; CHECK-NEXT:    fcmeq v3.4s, v0.4s, v0.4s
+; CHECK-NEXT:    orr v0.4s, #64, lsl #16
+; CHECK-NEXT:    bit v2.16b, v4.16b, v5.16b
+; CHECK-NEXT:    bit v0.16b, v1.16b, v3.16b
+; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <8 x i64> %a to <8 x bfloat>
+  ret <8 x bfloat> %c
+}
+
+define <16 x bfloat> @stofp_v16i64_v16bf16(<16 x i64> %a) {
+; CHECK-LABEL: stofp_v16i64_v16bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    scvtf v2.2d, v2.2d
+; CHECK-NEXT:    scvtf v0.2d, v0.2d
+; CHECK-NEXT:    scvtf v6.2d, v6.2d
+; CHECK-NEXT:    scvtf v4.2d, v4.2d
+; CHECK-NEXT:    scvtf v3.2d, v3.2d
+; CHECK-NEXT:    scvtf v1.2d, v1.2d
+; CHECK-NEXT:    scvtf v7.2d, v7.2d
+; CHECK-NEXT:    scvtf v5.2d, v5.2d
+; CHECK-NEXT:    fcvtn v2.2s, v2.2d
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    fcvtn v6.2s, v6.2d
+; CHECK-NEXT:    fcvtn v4.2s, v4.2d
+; CHECK-NEXT:    fcvtn2 v2.4s, v3.2d
+; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    fcvtn2 v6.4s, v7.2d
+; CHECK-NEXT:    fcvtn2 v4.4s, v5.2d
+; CHECK-NEXT:    movi v3.4s, #1
+; CHECK-NEXT:    ushr v5.4s, v2.4s, #16
+; CHECK-NEXT:    ushr v7.4s, v0.4s, #16
+; CHECK-NEXT:    add v17.4s, v2.4s, v1.4s
+; CHECK-NEXT:    add v19.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ushr v16.4s, v6.4s, #16
+; CHECK-NEXT:    ushr v18.4s, v4.4s, #16
+; CHECK-NEXT:    add v20.4s, v6.4s, v1.4s
+; CHECK-NEXT:    add v1.4s, v4.4s, v1.4s
+; CHECK-NEXT:    and v5.16b, v5.16b, v3.16b
+; CHECK-NEXT:    and v7.16b, v7.16b, v3.16b
+; CHECK-NEXT:    and v16.16b, v16.16b, v3.16b
+; CHECK-NEXT:    and v3.16b, v18.16b, v3.16b
+; CHECK-NEXT:    fcmeq v18.4s, v0.4s, v0.4s
+; CHECK-NEXT:    orr v0.4s, #64, lsl #16
+; CHECK-NEXT:    add v5.4s, v5.4s, v17.4s
+; CHECK-NEXT:    fcmeq v17.4s, v2.4s, v2.4s
+; CHECK-NEXT:    add v7.4s, v7.4s, v19.4s
+; CHECK-NEXT:    fcmeq v19.4s, v6.4s, v6.4s
+; CHECK-NEXT:    orr v2.4s, #64, lsl #16
+; CHECK-NEXT:    add v16.4s, v16.4s, v20.4s
+; CHECK-NEXT:    add v1.4s, v3.4s, v1.4s
+; CHECK-NEXT:    fcmeq v3.4s, v4.4s, v4.4s
+; CHECK-NEXT:    orr v6.4s, #64, lsl #16
+; CHECK-NEXT:    orr v4.4s, #64, lsl #16
+; CHECK-NEXT:    bit v2.16b, v5.16b, v17.16b
+; CHECK-NEXT:    bit v0.16b, v7.16b, v18.16b
+; CHECK-NEXT:    mov v5.16b, v19.16b
+; CHECK-NEXT:    bif v1.16b, v4.16b, v3.16b
+; CHECK-NEXT:    bsl v5.16b, v16.16b, v6.16b
+; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    uzp2 v1.8h, v1.8h, v5.8h
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <16 x i64> %a to <16 x bfloat>
+  ret <16 x bfloat> %c
+}
+
+define <16 x bfloat> @utofp_v16i64_v16bf16(<16 x i64> %a) {
+; CHECK-LABEL: utofp_v16i64_v16bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ucvtf v2.2d, v2.2d
+; CHECK-NEXT:    ucvtf v0.2d, v0.2d
+; CHECK-NEXT:    ucvtf v6.2d, v6.2d
+; CHECK-NEXT:    ucvtf v4.2d, v4.2d
+; CHECK-NEXT:    ucvtf v3.2d, v3.2d
+; CHECK-NEXT:    ucvtf v1.2d, v1.2d
+; CHECK-NEXT:    ucvtf v7.2d, v7.2d
+; CHECK-NEXT:    ucvtf v5.2d, v5.2d
+; CHECK-NEXT:    fcvtn v2.2s, v2.2d
+; CHECK-NEXT:    fcvtn v0.2s, v0.2d
+; CHECK-NEXT:    fcvtn v6.2s, v6.2d
+; CHECK-NEXT:    fcvtn v4.2s, v4.2d
+; CHECK-NEXT:    fcvtn2 v2.4s, v3.2d
+; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    fcvtn2 v6.4s, v7.2d
+; CHECK-NEXT:    fcvtn2 v4.4s, v5.2d
+; CHECK-NEXT:    movi v3.4s, #1
+; CHECK-NEXT:    ushr v5.4s, v2.4s, #16
+; CHECK-NEXT:    ushr v7.4s, v0.4s, #16
+; CHECK-NEXT:    add v17.4s, v2.4s, v1.4s
+; CHECK-NEXT:    add v19.4s, v0.4s, v1.4s
+; CHECK-NEXT:    ushr v16.4s, v6.4s, #16
+; CHECK-NEXT:    ushr v18.4s, v4.4s, #16
+; CHECK-NEXT:    add v20.4s, v6.4s, v1.4s
+; CHECK-NEXT:    add v1.4s, v4.4s, v1.4s
+; CHECK-NEXT:    and v5.16b, v5.16b, v3.16b
+; CHECK-NEXT:    and v7.16b, v7.16b, v3.16b
+; CHECK-NEXT:    and v16.16b, v16.16b, v3.16b
+; CHECK-NEXT:    and v3.16b, v18.16b, v3.16b
+; CHECK-NEXT:    fcmeq v18.4s, v0.4s, v0.4s
+; CHECK-NEXT:    orr v0.4s, #64, lsl #16
+; CHECK-NEXT:    add v5.4s, v5.4s, v17.4s
+; CHECK-NEXT:    fcmeq v17.4s, v2.4s, v2.4s
+; CHECK-NEXT:    add v7.4s, v7.4s, v19.4s
+; CHECK-NEXT:    fcmeq v19.4s, v6.4s, v6.4s
+; CHECK-NEXT:    orr v2.4s, #64, lsl #16
+; CHECK-NEXT:    add v16.4s, v16.4s, v20.4s
+; CHECK-NEXT:    add v1.4s, v3.4s, v1.4s
+; CHECK-NEXT:    fcmeq v3.4s, v4.4s, v4.4s
+; CHECK-NEXT:    orr v6.4s, #64, lsl #16
+; CHECK-NEXT:    orr v4.4s, #64, lsl #16
+; CHECK-NEXT:    bit v2.16b, v5.16b, v17.16b
+; CHECK-NEXT:    bit v0.16b, v7.16b, v18.16b
+; CHECK-NEXT:    mov v5.16b, v19.16b
+; CHECK-NEXT:    bif v1.16b, v4.16b, v3.16b
+; CHECK-NEXT:    bsl v5.16b, v16.16b, v6.16b
+; CHECK-NEXT:    uzp2 v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    uzp2 v1.8h, v1.8h, v5.8h
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <16 x i64> %a to <16 x bfloat>
+  ret <16 x bfloat> %c
+}
+
+define <32 x bfloat> @stofp_v32i64_v32bf16(<32 x i64> %a) {
+; CHECK-LABEL: stofp_v32i64_v32bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    scvtf v16.2d, v2.2d
+; CHECK-NEXT:    scvtf v17.2d, v0.2d
+; CHECK-NEXT:    scvtf v18.2d, v3.2d
+; CHECK-NEXT:    scvtf v19.2d, v6.2d
+; CHECK-NEXT:    ldp q24, q23, [sp, #96]
+; CHECK-NEXT:    scvtf v21.2d, v1.2d
+; CHECK-NEXT:    scvtf v22.2d, v4.2d
+; CHECK-NEXT:    scvtf v6.2d, v7.2d
+; CHECK-NEXT:    scvtf v7.2d, v5.2d
+; CHECK-NEXT:    movi v3.4s, #127, msl #8
+; CHECK-NEXT:    movi v2.4s, #1
+; CHECK-NEXT:    fcvtn v0.2s, v16.2d
+; CHECK-NEXT:    ldp q20, q16, [sp, #32]
+; CHECK-NEXT:    fcvtn v1.2s, v17.2d
+; CHECK-NEXT:    ldp q5, q17, [sp]
+; CHECK-NEXT:    fcvtn v4.2s, v19.2d
+; CHECK-NEXT:    scvtf v23.2d, v23.2d
+; CHECK-NEXT:    scvtf v20.2d, v20.2d
+; CHECK-NEXT:    scvtf v16.2d, v16.2d
+; CHECK-NEXT:    fcvtn2 v0.4s, v18.2d
+; CHECK-NEXT:    ldp q19, q18, [sp, #64]
+; CHECK-NEXT:    scvtf v25.2d, v5.2d
+; CHECK-NEXT:    fcvtn v5.2s, v22.2d
+; CHECK-NEXT:    fcvtn2 v1.4s, v21.2d
+; CHECK-NEXT:    scvtf v21.2d, v24.2d
+; CHECK-NEXT:    scvtf v17.2d, v17.2d
+; CHECK-NEXT:    fcvtn2 v4.4s, v6.2d
+; CHECK-NEXT:    scvtf v19.2d, v19.2d
+; CHECK-NEXT:    scvtf v6.2d, v18.2d
+; CHECK-NEXT:    fcvtn v18.2s, v20.2d
+; CHECK-NEXT:    ushr v22.4s, v0.4s, #16
+; CHECK-NEXT:    add v20.4s, v0.4s, v3.4s
+; CHECK-NEXT:    fcvtn2 v5.4s, v7.2d
+; CHECK-NEXT:    fcvtn v24.2s, v25.2d
+; CHECK-NEXT:    ushr v7.4s, v1.4s, #16
+; CHECK-NEXT:    fcvtn v21.2s, v21.2d
+; CHECK-NEXT:    add v26.4s, v1.4s, v3.4s
+; CHECK-NEXT:    ushr v27.4s, v4.4s, #16
+; CHECK-NEXT:    fcvtn v19.2s, v19.2d
+; CHECK-NEXT:    fcvtn2 v18.4s, v16.2d
+; CHECK-NEXT:    and v22.16b, v22.16b, v2.16b
+; CHECK-NEXT:    and v7.16b, v7.16b, v2.16b
+; CHECK-NEXT:    fcmeq v25.4s, v0.4s, v0.4s
+; CHECK-NEXT:    orr v0.4s, #64, lsl #16
+; CHECK-NEXT:    ushr v28.4s, v5.4s, #16
+; CHECK-NEXT:    fcvtn2 v24.4s, v17.2d
+; CHECK-NEXT:    add v17.4s, v5.4s, v3.4s
+; CHECK-NEXT:    fcvtn2 v21.4s, v23.2d
+; CHECK-NEXT:    and v16.16b, v27.16b, v2.16b
+; CHECK-NEXT:    add v20.4s, v22.4s, v20.4s
+; CHECK-NEXT:    fcvtn2 v19.4s, v6.2d
+; CHECK-NEXT:    add v7.4s, v7.4s, v26.4s
+; CHECK-NEXT:    ushr v26.4s, v18.4s, #16
+; CHECK-NEXT:    and v23.16b, v28.16b, v2.16b
+; CHECK-NEXT:    add v22.4s, v4.4s, v3.4s
+; CHECK-NEXT:    fcmeq v6.4s, v1.4s, v1.4s
+; CHECK-NEXT:    ushr v27.4s, v24.4s, #16
+; CHECK-NEXT:    add v30.4s, v24.4s, v3.4s
+; CHECK-NEXT:    orr v1.4s, #64, lsl #16
+; CHECK-NEXT:    ushr v28.4s, v21.4s, #16
+; CHECK-NEXT:    add v31.4s, v21.4s, v3.4s
+; CHECK-NEXT:    and v26.16b, v26.16b, v2.16b
+; CHECK-NEXT:    add v17.4s, v23.4s, v17.4s
+; CHECK-NEXT:    add v23.4s, v18.4s, v3.4s
+; CHECK-NEXT:    ushr v29.4s, v19.4s, #16
+; CHECK-NEXT:    and v27.16b, v27.16b, v2.16b
+; CHECK-NEXT:    add v3.4s, v19.4s, v3.4s
+; CHECK-NEXT:    add v16.4s, v16.4s, v22.4s
+; CHECK-NEXT:    and v28.16b, v28.16b, v2.16b
+; CHECK-NEXT:    fcmeq v22.4s, v4.4s, v4.4s
+; CHECK-NEXT:    orr v4.4s, #64, lsl #16
+; CHECK-NEXT:    and v2.16b, v29.16b, v2.16b
+; CHECK-NEXT:    fcmeq v29.4s, v5.4s, v5.4s
+; CHECK-NEXT:    orr v5.4s, #64, lsl #16
+; CHECK-NEXT:    add v23.4s, v26.4s, v23.4s
+; CHECK-NEXT:    fcmeq v26.4s, v18.4s, v18.4s
+; CHECK-NEXT:    add v27.4s, v27.4s, v30.4s
+; CHECK-NEXT:    fcmeq v30.4s, v24.4s, v24.4s
+; CHECK-NEXT:    add v28.4s, v28.4s, v31.4s
+; CHECK-NEXT:    fcmeq v31.4s, v21.4s, v21.4s
+; CHECK-NEXT:    add v2.4s, v2.4s, v3.4s
+; CHECK-NEXT:    fcmeq v3.4s, v19.4s, v19.4s
+; CHECK-NEXT:    orr v18.4s, #64, lsl #16
+; CHECK-NEXT:    orr v24.4s, #64, lsl #16
+; CHECK-NEXT:    orr v21.4s, #64, lsl #16
+; CHECK-NEXT:    orr v19.4s, #64, lsl #16
+; CHECK-NEXT:    bit v1.16b, v7.16b, v6.16b
+; CHECK-NEXT:    bit v4.16b, v16.16b, v22.16b
+; CHECK-NEXT:    mov v6.16b, v26.16b
+; CHECK-NEXT:    mov v7.16b, v30.16b
+; CHECK-NEXT:    mov v16.16b, v31.16b
+; CHECK-NEXT:    bit v0.16b, v20.16b, v25.16b
+; CHECK-NEXT:    bit v5.16b, v17.16b, v29.16b
+; CHECK-NEXT:    bsl v3.16b, v2.16b, v19.16b
+; CHECK-NEXT:    bsl v6.16b, v23.16b, v18.16b
+; CHECK-NEXT:    bsl v7.16b, v27.16b, v24.16b
+; CHECK-NEXT:    bsl v16.16b, v28.16b, v21.16b
+; CHECK-NEXT:    uzp2 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT:    uzp2 v1.8h, v5.8h, v4.8h
+; CHECK-NEXT:    uzp2 v2.8h, v7.8h, v6.8h
+; CHECK-NEXT:    uzp2 v3.8h, v3.8h, v16.8h
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <32 x i64> %a to <32 x bfloat>
+  ret <32 x bfloat> %c
+}
+
+define <32 x bfloat> @utofp_v32i64_v32bf16(<32 x i64> %a) {
+; CHECK-LABEL: utofp_v32i64_v32bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ucvtf v16.2d, v2.2d
+; CHECK-NEXT:    ucvtf v17.2d, v0.2d
+; CHECK-NEXT:    ucvtf v18.2d, v3.2d
+; CHECK-NEXT:    ucvtf v19.2d, v6.2d
+; CHECK-NEXT:    ldp q24, q23, [sp, #96]
+; CHECK-NEXT:    ucvtf v21.2d, v1.2d
+; CHECK-NEXT:    ucvtf v22.2d, v4.2d
+; CHECK-NEXT:    ucvtf v6.2d, v7.2d
+; CHECK-NEXT:    ucvtf v7.2d, v5.2d
+; CHECK-NEXT:    movi v3.4s, #127, msl #8
+; CHECK-NEXT:    movi v2.4s, #1
+; CHECK-NEXT:    fcvtn v0.2s, v16.2d
+; CHECK-NEXT:    ldp q20, q16, [sp, #32]
+; CHECK-NEXT:    fcvtn v1.2s, v17.2d
+; CHECK-NEXT:    ldp q5, q17, [sp]
+; CHECK-NEXT:    fcvtn v4.2s, v19.2d
+; CHECK-NEXT:    ucvtf v23.2d, v23.2d
+; CHECK-NEXT:    ucvtf v20.2d, v20.2d
+; CHECK-NEXT:    ucvtf v16.2d, v16.2d
+; CHECK-NEXT:    fcvtn2 v0.4s, v18.2d
+; CHECK-NEXT:    ldp q19, q18, [sp, #64]
+; CHECK-NEXT:    ucvtf v25.2d, v5.2d
+; CHECK-NEXT:    fcvtn v5.2s, v22.2d
+; CHECK-NEXT:    fcvtn2 v1.4s, v21.2d
+; CHECK-NEXT:    ucvtf v21.2d, v24.2d
+; CHECK-NEXT:    ucvtf v17.2d, v17.2d
+; CHECK-NEXT:    fcvtn2 v4.4s, v6.2d
+; CHECK-NEXT:    ucvtf v19.2d, v19.2d
+; CHECK-NEXT:    ucvtf v6.2d, v18.2d
+; CHECK-NEXT:    fcvtn v18.2s, v20.2d
+; CHECK-NEXT:    ushr v22.4s, v0.4s, #16
+; CHECK-NEXT:    add v20.4s, v0.4s, v3.4s
+; CHECK-NEXT:    fcvtn2 v5.4s, v7.2d
+; CHECK-NEXT:    fcvtn v24.2s, v25.2d
+; CHECK-NEXT:    ushr v7.4s, v1.4s, #16
+; CHECK-NEXT:    fcvtn v21.2s, v21.2d
+; CHECK-NEXT:    add v26.4s, v1.4s, v3.4s
+; CHECK-NEXT:    ushr v27.4s, v4.4s, #16
+; CHECK-NEXT:    fcvtn v19.2s, v19.2d
+; CHECK-NEXT:    fcvtn2 v18.4s, v16.2d
+; CHECK-NEXT:    and v22.16b, v22.16b, v2.16b
+; CHECK-NEXT:    and v7.16b, v7.16b, v2.16b
+; CHECK-NEXT:    fcmeq v25.4s, v0.4s, v0.4s
+; CHECK-NEXT:    orr v0.4s, #64, lsl #16
+; CHECK-NEXT:    ushr v28.4s, v5.4s, #16
+; CHECK-NEXT:    fcvtn2 v24.4s, v17.2d
+; CHECK-NEXT:    add v17.4s, v5.4s, v3.4s
+; CHECK-NEXT:    fcvtn2 v21.4s, v23.2d
+; CHECK-NEXT:    and v16.16b, v27.16b, v2.16b
+; CHECK-NEXT:    add v20.4s, v22.4s, v20.4s
+; CHECK-NEXT:    fcvtn2 v19.4s, v6.2d
+; CHECK-NEXT:    add v7.4s, v7.4s, v26.4s
+; CHECK-NEXT:    ushr v26.4s, v18.4s, #16
+; CHECK-NEXT:    and v23.16b, v28.16b, v2.16b
+; CHECK-NEXT:    add v22.4s, v4.4s, v3.4s
+; CHECK-NEXT:    fcmeq v6.4s, v1.4s, v1.4s
+; CHECK-NEXT:    ushr v27.4s, v24.4s, #16
+; CHECK-NEXT:    add v30.4s, v24.4s, v3.4s
+; CHECK-NEXT:    orr v1.4s, #64, lsl #16
+; CHECK-NEXT:    ushr v28.4s, v21.4s, #16
+; CHECK-NEXT:    add v31.4s, v21.4s, v3.4s
+; CHECK-NEXT:    and v26.16b, v26.16b, v2.16b
+; CHECK-NEXT:    add v17.4s, v23.4s, v17.4s
+; CHECK-NEXT:    add v23.4s, v18.4s, v3.4s
+; CHECK-NEXT:    ushr v29.4s, v19.4s, #16
+; CHECK-NEXT:    and v27.16b, v27.16b, v2.16b
+; CHECK-NEXT:    add v3.4s, v19.4s, v3.4s
+; CHECK-NEXT:    add v16.4s, v16.4s, v22.4s
+; CHECK-NEXT:    and v28.16b, v28.16b, v2.16b
+; CHECK-NEXT:    fcmeq v22.4s, v4.4s, v4.4s
+; CHECK-NEXT:    orr v4.4s, #64, lsl #16
+; CHECK-NEXT:    and v2.16b, v29.16b, v2.16b
+; CHECK-NEXT:    fcmeq v29.4s, v5.4s, v5.4s
+; CHECK-NEXT:    orr v5.4s, #64, lsl #16
+; CHECK-NEXT:    add v23.4s, v26.4s, v23.4s
+; CHECK-NEXT:    fcmeq v26.4s, v18.4s, v18.4s
+; CHECK-NEXT:    add v27.4s, v27.4s, v30.4s
+; CHECK-NEXT:    fcmeq v30.4s, v24.4s, v24.4s
+; CHECK-NEXT:    add v28.4s, v28.4s, v31.4s
+; CHECK-NEXT:    fcmeq v31.4s, v21.4s, v21.4s
+; CHECK-NEXT:    add v2.4s, v2.4s, v3.4s
+; CHECK-NEXT:    fcmeq v3.4s, v19.4s, v19.4s
+; CHECK-NEXT:    orr v18.4s, #64, lsl #16
+; CHECK-NEXT:    orr v24.4s, #64, lsl #16
+; CHECK-NEXT:    orr v21.4s, #64, lsl #16
+; CHECK-NEXT:    orr v19.4s, #64, lsl #16
+; CHECK-NEXT:    bit v1.16b, v7.16b, v6.16b
+; CHECK-NEXT:    bit v4.16b, v16.16b, v22.16b
+; CHECK-NEXT:    mov v6.16b, v26.16b
+; CHECK-NEXT:    mov v7.16b, v30.16b
+; CHECK-NEXT:    mov v16.16b, v31.16b
+; CHECK-NEXT:    bit v0.16b, v20.16b, v25.16b
+; CHECK-NEXT:    bit v5.16b, v17.16b, v29.16b
+; CHECK-NEXT:    bsl v3.16b, v2.16b, v19.16b
+; CHECK-NEXT:    bsl v6.16b, v23.16b, v18.16b
+; CHECK-NEXT:    bsl v7.16b, v27.16b, v24.16b
+; CHECK-NEXT:    bsl v16.16b, v28.16b, v21.16b
+; CHECK-NEXT:    uzp2 v0.8h, v1.8h, v0.8h
+; CHECK-NEXT:    uzp2 v1.8h, v5.8h, v4.8h
+; CHECK-NEXT:    uzp2 v2.8h, v7.8h, v6.8h
+; CHECK-NEXT:    uzp2 v3.8h, v3.8h, v16.8h
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <32 x i64> %a to <32 x bfloat>
+  ret <32 x bfloat> %c
+}
+
+define <2 x bfloat> @stofp_v2i32_v2bf16(<2 x i32> %a) {
+; CHECK-LABEL: stofp_v2i32_v2bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <2 x i32> %a to <2 x bfloat>
+  ret <2 x bfloat> %c
+}
+
+define <2 x bfloat> @utofp_v2i32_v2bf16(<2 x i32> %a) {
+; CHECK-LABEL: utofp_v2i32_v2bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <2 x i32> %a to <2 x bfloat>
+  ret <2 x bfloat> %c
+}
+
+define <3 x bfloat> @stofp_v3i32_v3bf16(<3 x i32> %a) {
+; CHECK-LABEL: stofp_v3i32_v3bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <3 x i32> %a to <3 x bfloat>
+  ret <3 x bfloat> %c
+}
+
+define <3 x bfloat> @utofp_v3i32_v3bf16(<3 x i32> %a) {
+; CHECK-LABEL: utofp_v3i32_v3bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <3 x i32> %a to <3 x bfloat>
+  ret <3 x bfloat> %c
+}
+
+define <4 x bfloat> @stofp_v4i32_v4bf16(<4 x i32> %a) {
+; CHECK-LABEL: stofp_v4i32_v4bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <4 x i32> %a to <4 x bfloat>
+  ret <4 x bfloat> %c
+}
+
+define <4 x bfloat> @utofp_v4i32_v4bf16(<4 x i32> %a) {
+; CHECK-LABEL: utofp_v4i32_v4bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <4 x i32> %a to <4 x bfloat>
+  ret <4 x bfloat> %c
+}
+
+define <8 x bfloat> @stofp_v8i32_v8bf16(<8 x i32> %a) {
+; CHECK-LABEL: stofp_v8i32_v8bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    movi v2.4s, #1
+; CHECK-NEXT:    scvtf v1.4s, v1.4s
+; CHECK-NEXT:    ushr v3.4s, v0.4s, #16
+; CHECK-NEXT:    ushr v4.4s, v1.4s, #16
+; CHECK-NEXT:    and v3.16b, v3.16b, v2.16b
+; CHECK-NEXT:    and v2.16b, v4.16b, v2.16b
+; CHECK-NEXT:    add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT:    movi v3.4s, #127, msl #8
+; CHECK-NEXT:    add v1.4s, v2.4s, v1.4s
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v3.4s
+; CHECK-NEXT:    addhn2 v0.8h, v1.4s, v3.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <8 x i32> %a to <8 x bfloat>
+  ret <8 x bfloat> %c
+}
+
+define <8 x bfloat> @utofp_v8i32_v8bf16(<8 x i32> %a) {
+; CHECK-LABEL: utofp_v8i32_v8bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    movi v2.4s, #1
+; CHECK-NEXT:    ucvtf v1.4s, v1.4s
+; CHECK-NEXT:    ushr v3.4s, v0.4s, #16
+; CHECK-NEXT:    ushr v4.4s, v1.4s, #16
+; CHECK-NEXT:    and v3.16b, v3.16b, v2.16b
+; CHECK-NEXT:    and v2.16b, v4.16b, v2.16b
+; CHECK-NEXT:    add v0.4s, v3.4s, v0.4s
+; CHECK-NEXT:    movi v3.4s, #127, msl #8
+; CHECK-NEXT:    add v1.4s, v2.4s, v1.4s
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v3.4s
+; CHECK-NEXT:    addhn2 v0.8h, v1.4s, v3.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <8 x i32> %a to <8 x bfloat>
+  ret <8 x bfloat> %c
+}
+
+define <16 x bfloat> @stofp_v16i32_v16bf16(<16 x i32> %a) {
+; CHECK-LABEL: stofp_v16i32_v16bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    scvtf v2.4s, v2.4s
+; CHECK-NEXT:    movi v4.4s, #1
+; CHECK-NEXT:    scvtf v1.4s, v1.4s
+; CHECK-NEXT:    scvtf v3.4s, v3.4s
+; CHECK-NEXT:    ushr v5.4s, v0.4s, #16
+; CHECK-NEXT:    ushr v6.4s, v2.4s, #16
+; CHECK-NEXT:    ushr v7.4s, v1.4s, #16
+; CHECK-NEXT:    ushr v16.4s, v3.4s, #16
+; CHECK-NEXT:    and v5.16b, v5.16b, v4.16b
+; CHECK-NEXT:    and v6.16b, v6.16b, v4.16b
+; CHECK-NEXT:    add v0.4s, v5.4s, v0.4s
+; CHECK-NEXT:    add v2.4s, v6.4s, v2.4s
+; CHECK-NEXT:    movi v6.4s, #127, msl #8
+; CHECK-NEXT:    and v5.16b, v7.16b, v4.16b
+; CHECK-NEXT:    and v4.16b, v16.16b, v4.16b
+; CHECK-NEXT:    add v5.4s, v5.4s, v1.4s
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v6.4s
+; CHECK-NEXT:    add v3.4s, v4.4s, v3.4s
+; CHECK-NEXT:    addhn v1.4h, v2.4s, v6.4s
+; CHECK-NEXT:    addhn2 v0.8h, v5.4s, v6.4s
+; CHECK-NEXT:    addhn2 v1.8h, v3.4s, v6.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <16 x i32> %a to <16 x bfloat>
+  ret <16 x bfloat> %c
+}
+
+define <16 x bfloat> @utofp_v16i32_v16bf16(<16 x i32> %a) {
+; CHECK-LABEL: utofp_v16i32_v16bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    ucvtf v2.4s, v2.4s
+; CHECK-NEXT:    movi v4.4s, #1
+; CHECK-NEXT:    ucvtf v1.4s, v1.4s
+; CHECK-NEXT:    ucvtf v3.4s, v3.4s
+; CHECK-NEXT:    ushr v5.4s, v0.4s, #16
+; CHECK-NEXT:    ushr v6.4s, v2.4s, #16
+; CHECK-NEXT:    ushr v7.4s, v1.4s, #16
+; CHECK-NEXT:    ushr v16.4s, v3.4s, #16
+; CHECK-NEXT:    and v5.16b, v5.16b, v4.16b
+; CHECK-NEXT:    and v6.16b, v6.16b, v4.16b
+; CHECK-NEXT:    add v0.4s, v5.4s, v0.4s
+; CHECK-NEXT:    add v2.4s, v6.4s, v2.4s
+; CHECK-NEXT:    movi v6.4s, #127, msl #8
+; CHECK-NEXT:    and v5.16b, v7.16b, v4.16b
+; CHECK-NEXT:    and v4.16b, v16.16b, v4.16b
+; CHECK-NEXT:    add v5.4s, v5.4s, v1.4s
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v6.4s
+; CHECK-NEXT:    add v3.4s, v4.4s, v3.4s
+; CHECK-NEXT:    addhn v1.4h, v2.4s, v6.4s
+; CHECK-NEXT:    addhn2 v0.8h, v5.4s, v6.4s
+; CHECK-NEXT:    addhn2 v1.8h, v3.4s, v6.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <16 x i32> %a to <16 x bfloat>
+  ret <16 x bfloat> %c
+}
+
+define <32 x bfloat> @stofp_v32i32_v32bf16(<32 x i32> %a) {
+; CHECK-LABEL: stofp_v32i32_v32bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    scvtf v2.4s, v2.4s
+; CHECK-NEXT:    scvtf v4.4s, v4.4s
+; CHECK-NEXT:    scvtf v6.4s, v6.4s
+; CHECK-NEXT:    movi v16.4s, #1
+; CHECK-NEXT:    scvtf v1.4s, v1.4s
+; CHECK-NEXT:    scvtf v3.4s, v3.4s
+; CHECK-NEXT:    scvtf v5.4s, v5.4s
+; CHECK-NEXT:    scvtf v7.4s, v7.4s
+; CHECK-NEXT:    ushr v17.4s, v0.4s, #16
+; CHECK-NEXT:    ushr v18.4s, v2.4s, #16
+; CHECK-NEXT:    ushr v19.4s, v4.4s, #16
+; CHECK-NEXT:    ushr v20.4s, v6.4s, #16
+; CHECK-NEXT:    ushr v21.4s, v1.4s, #16
+; CHECK-NEXT:    ushr v22.4s, v3.4s, #16
+; CHECK-NEXT:    ushr v23.4s, v5.4s, #16
+; CHECK-NEXT:    and v17.16b, v17.16b, v16.16b
+; CHECK-NEXT:    and v18.16b, v18.16b, v16.16b
+; CHECK-NEXT:    and v19.16b, v19.16b, v16.16b
+; CHECK-NEXT:    and v20.16b, v20.16b, v16.16b
+; CHECK-NEXT:    and v21.16b, v21.16b, v16.16b
+; CHECK-NEXT:    and v22.16b, v22.16b, v16.16b
+; CHECK-NEXT:    add v0.4s, v17.4s, v0.4s
+; CHECK-NEXT:    ushr v17.4s, v7.4s, #16
+; CHECK-NEXT:    add v2.4s, v18.4s, v2.4s
+; CHECK-NEXT:    movi v18.4s, #127, msl #8
+; CHECK-NEXT:    add v4.4s, v19.4s, v4.4s
+; CHECK-NEXT:    add v6.4s, v20.4s, v6.4s
+; CHECK-NEXT:    and v19.16b, v23.16b, v16.16b
+; CHECK-NEXT:    add v20.4s, v22.4s, v3.4s
+; CHECK-NEXT:    and v16.16b, v17.16b, v16.16b
+; CHECK-NEXT:    add v17.4s, v21.4s, v1.4s
+; CHECK-NEXT:    add v5.4s, v19.4s, v5.4s
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v18.4s
+; CHECK-NEXT:    addhn v1.4h, v2.4s, v18.4s
+; CHECK-NEXT:    addhn v2.4h, v4.4s, v18.4s
+; CHECK-NEXT:    add v4.4s, v16.4s, v7.4s
+; CHECK-NEXT:    addhn v3.4h, v6.4s, v18.4s
+; CHECK-NEXT:    addhn2 v0.8h, v17.4s, v18.4s
+; CHECK-NEXT:    addhn2 v1.8h, v20.4s, v18.4s
+; CHECK-NEXT:    addhn2 v2.8h, v5.4s, v18.4s
+; CHECK-NEXT:    addhn2 v3.8h, v4.4s, v18.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <32 x i32> %a to <32 x bfloat>
+  ret <32 x bfloat> %c
+}
+
+define <32 x bfloat> @utofp_v32i32_v32bf16(<32 x i32> %a) {
+; CHECK-LABEL: utofp_v32i32_v32bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    ucvtf v2.4s, v2.4s
+; CHECK-NEXT:    ucvtf v4.4s, v4.4s
+; CHECK-NEXT:    ucvtf v6.4s, v6.4s
+; CHECK-NEXT:    movi v16.4s, #1
+; CHECK-NEXT:    ucvtf v1.4s, v1.4s
+; CHECK-NEXT:    ucvtf v3.4s, v3.4s
+; CHECK-NEXT:    ucvtf v5.4s, v5.4s
+; CHECK-NEXT:    ucvtf v7.4s, v7.4s
+; CHECK-NEXT:    ushr v17.4s, v0.4s, #16
+; CHECK-NEXT:    ushr v18.4s, v2.4s, #16
+; CHECK-NEXT:    ushr v19.4s, v4.4s, #16
+; CHECK-NEXT:    ushr v20.4s, v6.4s, #16
+; CHECK-NEXT:    ushr v21.4s, v1.4s, #16
+; CHECK-NEXT:    ushr v22.4s, v3.4s, #16
+; CHECK-NEXT:    ushr v23.4s, v5.4s, #16
+; CHECK-NEXT:    and v17.16b, v17.16b, v16.16b
+; CHECK-NEXT:    and v18.16b, v18.16b, v16.16b
+; CHECK-NEXT:    and v19.16b, v19.16b, v16.16b
+; CHECK-NEXT:    and v20.16b, v20.16b, v16.16b
+; CHECK-NEXT:    and v21.16b, v21.16b, v16.16b
+; CHECK-NEXT:    and v22.16b, v22.16b, v16.16b
+; CHECK-NEXT:    add v0.4s, v17.4s, v0.4s
+; CHECK-NEXT:    ushr v17.4s, v7.4s, #16
+; CHECK-NEXT:    add v2.4s, v18.4s, v2.4s
+; CHECK-NEXT:    movi v18.4s, #127, msl #8
+; CHECK-NEXT:    add v4.4s, v19.4s, v4.4s
+; CHECK-NEXT:    add v6.4s, v20.4s, v6.4s
+; CHECK-NEXT:    and v19.16b, v23.16b, v16.16b
+; CHECK-NEXT:    add v20.4s, v22.4s, v3.4s
+; CHECK-NEXT:    and v16.16b, v17.16b, v16.16b
+; CHECK-NEXT:    add v17.4s, v21.4s, v1.4s
+; CHECK-NEXT:    add v5.4s, v19.4s, v5.4s
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v18.4s
+; CHECK-NEXT:    addhn v1.4h, v2.4s, v18.4s
+; CHECK-NEXT:    addhn v2.4h, v4.4s, v18.4s
+; CHECK-NEXT:    add v4.4s, v16.4s, v7.4s
+; CHECK-NEXT:    addhn v3.4h, v6.4s, v18.4s
+; CHECK-NEXT:    addhn2 v0.8h, v17.4s, v18.4s
+; CHECK-NEXT:    addhn2 v1.8h, v20.4s, v18.4s
+; CHECK-NEXT:    addhn2 v2.8h, v5.4s, v18.4s
+; CHECK-NEXT:    addhn2 v3.8h, v4.4s, v18.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <32 x i32> %a to <32 x bfloat>
+  ret <32 x bfloat> %c
+}
+
+define <2 x bfloat> @stofp_v2i16_v2bf16(<2 x i16> %a) {
+; CHECK-LABEL: stofp_v2i16_v2bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    uzp1 v0.4h, v0.4h, v0.4h
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    sshll v0.4s, v0.4h, #0
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <2 x i16> %a to <2 x bfloat>
+  ret <2 x bfloat> %c
+}
+
+define <2 x bfloat> @utofp_v2i16_v2bf16(<2 x i16> %a) {
+; CHECK-LABEL: utofp_v2i16_v2bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    uzp1 v0.4h, v0.4h, v0.4h
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <2 x i16> %a to <2 x bfloat>
+  ret <2 x bfloat> %c
+}
+
+define <3 x bfloat> @stofp_v3i16_v3bf16(<3 x i16> %a) {
+; CHECK-LABEL: stofp_v3i16_v3bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sshll v0.4s, v0.4h, #0
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <3 x i16> %a to <3 x bfloat>
+  ret <3 x bfloat> %c
+}
+
+define <3 x bfloat> @utofp_v3i16_v3bf16(<3 x i16> %a) {
+; CHECK-LABEL: utofp_v3i16_v3bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <3 x i16> %a to <3 x bfloat>
+  ret <3 x bfloat> %c
+}
+
+define <4 x bfloat> @stofp_v4i16_v4bf16(<4 x i16> %a) {
+; CHECK-LABEL: stofp_v4i16_v4bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sshll v0.4s, v0.4h, #0
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <4 x i16> %a to <4 x bfloat>
+  ret <4 x bfloat> %c
+}
+
+define <4 x bfloat> @utofp_v4i16_v4bf16(<4 x i16> %a) {
+; CHECK-LABEL: utofp_v4i16_v4bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <4 x i16> %a to <4 x bfloat>
+  ret <4 x bfloat> %c
+}
+
+define <8 x bfloat> @stofp_v8i16_v8bf16(<8 x i16> %a) {
+; CHECK-LABEL: stofp_v8i16_v8bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sshll v2.4s, v0.4h, #0
+; CHECK-NEXT:    sshll2 v0.4s, v0.8h, #0
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    movi v4.4s, #127, msl #8
+; CHECK-NEXT:    scvtf v2.4s, v2.4s
+; CHECK-NEXT:    scvtf v3.4s, v0.4s
+; CHECK-NEXT:    ushr v0.4s, v2.4s, #16
+; CHECK-NEXT:    ushr v5.4s, v3.4s, #16
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    and v1.16b, v5.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v0.4s, v4.4s
+; CHECK-NEXT:    add v1.4s, v1.4s, v4.4s
+; CHECK-NEXT:    addhn v0.4h, v2.4s, v0.4s
+; CHECK-NEXT:    addhn2 v0.8h, v3.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <8 x i16> %a to <8 x bfloat>
+  ret <8 x bfloat> %c
+}
+
+define <8 x bfloat> @utofp_v8i16_v8bf16(<8 x i16> %a) {
+; CHECK-LABEL: utofp_v8i16_v8bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ushll v2.4s, v0.4h, #0
+; CHECK-NEXT:    ushll2 v0.4s, v0.8h, #0
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    movi v4.4s, #127, msl #8
+; CHECK-NEXT:    ucvtf v2.4s, v2.4s
+; CHECK-NEXT:    ucvtf v3.4s, v0.4s
+; CHECK-NEXT:    ushr v0.4s, v2.4s, #16
+; CHECK-NEXT:    ushr v5.4s, v3.4s, #16
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    and v1.16b, v5.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v0.4s, v4.4s
+; CHECK-NEXT:    add v1.4s, v1.4s, v4.4s
+; CHECK-NEXT:    addhn v0.4h, v2.4s, v0.4s
+; CHECK-NEXT:    addhn2 v0.8h, v3.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <8 x i16> %a to <8 x bfloat>
+  ret <8 x bfloat> %c
+}
+
+define <16 x bfloat> @stofp_v16i16_v16bf16(<16 x i16> %a) {
+; CHECK-LABEL: stofp_v16i16_v16bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sshll v3.4s, v0.4h, #0
+; CHECK-NEXT:    sshll v4.4s, v1.4h, #0
+; CHECK-NEXT:    sshll2 v0.4s, v0.8h, #0
+; CHECK-NEXT:    sshll2 v1.4s, v1.8h, #0
+; CHECK-NEXT:    movi v2.4s, #1
+; CHECK-NEXT:    scvtf v3.4s, v3.4s
+; CHECK-NEXT:    scvtf v4.4s, v4.4s
+; CHECK-NEXT:    scvtf v6.4s, v0.4s
+; CHECK-NEXT:    scvtf v7.4s, v1.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    ushr v5.4s, v3.4s, #16
+; CHECK-NEXT:    ushr v0.4s, v4.4s, #16
+; CHECK-NEXT:    ushr v16.4s, v6.4s, #16
+; CHECK-NEXT:    ushr v17.4s, v7.4s, #16
+; CHECK-NEXT:    and v5.16b, v5.16b, v2.16b
+; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    and v16.16b, v16.16b, v2.16b
+; CHECK-NEXT:    and v2.16b, v17.16b, v2.16b
+; CHECK-NEXT:    add v5.4s, v5.4s, v1.4s
+; CHECK-NEXT:    add v18.4s, v0.4s, v1.4s
+; CHECK-NEXT:    add v2.4s, v2.4s, v1.4s
+; CHECK-NEXT:    addhn v0.4h, v3.4s, v5.4s
+; CHECK-NEXT:    add v3.4s, v16.4s, v1.4s
+; CHECK-NEXT:    addhn v1.4h, v4.4s, v18.4s
+; CHECK-NEXT:    addhn2 v0.8h, v6.4s, v3.4s
+; CHECK-NEXT:    addhn2 v1.8h, v7.4s, v2.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <16 x i16> %a to <16 x bfloat>
+  ret <16 x bfloat> %c
+}
+
+define <16 x bfloat> @utofp_v16i16_v16bf16(<16 x i16> %a) {
+; CHECK-LABEL: utofp_v16i16_v16bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ushll v3.4s, v0.4h, #0
+; CHECK-NEXT:    ushll v4.4s, v1.4h, #0
+; CHECK-NEXT:    ushll2 v0.4s, v0.8h, #0
+; CHECK-NEXT:    ushll2 v1.4s, v1.8h, #0
+; CHECK-NEXT:    movi v2.4s, #1
+; CHECK-NEXT:    ucvtf v3.4s, v3.4s
+; CHECK-NEXT:    ucvtf v4.4s, v4.4s
+; CHECK-NEXT:    ucvtf v6.4s, v0.4s
+; CHECK-NEXT:    ucvtf v7.4s, v1.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    ushr v5.4s, v3.4s, #16
+; CHECK-NEXT:    ushr v0.4s, v4.4s, #16
+; CHECK-NEXT:    ushr v16.4s, v6.4s, #16
+; CHECK-NEXT:    ushr v17.4s, v7.4s, #16
+; CHECK-NEXT:    and v5.16b, v5.16b, v2.16b
+; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    and v16.16b, v16.16b, v2.16b
+; CHECK-NEXT:    and v2.16b, v17.16b, v2.16b
+; CHECK-NEXT:    add v5.4s, v5.4s, v1.4s
+; CHECK-NEXT:    add v18.4s, v0.4s, v1.4s
+; CHECK-NEXT:    add v2.4s, v2.4s, v1.4s
+; CHECK-NEXT:    addhn v0.4h, v3.4s, v5.4s
+; CHECK-NEXT:    add v3.4s, v16.4s, v1.4s
+; CHECK-NEXT:    addhn v1.4h, v4.4s, v18.4s
+; CHECK-NEXT:    addhn2 v0.8h, v6.4s, v3.4s
+; CHECK-NEXT:    addhn2 v1.8h, v7.4s, v2.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <16 x i16> %a to <16 x bfloat>
+  ret <16 x bfloat> %c
+}
+
+define <32 x bfloat> @stofp_v32i16_v32bf16(<32 x i16> %a) {
+; CHECK-LABEL: stofp_v32i16_v32bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sshll v5.4s, v1.4h, #0
+; CHECK-NEXT:    sshll v4.4s, v0.4h, #0
+; CHECK-NEXT:    sshll v6.4s, v2.4h, #0
+; CHECK-NEXT:    sshll v7.4s, v3.4h, #0
+; CHECK-NEXT:    sshll2 v0.4s, v0.8h, #0
+; CHECK-NEXT:    movi v16.4s, #1
+; CHECK-NEXT:    sshll2 v1.4s, v1.8h, #0
+; CHECK-NEXT:    sshll2 v2.4s, v2.8h, #0
+; CHECK-NEXT:    sshll2 v3.4s, v3.8h, #0
+; CHECK-NEXT:    scvtf v5.4s, v5.4s
+; CHECK-NEXT:    scvtf v4.4s, v4.4s
+; CHECK-NEXT:    scvtf v6.4s, v6.4s
+; CHECK-NEXT:    scvtf v7.4s, v7.4s
+; CHECK-NEXT:    scvtf v19.4s, v0.4s
+; CHECK-NEXT:    movi v18.4s, #127, msl #8
+; CHECK-NEXT:    scvtf v20.4s, v1.4s
+; CHECK-NEXT:    scvtf v21.4s, v2.4s
+; CHECK-NEXT:    scvtf v22.4s, v3.4s
+; CHECK-NEXT:    ushr v0.4s, v5.4s, #16
+; CHECK-NEXT:    ushr v17.4s, v4.4s, #16
+; CHECK-NEXT:    ushr v1.4s, v6.4s, #16
+; CHECK-NEXT:    ushr v2.4s, v7.4s, #16
+; CHECK-NEXT:    ushr v23.4s, v20.4s, #16
+; CHECK-NEXT:    ushr v25.4s, v22.4s, #16
+; CHECK-NEXT:    and v0.16b, v0.16b, v16.16b
+; CHECK-NEXT:    and v3.16b, v17.16b, v16.16b
+; CHECK-NEXT:    and v1.16b, v1.16b, v16.16b
+; CHECK-NEXT:    and v2.16b, v2.16b, v16.16b
+; CHECK-NEXT:    ushr v17.4s, v19.4s, #16
+; CHECK-NEXT:    and v23.16b, v23.16b, v16.16b
+; CHECK-NEXT:    add v24.4s, v0.4s, v18.4s
+; CHECK-NEXT:    ushr v0.4s, v21.4s, #16
+; CHECK-NEXT:    add v3.4s, v3.4s, v18.4s
+; CHECK-NEXT:    add v26.4s, v1.4s, v18.4s
+; CHECK-NEXT:    add v27.4s, v2.4s, v18.4s
+; CHECK-NEXT:    and v17.16b, v17.16b, v16.16b
+; CHECK-NEXT:    and v28.16b, v0.16b, v16.16b
+; CHECK-NEXT:    and v16.16b, v25.16b, v16.16b
+; CHECK-NEXT:    addhn v0.4h, v4.4s, v3.4s
+; CHECK-NEXT:    addhn v1.4h, v5.4s, v24.4s
+; CHECK-NEXT:    add v4.4s, v17.4s, v18.4s
+; CHECK-NEXT:    addhn v2.4h, v6.4s, v26.4s
+; CHECK-NEXT:    add v5.4s, v23.4s, v18.4s
+; CHECK-NEXT:    addhn v3.4h, v7.4s, v27.4s
+; CHECK-NEXT:    add v6.4s, v28.4s, v18.4s
+; CHECK-NEXT:    add v16.4s, v16.4s, v18.4s
+; CHECK-NEXT:    addhn2 v0.8h, v19.4s, v4.4s
+; CHECK-NEXT:    addhn2 v1.8h, v20.4s, v5.4s
+; CHECK-NEXT:    addhn2 v2.8h, v21.4s, v6.4s
+; CHECK-NEXT:    addhn2 v3.8h, v22.4s, v16.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <32 x i16> %a to <32 x bfloat>
+  ret <32 x bfloat> %c
+}
+
+define <32 x bfloat> @utofp_v32i16_v32bf16(<32 x i16> %a) {
+; CHECK-LABEL: utofp_v32i16_v32bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ushll v5.4s, v1.4h, #0
+; CHECK-NEXT:    ushll v4.4s, v0.4h, #0
+; CHECK-NEXT:    ushll v6.4s, v2.4h, #0
+; CHECK-NEXT:    ushll v7.4s, v3.4h, #0
+; CHECK-NEXT:    ushll2 v0.4s, v0.8h, #0
+; CHECK-NEXT:    movi v16.4s, #1
+; CHECK-NEXT:    ushll2 v1.4s, v1.8h, #0
+; CHECK-NEXT:    ushll2 v2.4s, v2.8h, #0
+; CHECK-NEXT:    ushll2 v3.4s, v3.8h, #0
+; CHECK-NEXT:    ucvtf v5.4s, v5.4s
+; CHECK-NEXT:    ucvtf v4.4s, v4.4s
+; CHECK-NEXT:    ucvtf v6.4s, v6.4s
+; CHECK-NEXT:    ucvtf v7.4s, v7.4s
+; CHECK-NEXT:    ucvtf v19.4s, v0.4s
+; CHECK-NEXT:    movi v18.4s, #127, msl #8
+; CHECK-NEXT:    ucvtf v20.4s, v1.4s
+; CHECK-NEXT:    ucvtf v21.4s, v2.4s
+; CHECK-NEXT:    ucvtf v22.4s, v3.4s
+; CHECK-NEXT:    ushr v0.4s, v5.4s, #16
+; CHECK-NEXT:    ushr v17.4s, v4.4s, #16
+; CHECK-NEXT:    ushr v1.4s, v6.4s, #16
+; CHECK-NEXT:    ushr v2.4s, v7.4s, #16
+; CHECK-NEXT:    ushr v23.4s, v20.4s, #16
+; CHECK-NEXT:    ushr v25.4s, v22.4s, #16
+; CHECK-NEXT:    and v0.16b, v0.16b, v16.16b
+; CHECK-NEXT:    and v3.16b, v17.16b, v16.16b
+; CHECK-NEXT:    and v1.16b, v1.16b, v16.16b
+; CHECK-NEXT:    and v2.16b, v2.16b, v16.16b
+; CHECK-NEXT:    ushr v17.4s, v19.4s, #16
+; CHECK-NEXT:    and v23.16b, v23.16b, v16.16b
+; CHECK-NEXT:    add v24.4s, v0.4s, v18.4s
+; CHECK-NEXT:    ushr v0.4s, v21.4s, #16
+; CHECK-NEXT:    add v3.4s, v3.4s, v18.4s
+; CHECK-NEXT:    add v26.4s, v1.4s, v18.4s
+; CHECK-NEXT:    add v27.4s, v2.4s, v18.4s
+; CHECK-NEXT:    and v17.16b, v17.16b, v16.16b
+; CHECK-NEXT:    and v28.16b, v0.16b, v16.16b
+; CHECK-NEXT:    and v16.16b, v25.16b, v16.16b
+; CHECK-NEXT:    addhn v0.4h, v4.4s, v3.4s
+; CHECK-NEXT:    addhn v1.4h, v5.4s, v24.4s
+; CHECK-NEXT:    add v4.4s, v17.4s, v18.4s
+; CHECK-NEXT:    addhn v2.4h, v6.4s, v26.4s
+; CHECK-NEXT:    add v5.4s, v23.4s, v18.4s
+; CHECK-NEXT:    addhn v3.4h, v7.4s, v27.4s
+; CHECK-NEXT:    add v6.4s, v28.4s, v18.4s
+; CHECK-NEXT:    add v16.4s, v16.4s, v18.4s
+; CHECK-NEXT:    addhn2 v0.8h, v19.4s, v4.4s
+; CHECK-NEXT:    addhn2 v1.8h, v20.4s, v5.4s
+; CHECK-NEXT:    addhn2 v2.8h, v21.4s, v6.4s
+; CHECK-NEXT:    addhn2 v3.8h, v22.4s, v16.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <32 x i16> %a to <32 x bfloat>
+  ret <32 x bfloat> %c
+}
+
+define <2 x bfloat> @stofp_v2i8_v2bf16(<2 x i8> %a) {
+; CHECK-LABEL: stofp_v2i8_v2bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov w9, v0.s[1]
+; CHECK-NEXT:    fmov w10, s0
+; CHECK-NEXT:    mov w8, #32767 // =0x7fff
+; CHECK-NEXT:    sxtb w10, w10
+; CHECK-NEXT:    sxtb w9, w9
+; CHECK-NEXT:    scvtf s1, w10
+; CHECK-NEXT:    scvtf s0, w9
+; CHECK-NEXT:    fmov w10, s1
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    ubfx w12, w10, #16, #1
+; CHECK-NEXT:    ubfx w11, w9, #16, #1
+; CHECK-NEXT:    add w9, w9, w8
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    add w8, w12, w8
+; CHECK-NEXT:    add w9, w11, w9
+; CHECK-NEXT:    lsr w8, w8, #16
+; CHECK-NEXT:    lsr w9, w9, #16
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    fmov s1, w9
+; CHECK-NEXT:    mov v0.h[1], v1.h[0]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <2 x i8> %a to <2 x bfloat>
+  ret <2 x bfloat> %c
+}
+
+define <2 x bfloat> @utofp_v2i8_v2bf16(<2 x i8> %a) {
+; CHECK-LABEL: utofp_v2i8_v2bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov w9, v0.s[1]
+; CHECK-NEXT:    fmov w10, s0
+; CHECK-NEXT:    mov w8, #32767 // =0x7fff
+; CHECK-NEXT:    and w10, w10, #0xff
+; CHECK-NEXT:    and w9, w9, #0xff
+; CHECK-NEXT:    ucvtf s1, w10
+; CHECK-NEXT:    ucvtf s0, w9
+; CHECK-NEXT:    fmov w10, s1
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    ubfx w12, w10, #16, #1
+; CHECK-NEXT:    ubfx w11, w9, #16, #1
+; CHECK-NEXT:    add w9, w9, w8
+; CHECK-NEXT:    add w8, w10, w8
+; CHECK-NEXT:    add w8, w12, w8
+; CHECK-NEXT:    add w9, w11, w9
+; CHECK-NEXT:    lsr w8, w8, #16
+; CHECK-NEXT:    lsr w9, w9, #16
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    fmov s1, w9
+; CHECK-NEXT:    mov v0.h[1], v1.h[0]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <2 x i8> %a to <2 x bfloat>
+  ret <2 x bfloat> %c
+}
+
+define <3 x bfloat> @stofp_v3i8_v3bf16(<3 x i8> %a) {
+; CHECK-LABEL: stofp_v3i8_v3bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmov s0, w0
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    mov v0.h[1], w1
+; CHECK-NEXT:    mov v0.h[2], w2
+; CHECK-NEXT:    shl v0.4h, v0.4h, #8
+; CHECK-NEXT:    sshr v0.4h, v0.4h, #8
+; CHECK-NEXT:    sshll v0.4s, v0.4h, #0
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <3 x i8> %a to <3 x bfloat>
+  ret <3 x bfloat> %c
+}
+
+define <3 x bfloat> @utofp_v3i8_v3bf16(<3 x i8> %a) {
+; CHECK-LABEL: utofp_v3i8_v3bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    fmov s0, w0
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    mov v0.h[1], w1
+; CHECK-NEXT:    mov v0.h[2], w2
+; CHECK-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <3 x i8> %a to <3 x bfloat>
+  ret <3 x bfloat> %c
+}
+
+define <4 x bfloat> @stofp_v4i8_v4bf16(<4 x i8> %a) {
+; CHECK-LABEL: stofp_v4i8_v4bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    shl v0.4h, v0.4h, #8
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    sshr v0.4h, v0.4h, #8
+; CHECK-NEXT:    sshll v0.4s, v0.4h, #0
+; CHECK-NEXT:    scvtf v0.4s, v0.4s
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <4 x i8> %a to <4 x bfloat>
+  ret <4 x bfloat> %c
+}
+
+define <4 x bfloat> @utofp_v4i8_v4bf16(<4 x i8> %a) {
+; CHECK-LABEL: utofp_v4i8_v4bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    ushll v0.4s, v0.4h, #0
+; CHECK-NEXT:    ucvtf v0.4s, v0.4s
+; CHECK-NEXT:    ushr v2.4s, v0.4s, #16
+; CHECK-NEXT:    and v1.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v1.4s, v0.4s
+; CHECK-NEXT:    movi v1.4s, #127, msl #8
+; CHECK-NEXT:    addhn v0.4h, v0.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <4 x i8> %a to <4 x bfloat>
+  ret <4 x bfloat> %c
+}
+
+define <8 x bfloat> @stofp_v8i8_v8bf16(<8 x i8> %a) {
+; CHECK-LABEL: stofp_v8i8_v8bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sshll v0.8h, v0.8b, #0
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    movi v4.4s, #127, msl #8
+; CHECK-NEXT:    sshll v2.4s, v0.4h, #0
+; CHECK-NEXT:    sshll2 v0.4s, v0.8h, #0
+; CHECK-NEXT:    scvtf v2.4s, v2.4s
+; CHECK-NEXT:    scvtf v3.4s, v0.4s
+; CHECK-NEXT:    ushr v0.4s, v2.4s, #16
+; CHECK-NEXT:    ushr v5.4s, v3.4s, #16
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    and v1.16b, v5.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v0.4s, v4.4s
+; CHECK-NEXT:    add v1.4s, v1.4s, v4.4s
+; CHECK-NEXT:    addhn v0.4h, v2.4s, v0.4s
+; CHECK-NEXT:    addhn2 v0.8h, v3.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <8 x i8> %a to <8 x bfloat>
+  ret <8 x bfloat> %c
+}
+
+define <8 x bfloat> @utofp_v8i8_v8bf16(<8 x i8> %a) {
+; CHECK-LABEL: utofp_v8i8_v8bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    movi v4.4s, #127, msl #8
+; CHECK-NEXT:    ushll v2.4s, v0.4h, #0
+; CHECK-NEXT:    ushll2 v0.4s, v0.8h, #0
+; CHECK-NEXT:    ucvtf v2.4s, v2.4s
+; CHECK-NEXT:    ucvtf v3.4s, v0.4s
+; CHECK-NEXT:    ushr v0.4s, v2.4s, #16
+; CHECK-NEXT:    ushr v5.4s, v3.4s, #16
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    and v1.16b, v5.16b, v1.16b
+; CHECK-NEXT:    add v0.4s, v0.4s, v4.4s
+; CHECK-NEXT:    add v1.4s, v1.4s, v4.4s
+; CHECK-NEXT:    addhn v0.4h, v2.4s, v0.4s
+; CHECK-NEXT:    addhn2 v0.8h, v3.4s, v1.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <8 x i8> %a to <8 x bfloat>
+  ret <8 x bfloat> %c
+}
+
+define <16 x bfloat> @stofp_v16i8_v16bf16(<16 x i8> %a) {
+; CHECK-LABEL: stofp_v16i8_v16bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sshll2 v2.8h, v0.16b, #0
+; CHECK-NEXT:    sshll v0.8h, v0.8b, #0
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    movi v7.4s, #127, msl #8
+; CHECK-NEXT:    sshll v3.4s, v2.4h, #0
+; CHECK-NEXT:    sshll v4.4s, v0.4h, #0
+; CHECK-NEXT:    sshll2 v2.4s, v2.8h, #0
+; CHECK-NEXT:    sshll2 v0.4s, v0.8h, #0
+; CHECK-NEXT:    scvtf v3.4s, v3.4s
+; CHECK-NEXT:    scvtf v4.4s, v4.4s
+; CHECK-NEXT:    scvtf v2.4s, v2.4s
+; CHECK-NEXT:    scvtf v6.4s, v0.4s
+; CHECK-NEXT:    ushr v5.4s, v3.4s, #16
+; CHECK-NEXT:    ushr v0.4s, v4.4s, #16
+; CHECK-NEXT:    ushr v16.4s, v2.4s, #16
+; CHECK-NEXT:    ushr v17.4s, v6.4s, #16
+; CHECK-NEXT:    and v5.16b, v5.16b, v1.16b
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    and v16.16b, v16.16b, v1.16b
+; CHECK-NEXT:    and v17.16b, v17.16b, v1.16b
+; CHECK-NEXT:    add v5.4s, v5.4s, v7.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v7.4s
+; CHECK-NEXT:    addhn v1.4h, v3.4s, v5.4s
+; CHECK-NEXT:    add v3.4s, v16.4s, v7.4s
+; CHECK-NEXT:    add v5.4s, v17.4s, v7.4s
+; CHECK-NEXT:    addhn v0.4h, v4.4s, v0.4s
+; CHECK-NEXT:    addhn2 v1.8h, v2.4s, v3.4s
+; CHECK-NEXT:    addhn2 v0.8h, v6.4s, v5.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <16 x i8> %a to <16 x bfloat>
+  ret <16 x bfloat> %c
+}
+
+define <16 x bfloat> @utofp_v16i8_v16bf16(<16 x i8> %a) {
+; CHECK-LABEL: utofp_v16i8_v16bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ushll2 v2.8h, v0.16b, #0
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    movi v1.4s, #1
+; CHECK-NEXT:    movi v7.4s, #127, msl #8
+; CHECK-NEXT:    ushll v3.4s, v2.4h, #0
+; CHECK-NEXT:    ushll v4.4s, v0.4h, #0
+; CHECK-NEXT:    ushll2 v2.4s, v2.8h, #0
+; CHECK-NEXT:    ushll2 v0.4s, v0.8h, #0
+; CHECK-NEXT:    ucvtf v3.4s, v3.4s
+; CHECK-NEXT:    ucvtf v4.4s, v4.4s
+; CHECK-NEXT:    ucvtf v2.4s, v2.4s
+; CHECK-NEXT:    ucvtf v6.4s, v0.4s
+; CHECK-NEXT:    ushr v5.4s, v3.4s, #16
+; CHECK-NEXT:    ushr v0.4s, v4.4s, #16
+; CHECK-NEXT:    ushr v16.4s, v2.4s, #16
+; CHECK-NEXT:    ushr v17.4s, v6.4s, #16
+; CHECK-NEXT:    and v5.16b, v5.16b, v1.16b
+; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    and v16.16b, v16.16b, v1.16b
+; CHECK-NEXT:    and v17.16b, v17.16b, v1.16b
+; CHECK-NEXT:    add v5.4s, v5.4s, v7.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v7.4s
+; CHECK-NEXT:    addhn v1.4h, v3.4s, v5.4s
+; CHECK-NEXT:    add v3.4s, v16.4s, v7.4s
+; CHECK-NEXT:    add v5.4s, v17.4s, v7.4s
+; CHECK-NEXT:    addhn v0.4h, v4.4s, v0.4s
+; CHECK-NEXT:    addhn2 v1.8h, v2.4s, v3.4s
+; CHECK-NEXT:    addhn2 v0.8h, v6.4s, v5.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <16 x i8> %a to <16 x bfloat>
+  ret <16 x bfloat> %c
+}
+
+define <32 x bfloat> @stofp_v32i8_v32bf16(<32 x i8> %a) {
+; CHECK-LABEL: stofp_v32i8_v32bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sshll2 v3.8h, v0.16b, #0
+; CHECK-NEXT:    sshll2 v4.8h, v1.16b, #0
+; CHECK-NEXT:    sshll v0.8h, v0.8b, #0
+; CHECK-NEXT:    sshll v1.8h, v1.8b, #0
+; CHECK-NEXT:    movi v2.4s, #1
+; CHECK-NEXT:    movi v20.4s, #127, msl #8
+; CHECK-NEXT:    sshll v5.4s, v3.4h, #0
+; CHECK-NEXT:    sshll v6.4s, v4.4h, #0
+; CHECK-NEXT:    sshll v7.4s, v0.4h, #0
+; CHECK-NEXT:    sshll v16.4s, v1.4h, #0
+; CHECK-NEXT:    sshll2 v3.4s, v3.8h, #0
+; CHECK-NEXT:    sshll2 v4.4s, v4.8h, #0
+; CHECK-NEXT:    sshll2 v0.4s, v0.8h, #0
+; CHECK-NEXT:    sshll2 v1.4s, v1.8h, #0
+; CHECK-NEXT:    scvtf v5.4s, v5.4s
+; CHECK-NEXT:    scvtf v6.4s, v6.4s
+; CHECK-NEXT:    scvtf v7.4s, v7.4s
+; CHECK-NEXT:    scvtf v16.4s, v16.4s
+; CHECK-NEXT:    scvtf v17.4s, v3.4s
+; CHECK-NEXT:    scvtf v4.4s, v4.4s
+; CHECK-NEXT:    scvtf v19.4s, v0.4s
+; CHECK-NEXT:    scvtf v21.4s, v1.4s
+; CHECK-NEXT:    ushr v3.4s, v5.4s, #16
+; CHECK-NEXT:    ushr v18.4s, v6.4s, #16
+; CHECK-NEXT:    ushr v0.4s, v7.4s, #16
+; CHECK-NEXT:    ushr v1.4s, v16.4s, #16
+; CHECK-NEXT:    ushr v22.4s, v17.4s, #16
+; CHECK-NEXT:    ushr v23.4s, v4.4s, #16
+; CHECK-NEXT:    ushr v24.4s, v19.4s, #16
+; CHECK-NEXT:    ushr v25.4s, v21.4s, #16
+; CHECK-NEXT:    and v3.16b, v3.16b, v2.16b
+; CHECK-NEXT:    and v18.16b, v18.16b, v2.16b
+; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    and v22.16b, v22.16b, v2.16b
+; CHECK-NEXT:    and v23.16b, v23.16b, v2.16b
+; CHECK-NEXT:    and v24.16b, v24.16b, v2.16b
+; CHECK-NEXT:    and v2.16b, v25.16b, v2.16b
+; CHECK-NEXT:    add v3.4s, v3.4s, v20.4s
+; CHECK-NEXT:    add v18.4s, v18.4s, v20.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v20.4s
+; CHECK-NEXT:    add v26.4s, v1.4s, v20.4s
+; CHECK-NEXT:    addhn v1.4h, v5.4s, v3.4s
+; CHECK-NEXT:    addhn v3.4h, v6.4s, v18.4s
+; CHECK-NEXT:    addhn v0.4h, v7.4s, v0.4s
+; CHECK-NEXT:    add v5.4s, v22.4s, v20.4s
+; CHECK-NEXT:    add v6.4s, v24.4s, v20.4s
+; CHECK-NEXT:    add v7.4s, v23.4s, v20.4s
+; CHECK-NEXT:    add v18.4s, v2.4s, v20.4s
+; CHECK-NEXT:    addhn v2.4h, v16.4s, v26.4s
+; CHECK-NEXT:    addhn2 v0.8h, v19.4s, v6.4s
+; CHECK-NEXT:    addhn2 v1.8h, v17.4s, v5.4s
+; CHECK-NEXT:    addhn2 v3.8h, v4.4s, v7.4s
+; CHECK-NEXT:    addhn2 v2.8h, v21.4s, v18.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = sitofp <32 x i8> %a to <32 x bfloat>
+  ret <32 x bfloat> %c
+}
+
+define <32 x bfloat> @utofp_v32i8_v32bf16(<32 x i8> %a) {
+; CHECK-LABEL: utofp_v32i8_v32bf16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    ushll2 v3.8h, v0.16b, #0
+; CHECK-NEXT:    ushll2 v4.8h, v1.16b, #0
+; CHECK-NEXT:    ushll v0.8h, v0.8b, #0
+; CHECK-NEXT:    ushll v1.8h, v1.8b, #0
+; CHECK-NEXT:    movi v2.4s, #1
+; CHECK-NEXT:    movi v20.4s, #127, msl #8
+; CHECK-NEXT:    ushll v5.4s, v3.4h, #0
+; CHECK-NEXT:    ushll v6.4s, v4.4h, #0
+; CHECK-NEXT:    ushll v7.4s, v0.4h, #0
+; CHECK-NEXT:    ushll v16.4s, v1.4h, #0
+; CHECK-NEXT:    ushll2 v3.4s, v3.8h, #0
+; CHECK-NEXT:    ushll2 v4.4s, v4.8h, #0
+; CHECK-NEXT:    ushll2 v0.4s, v0.8h, #0
+; CHECK-NEXT:    ushll2 v1.4s, v1.8h, #0
+; CHECK-NEXT:    ucvtf v5.4s, v5.4s
+; CHECK-NEXT:    ucvtf v6.4s, v6.4s
+; CHECK-NEXT:    ucvtf v7.4s, v7.4s
+; CHECK-NEXT:    ucvtf v16.4s, v16.4s
+; CHECK-NEXT:    ucvtf v17.4s, v3.4s
+; CHECK-NEXT:    ucvtf v4.4s, v4.4s
+; CHECK-NEXT:    ucvtf v19.4s, v0.4s
+; CHECK-NEXT:    ucvtf v21.4s, v1.4s
+; CHECK-NEXT:    ushr v3.4s, v5.4s, #16
+; CHECK-NEXT:    ushr v18.4s, v6.4s, #16
+; CHECK-NEXT:    ushr v0.4s, v7.4s, #16
+; CHECK-NEXT:    ushr v1.4s, v16.4s, #16
+; CHECK-NEXT:    ushr v22.4s, v17.4s, #16
+; CHECK-NEXT:    ushr v23.4s, v4.4s, #16
+; CHECK-NEXT:    ushr v24.4s, v19.4s, #16
+; CHECK-NEXT:    ushr v25.4s, v21.4s, #16
+; CHECK-NEXT:    and v3.16b, v3.16b, v2.16b
+; CHECK-NEXT:    and v18.16b, v18.16b, v2.16b
+; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    and v1.16b, v1.16b, v2.16b
+; CHECK-NEXT:    and v22.16b, v22.16b, v2.16b
+; CHECK-NEXT:    and v23.16b, v23.16b, v2.16b
+; CHECK-NEXT:    and v24.16b, v24.16b, v2.16b
+; CHECK-NEXT:    and v2.16b, v25.16b, v2.16b
+; CHECK-NEXT:    add v3.4s, v3.4s, v20.4s
+; CHECK-NEXT:    add v18.4s, v18.4s, v20.4s
+; CHECK-NEXT:    add v0.4s, v0.4s, v20.4s
+; CHECK-NEXT:    add v26.4s, v1.4s, v20.4s
+; CHECK-NEXT:    addhn v1.4h, v5.4s, v3.4s
+; CHECK-NEXT:    addhn v3.4h, v6.4s, v18.4s
+; CHECK-NEXT:    addhn v0.4h, v7.4s, v0.4s
+; CHECK-NEXT:    add v5.4s, v22.4s, v20.4s
+; CHECK-NEXT:    add v6.4s, v24.4s, v20.4s
+; CHECK-NEXT:    add v7.4s, v23.4s, v20.4s
+; CHECK-NEXT:    add v18.4s, v2.4s, v20.4s
+; CHECK-NEXT:    addhn v2.4h, v16.4s, v26.4s
+; CHECK-NEXT:    addhn2 v0.8h, v19.4s, v6.4s
+; CHECK-NEXT:    addhn2 v1.8h, v17.4s, v5.4s
+; CHECK-NEXT:    addhn2 v3.8h, v4.4s, v7.4s
+; CHECK-NEXT:    addhn2 v2.8h, v21.4s, v18.4s
+; CHECK-NEXT:    ret
+entry:
+  %c = uitofp <32 x i8> %a to <32 x bfloat>
+  ret <32 x bfloat> %c
+}


        


More information about the llvm-commits mailing list