[llvm] [RISCV] Promote fixed-length bf16 arith vector ops with zvfbfmin (PR #112393)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 15 09:30:09 PDT 2024
https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/112393
From 1fe1e5e50a3884a47b111e18e8eb77703511f147 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 15 Oct 2024 16:37:35 +0100
Subject: [PATCH 1/3] Precommit tests
---
.../CodeGen/RISCV/rvv/fixed-vectors-fp.ll | 33311 +++++++++++++++-
1 file changed, 33228 insertions(+), 83 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index 7ecf8af54c8dc0..ff00aaf45fcf1d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -1,8 +1,1013 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfhmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64
+
+
+define void @fadd_v8bf16(ptr %x, ptr %y) {
+; RV32-LABEL: fadd_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fadd_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = fadd <8 x bfloat> %a, %b
+ store <8 x bfloat> %c, ptr %x
+ ret void
+}
+
+define void @fadd_v6bf16(ptr %x, ptr %y) {
+; RV32-LABEL: fadd_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fadd_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = fadd <6 x bfloat> %a, %b
+ store <6 x bfloat> %c, ptr %x
+ ret void
+}
define void @fadd_v8f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fadd_v8f16:
@@ -97,6 +1102,1010 @@ define void @fadd_v2f64(ptr %x, ptr %y) {
ret void
}
+define void @fsub_v8bf16(ptr %x, ptr %y) {
+; RV32-LABEL: fsub_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fsub_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = fsub <8 x bfloat> %a, %b
+ store <8 x bfloat> %c, ptr %x
+ ret void
+}
+
+define void @fsub_v6bf16(ptr %x, ptr %y) {
+; RV32-LABEL: fsub_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fsub_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = fsub <6 x bfloat> %a, %b
+ store <6 x bfloat> %c, ptr %x
+ ret void
+}
+
define void @fsub_v8f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fsub_v8f16:
; ZVFH: # %bb.0:
@@ -190,6 +2199,1010 @@ define void @fsub_v2f64(ptr %x, ptr %y) {
ret void
}
+define void @fmul_v8bf16(ptr %x, ptr %y) {
+; RV32-LABEL: fmul_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmul_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = fmul <8 x bfloat> %a, %b
+ store <8 x bfloat> %c, ptr %x
+ ret void
+}
+
+define void @fmul_v6bf16(ptr %x, ptr %y) {
+; RV32-LABEL: fmul_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmul_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = fmul <6 x bfloat> %a, %b
+ store <6 x bfloat> %c, ptr %x
+ ret void
+}
+
define void @fmul_v8f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fmul_v8f16:
; ZVFH: # %bb.0:
@@ -283,6 +3296,1010 @@ define void @fmul_v2f64(ptr %x, ptr %y) {
ret void
}
+define void @fdiv_v8bf16(ptr %x, ptr %y) {
+; RV32-LABEL: fdiv_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fdiv_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = fdiv <8 x bfloat> %a, %b
+ store <8 x bfloat> %c, ptr %x
+ ret void
+}
+
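+; fdiv_v6bf16 follows the same scalarized lowering seen in fdiv_v8bf16 above:
+; Zvfbfmin provides only bf16<->f32 conversions, not arithmetic, so each lane
+; is extracted to a GPR, shifted left by 16 to form its f32 bit pattern,
+; divided with fdiv.s, rounded back through the __truncsfbf2 libcall, and
+; reassembled with vslide1down.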
+define void @fdiv_v6bf16(ptr %x, ptr %y) {
+; RV32-LABEL: fdiv_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fdiv_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = fdiv <6 x bfloat> %a, %b
+ store <6 x bfloat> %c, ptr %x
+ ret void
+}
+
define void @fdiv_v8f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fdiv_v8f16:
; ZVFH: # %bb.0:
@@ -376,6 +4393,716 @@ define void @fdiv_v2f64(ptr %x, ptr %y) {
ret void
}
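+; The bf16 fneg tests below are likewise scalarized. On RV32 the sign bit is
+; flipped in a GPR by xor-ing the widened value with 0x80000000 (lui 524288);
+; on RV64 the value is moved to an FPR and negated with fneg.s. In both cases
+; the result is still rounded back through __truncsfbf2.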
+define void @fneg_v8bf16(ptr %x) {
+; RV32-LABEL: fneg_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: lui s1, 524288
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fneg_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa5, fa5
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa5, fa5
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = fneg <8 x bfloat> %a
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
+
+define void @fneg_v6bf16(ptr %x) {
+; RV32-LABEL: fneg_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: lui s1, 524288
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fneg_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa5, fa5
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa5, fa5
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = fneg <6 x bfloat> %a
+ store <6 x bfloat> %b, ptr %x
+ ret void
+}
+
define void @fneg_v8f16(ptr %x) {
; ZVFH-LABEL: fneg_v8f16:
; ZVFH: # %bb.0:
@@ -450,6 +5177,712 @@ define void @fneg_v2f64(ptr %x) {
ret void
}
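+; For the bf16 fabs tests the sign bit is cleared in a GPR: shifting the lane
+; left by 17 and logically right by 1 both widens it to the f32 bit pattern
+; and masks off bit 31, before the usual __truncsfbf2 round trip.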
+define void @fabs_v8bf16(ptr %x) {
+; RV32-LABEL: fabs_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fabs_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa5, fa5
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa5, fa5
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.fabs.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
+
+define void @fabs_v6bf16(ptr %x) {
+; RV32-LABEL: fabs_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 17
+; RV32-NEXT: srli a0, a0, 1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fabs_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa5, fa5
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa5, fa5
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = call <6 x bfloat> @llvm.fabs.v6bf16(<6 x bfloat> %a)
+ store <6 x bfloat> %b, ptr %x
+ ret void
+}
+
define void @fabs_v8f16(ptr %x) {
; ZVFH-LABEL: fabs_v8f16:
; ZVFH: # %bb.0:
@@ -473,7 +5906,6 @@ define void @fabs_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.fabs.v8f16(<8 x half>)
define void @fabs_v6f16(ptr %x) {
; ZVFH-LABEL: fabs_v6f16:
@@ -498,7 +5930,6 @@ define void @fabs_v6f16(ptr %x) {
store <6 x half> %b, ptr %x
ret void
}
-declare <6 x half> @llvm.fabs.v6f16(<6 x half>)
define void @fabs_v4f32(ptr %x) {
; CHECK-LABEL: fabs_v4f32:
@@ -513,7 +5944,6 @@ define void @fabs_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
define void @fabs_v2f64(ptr %x) {
; CHECK-LABEL: fabs_v2f64:
@@ -528,7 +5958,324 @@ define void @fabs_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.fabs.v2f64(<2 x double>)
+
+define void @copysign_v8bf16(ptr %x, ptr %y) {
+; RV32-LABEL: copysign_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: vslidedown.vi v10, v8, 1
+; RV32-NEXT: vmv.x.s a2, v10
+; RV32-NEXT: lui a1, 1048568
+; RV32-NEXT: and a2, a2, a1
+; RV32-NEXT: vslidedown.vi v10, v9, 1
+; RV32-NEXT: vmv.x.s a3, v10
+; RV32-NEXT: lui a4, 8
+; RV32-NEXT: addi a5, a4, -1
+; RV32-NEXT: and a3, a3, a5
+; RV32-NEXT: or a2, a3, a2
+; RV32-NEXT: vmv.x.s a3, v8
+; RV32-NEXT: and a3, a3, a4
+; RV32-NEXT: vmv.x.s a6, v9
+; RV32-NEXT: and a6, a6, a5
+; RV32-NEXT: or a3, a6, a3
+; RV32-NEXT: vmv.v.x v10, a3
+; RV32-NEXT: vslide1down.vx v10, v10, a2
+; RV32-NEXT: vslidedown.vi v11, v8, 2
+; RV32-NEXT: vmv.x.s a2, v11
+; RV32-NEXT: and a2, a2, a1
+; RV32-NEXT: vslidedown.vi v11, v9, 2
+; RV32-NEXT: vmv.x.s a3, v11
+; RV32-NEXT: and a3, a3, a5
+; RV32-NEXT: or a2, a3, a2
+; RV32-NEXT: vslide1down.vx v10, v10, a2
+; RV32-NEXT: vslidedown.vi v11, v8, 3
+; RV32-NEXT: vmv.x.s a2, v11
+; RV32-NEXT: and a2, a2, a1
+; RV32-NEXT: vslidedown.vi v11, v9, 3
+; RV32-NEXT: vmv.x.s a3, v11
+; RV32-NEXT: and a3, a3, a5
+; RV32-NEXT: or a2, a3, a2
+; RV32-NEXT: vslide1down.vx v10, v10, a2
+; RV32-NEXT: vslidedown.vi v11, v8, 5
+; RV32-NEXT: vmv.x.s a2, v11
+; RV32-NEXT: and a2, a2, a1
+; RV32-NEXT: vslidedown.vi v11, v9, 5
+; RV32-NEXT: vmv.x.s a3, v11
+; RV32-NEXT: and a3, a3, a5
+; RV32-NEXT: or a2, a3, a2
+; RV32-NEXT: vslidedown.vi v11, v8, 4
+; RV32-NEXT: vmv.x.s a3, v11
+; RV32-NEXT: and a3, a3, a4
+; RV32-NEXT: vslidedown.vi v11, v9, 4
+; RV32-NEXT: vmv.x.s a4, v11
+; RV32-NEXT: and a4, a4, a5
+; RV32-NEXT: or a3, a4, a3
+; RV32-NEXT: vmv.v.x v11, a3
+; RV32-NEXT: vslide1down.vx v11, v11, a2
+; RV32-NEXT: vslidedown.vi v12, v8, 6
+; RV32-NEXT: vmv.x.s a2, v12
+; RV32-NEXT: and a2, a2, a1
+; RV32-NEXT: vslidedown.vi v12, v9, 6
+; RV32-NEXT: vmv.x.s a3, v12
+; RV32-NEXT: and a3, a3, a5
+; RV32-NEXT: or a2, a3, a2
+; RV32-NEXT: vslide1down.vx v11, v11, a2
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a2, v8
+; RV32-NEXT: and a1, a2, a1
+; RV32-NEXT: vslidedown.vi v8, v9, 7
+; RV32-NEXT: vmv.x.s a2, v8
+; RV32-NEXT: and a2, a2, a5
+; RV32-NEXT: or a1, a2, a1
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v8, v11, a1
+; RV32-NEXT: vslidedown.vi v8, v10, 4, v0.t
+; RV32-NEXT: vse16.v v8, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: copysign_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: vslidedown.vi v10, v8, 1
+; RV64-NEXT: vmv.x.s a2, v10
+; RV64-NEXT: lui a1, 1048568
+; RV64-NEXT: and a2, a2, a1
+; RV64-NEXT: vslidedown.vi v10, v9, 1
+; RV64-NEXT: vmv.x.s a3, v10
+; RV64-NEXT: lui a4, 8
+; RV64-NEXT: addiw a5, a4, -1
+; RV64-NEXT: and a3, a3, a5
+; RV64-NEXT: or a2, a3, a2
+; RV64-NEXT: vmv.x.s a3, v8
+; RV64-NEXT: and a3, a3, a4
+; RV64-NEXT: vmv.x.s a6, v9
+; RV64-NEXT: and a6, a6, a5
+; RV64-NEXT: or a3, a6, a3
+; RV64-NEXT: vmv.v.x v10, a3
+; RV64-NEXT: vslide1down.vx v10, v10, a2
+; RV64-NEXT: vslidedown.vi v11, v8, 2
+; RV64-NEXT: vmv.x.s a2, v11
+; RV64-NEXT: and a2, a2, a1
+; RV64-NEXT: vslidedown.vi v11, v9, 2
+; RV64-NEXT: vmv.x.s a3, v11
+; RV64-NEXT: and a3, a3, a5
+; RV64-NEXT: or a2, a3, a2
+; RV64-NEXT: vslide1down.vx v10, v10, a2
+; RV64-NEXT: vslidedown.vi v11, v8, 3
+; RV64-NEXT: vmv.x.s a2, v11
+; RV64-NEXT: and a2, a2, a1
+; RV64-NEXT: vslidedown.vi v11, v9, 3
+; RV64-NEXT: vmv.x.s a3, v11
+; RV64-NEXT: and a3, a3, a5
+; RV64-NEXT: or a2, a3, a2
+; RV64-NEXT: vslide1down.vx v10, v10, a2
+; RV64-NEXT: vslidedown.vi v11, v8, 5
+; RV64-NEXT: vmv.x.s a2, v11
+; RV64-NEXT: and a2, a2, a1
+; RV64-NEXT: vslidedown.vi v11, v9, 5
+; RV64-NEXT: vmv.x.s a3, v11
+; RV64-NEXT: and a3, a3, a5
+; RV64-NEXT: or a2, a3, a2
+; RV64-NEXT: vslidedown.vi v11, v8, 4
+; RV64-NEXT: vmv.x.s a3, v11
+; RV64-NEXT: and a3, a3, a4
+; RV64-NEXT: vslidedown.vi v11, v9, 4
+; RV64-NEXT: vmv.x.s a4, v11
+; RV64-NEXT: and a4, a4, a5
+; RV64-NEXT: or a3, a4, a3
+; RV64-NEXT: vmv.v.x v11, a3
+; RV64-NEXT: vslide1down.vx v11, v11, a2
+; RV64-NEXT: vslidedown.vi v12, v8, 6
+; RV64-NEXT: vmv.x.s a2, v12
+; RV64-NEXT: and a2, a2, a1
+; RV64-NEXT: vslidedown.vi v12, v9, 6
+; RV64-NEXT: vmv.x.s a3, v12
+; RV64-NEXT: and a3, a3, a5
+; RV64-NEXT: or a2, a3, a2
+; RV64-NEXT: vslide1down.vx v11, v11, a2
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a2, v8
+; RV64-NEXT: and a1, a2, a1
+; RV64-NEXT: vslidedown.vi v8, v9, 7
+; RV64-NEXT: vmv.x.s a2, v8
+; RV64-NEXT: and a2, a2, a5
+; RV64-NEXT: or a1, a2, a1
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: vslide1down.vx v8, v11, a1
+; RV64-NEXT: vslidedown.vi v8, v10, 4, v0.t
+; RV64-NEXT: vse16.v v8, (a0)
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b)
+ store <8 x bfloat> %c, ptr %x
+ ret void
+}
+
+define void @copysign_v6bf16(ptr %x, ptr %y) {
+; RV32-LABEL: copysign_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: vslidedown.vi v10, v8, 1
+; RV32-NEXT: vmv.x.s a2, v10
+; RV32-NEXT: lui a1, 1048568
+; RV32-NEXT: and a2, a2, a1
+; RV32-NEXT: vslidedown.vi v10, v9, 1
+; RV32-NEXT: vmv.x.s a3, v10
+; RV32-NEXT: lui a4, 8
+; RV32-NEXT: addi a5, a4, -1
+; RV32-NEXT: and a3, a3, a5
+; RV32-NEXT: or a2, a3, a2
+; RV32-NEXT: vmv.x.s a3, v8
+; RV32-NEXT: and a3, a3, a4
+; RV32-NEXT: vmv.x.s a6, v9
+; RV32-NEXT: and a6, a6, a5
+; RV32-NEXT: or a3, a6, a3
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v10, a3
+; RV32-NEXT: vslide1down.vx v10, v10, a2
+; RV32-NEXT: vslidedown.vi v11, v8, 2
+; RV32-NEXT: vmv.x.s a2, v11
+; RV32-NEXT: and a2, a2, a1
+; RV32-NEXT: vslidedown.vi v11, v9, 2
+; RV32-NEXT: vmv.x.s a3, v11
+; RV32-NEXT: and a3, a3, a5
+; RV32-NEXT: or a2, a3, a2
+; RV32-NEXT: vslide1down.vx v10, v10, a2
+; RV32-NEXT: vslidedown.vi v11, v8, 3
+; RV32-NEXT: vmv.x.s a2, v11
+; RV32-NEXT: and a2, a2, a1
+; RV32-NEXT: vslidedown.vi v11, v9, 3
+; RV32-NEXT: vmv.x.s a3, v11
+; RV32-NEXT: and a3, a3, a5
+; RV32-NEXT: or a2, a3, a2
+; RV32-NEXT: vslide1down.vx v10, v10, a2
+; RV32-NEXT: vslidedown.vi v11, v8, 5
+; RV32-NEXT: vmv.x.s a2, v11
+; RV32-NEXT: and a2, a2, a1
+; RV32-NEXT: vslidedown.vi v11, v9, 5
+; RV32-NEXT: vmv.x.s a3, v11
+; RV32-NEXT: and a3, a3, a5
+; RV32-NEXT: or a2, a3, a2
+; RV32-NEXT: vslidedown.vi v11, v8, 4
+; RV32-NEXT: vmv.x.s a3, v11
+; RV32-NEXT: and a3, a3, a4
+; RV32-NEXT: vslidedown.vi v11, v9, 4
+; RV32-NEXT: vmv.x.s a4, v11
+; RV32-NEXT: and a4, a4, a5
+; RV32-NEXT: or a3, a4, a3
+; RV32-NEXT: vmv.v.x v11, a3
+; RV32-NEXT: vslide1down.vx v11, v11, a2
+; RV32-NEXT: vslidedown.vi v12, v8, 6
+; RV32-NEXT: vmv.x.s a2, v12
+; RV32-NEXT: and a2, a2, a1
+; RV32-NEXT: vslidedown.vi v12, v9, 6
+; RV32-NEXT: vmv.x.s a3, v12
+; RV32-NEXT: and a3, a3, a5
+; RV32-NEXT: or a2, a3, a2
+; RV32-NEXT: vslide1down.vx v11, v11, a2
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a2, v8
+; RV32-NEXT: and a1, a2, a1
+; RV32-NEXT: vslidedown.vi v8, v9, 7
+; RV32-NEXT: vmv.x.s a2, v8
+; RV32-NEXT: and a2, a2, a5
+; RV32-NEXT: or a1, a2, a1
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v8, v11, a1
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v10, 4, v0.t
+; RV32-NEXT: vse16.v v8, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: copysign_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: vslidedown.vi v10, v8, 1
+; RV64-NEXT: vmv.x.s a2, v10
+; RV64-NEXT: lui a1, 1048568
+; RV64-NEXT: and a2, a2, a1
+; RV64-NEXT: vslidedown.vi v10, v9, 1
+; RV64-NEXT: vmv.x.s a3, v10
+; RV64-NEXT: lui a4, 8
+; RV64-NEXT: addiw a5, a4, -1
+; RV64-NEXT: and a3, a3, a5
+; RV64-NEXT: or a2, a3, a2
+; RV64-NEXT: vmv.x.s a3, v8
+; RV64-NEXT: and a3, a3, a4
+; RV64-NEXT: vmv.x.s a6, v9
+; RV64-NEXT: and a6, a6, a5
+; RV64-NEXT: or a3, a6, a3
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v10, a3
+; RV64-NEXT: vslide1down.vx v10, v10, a2
+; RV64-NEXT: vslidedown.vi v11, v8, 2
+; RV64-NEXT: vmv.x.s a2, v11
+; RV64-NEXT: and a2, a2, a1
+; RV64-NEXT: vslidedown.vi v11, v9, 2
+; RV64-NEXT: vmv.x.s a3, v11
+; RV64-NEXT: and a3, a3, a5
+; RV64-NEXT: or a2, a3, a2
+; RV64-NEXT: vslide1down.vx v10, v10, a2
+; RV64-NEXT: vslidedown.vi v11, v8, 3
+; RV64-NEXT: vmv.x.s a2, v11
+; RV64-NEXT: and a2, a2, a1
+; RV64-NEXT: vslidedown.vi v11, v9, 3
+; RV64-NEXT: vmv.x.s a3, v11
+; RV64-NEXT: and a3, a3, a5
+; RV64-NEXT: or a2, a3, a2
+; RV64-NEXT: vslide1down.vx v10, v10, a2
+; RV64-NEXT: vslidedown.vi v11, v8, 5
+; RV64-NEXT: vmv.x.s a2, v11
+; RV64-NEXT: and a2, a2, a1
+; RV64-NEXT: vslidedown.vi v11, v9, 5
+; RV64-NEXT: vmv.x.s a3, v11
+; RV64-NEXT: and a3, a3, a5
+; RV64-NEXT: or a2, a3, a2
+; RV64-NEXT: vslidedown.vi v11, v8, 4
+; RV64-NEXT: vmv.x.s a3, v11
+; RV64-NEXT: and a3, a3, a4
+; RV64-NEXT: vslidedown.vi v11, v9, 4
+; RV64-NEXT: vmv.x.s a4, v11
+; RV64-NEXT: and a4, a4, a5
+; RV64-NEXT: or a3, a4, a3
+; RV64-NEXT: vmv.v.x v11, a3
+; RV64-NEXT: vslide1down.vx v11, v11, a2
+; RV64-NEXT: vslidedown.vi v12, v8, 6
+; RV64-NEXT: vmv.x.s a2, v12
+; RV64-NEXT: and a2, a2, a1
+; RV64-NEXT: vslidedown.vi v12, v9, 6
+; RV64-NEXT: vmv.x.s a3, v12
+; RV64-NEXT: and a3, a3, a5
+; RV64-NEXT: or a2, a3, a2
+; RV64-NEXT: vslide1down.vx v11, v11, a2
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a2, v8
+; RV64-NEXT: and a1, a2, a1
+; RV64-NEXT: vslidedown.vi v8, v9, 7
+; RV64-NEXT: vmv.x.s a2, v8
+; RV64-NEXT: and a2, a2, a5
+; RV64-NEXT: or a1, a2, a1
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: vslide1down.vx v8, v11, a1
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v10, 4, v0.t
+; RV64-NEXT: vse16.v v8, (a0)
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = call <6 x bfloat> @llvm.copysign.v6bf16(<6 x bfloat> %a, <6 x bfloat> %b)
+ store <6 x bfloat> %c, ptr %x
+ ret void
+}
define void @copysign_v8f16(ptr %x, ptr %y) {
; ZVFH-LABEL: copysign_v8f16:
@@ -558,7 +6305,6 @@ define void @copysign_v8f16(ptr %x, ptr %y) {
store <8 x half> %c, ptr %x
ret void
}
-declare <8 x half> @llvm.copysign.v8f16(<8 x half>, <8 x half>)
define void @copysign_v6f16(ptr %x, ptr %y) {
; ZVFH-LABEL: copysign_v6f16:
@@ -590,7 +6336,6 @@ define void @copysign_v6f16(ptr %x, ptr %y) {
store <6 x half> %c, ptr %x
ret void
}
-declare <6 x half> @llvm.copysign.v6f16(<6 x half>, <6 x half>)
define void @copysign_v4f32(ptr %x, ptr %y) {
; CHECK-LABEL: copysign_v4f32:
@@ -607,7 +6352,6 @@ define void @copysign_v4f32(ptr %x, ptr %y) {
store <4 x float> %c, ptr %x
ret void
}
-declare <4 x float> @llvm.copysign.v4f32(<4 x float>, <4 x float>)
define void @copysign_v2f64(ptr %x, ptr %y) {
; CHECK-LABEL: copysign_v2f64:
@@ -624,7 +6368,234 @@ define void @copysign_v2f64(ptr %x, ptr %y) {
store <2 x double> %c, ptr %x
ret void
}
-declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>)
+
+define void @copysign_vf_v8bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: copysign_vf_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: fmv.x.w a1, fa0
+; RV32-NEXT: lui a2, 1048568
+; RV32-NEXT: and a1, a1, a2
+; RV32-NEXT: vslidedown.vi v9, v8, 1
+; RV32-NEXT: vmv.x.s a2, v9
+; RV32-NEXT: lui a3, 8
+; RV32-NEXT: addi a3, a3, -1
+; RV32-NEXT: and a2, a2, a3
+; RV32-NEXT: or a2, a2, a1
+; RV32-NEXT: vmv.x.s a4, v8
+; RV32-NEXT: and a4, a4, a3
+; RV32-NEXT: or a4, a4, a1
+; RV32-NEXT: vmv.v.x v9, a4
+; RV32-NEXT: vslide1down.vx v9, v9, a2
+; RV32-NEXT: vslidedown.vi v10, v8, 2
+; RV32-NEXT: vmv.x.s a2, v10
+; RV32-NEXT: and a2, a2, a3
+; RV32-NEXT: or a2, a2, a1
+; RV32-NEXT: vslide1down.vx v9, v9, a2
+; RV32-NEXT: vslidedown.vi v10, v8, 3
+; RV32-NEXT: vmv.x.s a2, v10
+; RV32-NEXT: and a2, a2, a3
+; RV32-NEXT: or a2, a2, a1
+; RV32-NEXT: vslide1down.vx v9, v9, a2
+; RV32-NEXT: vslidedown.vi v10, v8, 5
+; RV32-NEXT: vmv.x.s a2, v10
+; RV32-NEXT: and a2, a2, a3
+; RV32-NEXT: or a2, a2, a1
+; RV32-NEXT: vslidedown.vi v10, v8, 4
+; RV32-NEXT: vmv.x.s a4, v10
+; RV32-NEXT: and a4, a4, a3
+; RV32-NEXT: or a4, a4, a1
+; RV32-NEXT: vmv.v.x v10, a4
+; RV32-NEXT: vslide1down.vx v10, v10, a2
+; RV32-NEXT: vslidedown.vi v11, v8, 6
+; RV32-NEXT: vmv.x.s a2, v11
+; RV32-NEXT: and a2, a2, a3
+; RV32-NEXT: or a2, a2, a1
+; RV32-NEXT: vslide1down.vx v10, v10, a2
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a2, v8
+; RV32-NEXT: and a2, a2, a3
+; RV32-NEXT: or a1, a2, a1
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v8, v10, a1
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: copysign_vf_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: fmv.x.w a1, fa0
+; RV64-NEXT: lui a2, 1048568
+; RV64-NEXT: and a1, a1, a2
+; RV64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-NEXT: vmv.x.s a2, v9
+; RV64-NEXT: lui a3, 8
+; RV64-NEXT: addiw a3, a3, -1
+; RV64-NEXT: and a2, a2, a3
+; RV64-NEXT: or a2, a2, a1
+; RV64-NEXT: vmv.x.s a4, v8
+; RV64-NEXT: and a4, a4, a3
+; RV64-NEXT: or a4, a4, a1
+; RV64-NEXT: vmv.v.x v9, a4
+; RV64-NEXT: vslide1down.vx v9, v9, a2
+; RV64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-NEXT: vmv.x.s a2, v10
+; RV64-NEXT: and a2, a2, a3
+; RV64-NEXT: or a2, a2, a1
+; RV64-NEXT: vslide1down.vx v9, v9, a2
+; RV64-NEXT: vslidedown.vi v10, v8, 3
+; RV64-NEXT: vmv.x.s a2, v10
+; RV64-NEXT: and a2, a2, a3
+; RV64-NEXT: or a2, a2, a1
+; RV64-NEXT: vslide1down.vx v9, v9, a2
+; RV64-NEXT: vslidedown.vi v10, v8, 5
+; RV64-NEXT: vmv.x.s a2, v10
+; RV64-NEXT: and a2, a2, a3
+; RV64-NEXT: or a2, a2, a1
+; RV64-NEXT: vslidedown.vi v10, v8, 4
+; RV64-NEXT: vmv.x.s a4, v10
+; RV64-NEXT: and a4, a4, a3
+; RV64-NEXT: or a4, a4, a1
+; RV64-NEXT: vmv.v.x v10, a4
+; RV64-NEXT: vslide1down.vx v10, v10, a2
+; RV64-NEXT: vslidedown.vi v11, v8, 6
+; RV64-NEXT: vmv.x.s a2, v11
+; RV64-NEXT: and a2, a2, a3
+; RV64-NEXT: or a2, a2, a1
+; RV64-NEXT: vslide1down.vx v10, v10, a2
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a2, v8
+; RV64-NEXT: and a2, a2, a3
+; RV64-NEXT: or a1, a2, a1
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: vslide1down.vx v8, v10, a1
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (a0)
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> %a, <8 x bfloat> %c)
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @copysign_vf_v6bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: copysign_vf_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: fmv.x.w a1, fa0
+; RV32-NEXT: lui a2, 1048568
+; RV32-NEXT: and a1, a1, a2
+; RV32-NEXT: vslidedown.vi v9, v8, 1
+; RV32-NEXT: vmv.x.s a2, v9
+; RV32-NEXT: lui a3, 8
+; RV32-NEXT: addi a3, a3, -1
+; RV32-NEXT: and a2, a2, a3
+; RV32-NEXT: or a2, a2, a1
+; RV32-NEXT: vmv.x.s a4, v8
+; RV32-NEXT: and a4, a4, a3
+; RV32-NEXT: or a4, a4, a1
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a4
+; RV32-NEXT: vslide1down.vx v9, v9, a2
+; RV32-NEXT: vslidedown.vi v10, v8, 2
+; RV32-NEXT: vmv.x.s a2, v10
+; RV32-NEXT: and a2, a2, a3
+; RV32-NEXT: or a2, a2, a1
+; RV32-NEXT: vslide1down.vx v9, v9, a2
+; RV32-NEXT: vslidedown.vi v10, v8, 3
+; RV32-NEXT: vmv.x.s a2, v10
+; RV32-NEXT: and a2, a2, a3
+; RV32-NEXT: or a2, a2, a1
+; RV32-NEXT: vslide1down.vx v9, v9, a2
+; RV32-NEXT: vslidedown.vi v10, v8, 5
+; RV32-NEXT: vmv.x.s a2, v10
+; RV32-NEXT: and a2, a2, a3
+; RV32-NEXT: or a2, a2, a1
+; RV32-NEXT: vslidedown.vi v10, v8, 4
+; RV32-NEXT: vmv.x.s a4, v10
+; RV32-NEXT: and a4, a4, a3
+; RV32-NEXT: or a1, a4, a1
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vslide1down.vx v10, v10, a2
+; RV32-NEXT: vslidedown.vi v11, v8, 6
+; RV32-NEXT: vmv.x.s a1, v11
+; RV32-NEXT: and a1, a1, a3
+; RV32-NEXT: vslide1down.vx v10, v10, a1
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: and a1, a1, a3
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v8, v10, a1
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (a0)
+; RV32-NEXT: ret
+;
+; RV64-LABEL: copysign_vf_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: fmv.x.w a1, fa0
+; RV64-NEXT: lui a2, 1048568
+; RV64-NEXT: and a1, a1, a2
+; RV64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-NEXT: vmv.x.s a2, v9
+; RV64-NEXT: lui a3, 8
+; RV64-NEXT: addiw a3, a3, -1
+; RV64-NEXT: and a2, a2, a3
+; RV64-NEXT: or a2, a2, a1
+; RV64-NEXT: vmv.x.s a4, v8
+; RV64-NEXT: and a4, a4, a3
+; RV64-NEXT: or a4, a4, a1
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v9, a4
+; RV64-NEXT: vslide1down.vx v9, v9, a2
+; RV64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-NEXT: vmv.x.s a2, v10
+; RV64-NEXT: and a2, a2, a3
+; RV64-NEXT: or a2, a2, a1
+; RV64-NEXT: vslide1down.vx v9, v9, a2
+; RV64-NEXT: vslidedown.vi v10, v8, 3
+; RV64-NEXT: vmv.x.s a2, v10
+; RV64-NEXT: and a2, a2, a3
+; RV64-NEXT: or a2, a2, a1
+; RV64-NEXT: vslide1down.vx v9, v9, a2
+; RV64-NEXT: vslidedown.vi v10, v8, 5
+; RV64-NEXT: vmv.x.s a2, v10
+; RV64-NEXT: and a2, a2, a3
+; RV64-NEXT: or a2, a2, a1
+; RV64-NEXT: vslidedown.vi v10, v8, 4
+; RV64-NEXT: vmv.x.s a4, v10
+; RV64-NEXT: and a4, a4, a3
+; RV64-NEXT: or a1, a4, a1
+; RV64-NEXT: vmv.v.x v10, a1
+; RV64-NEXT: vslide1down.vx v10, v10, a2
+; RV64-NEXT: vslidedown.vi v11, v8, 6
+; RV64-NEXT: vmv.x.s a1, v11
+; RV64-NEXT: and a1, a1, a3
+; RV64-NEXT: vslide1down.vx v10, v10, a1
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: and a1, a1, a3
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: vslide1down.vx v8, v10, a1
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (a0)
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = call <6 x bfloat> @llvm.copysign.v6bf16(<6 x bfloat> %a, <6 x bfloat> %c)
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
define void @copysign_vf_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: copysign_vf_v8f16:
@@ -720,6 +6691,862 @@ define void @copysign_vf_v2f64(ptr %x, double %y) {
ret void
}
+define void @copysign_neg_v8bf16(ptr %x, ptr %y) {
+; RV32-LABEL: copysign_neg_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -64
+; RV32-NEXT: .cfi_def_cfa_offset 64
+; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s7, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s8, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: .cfi_offset s3, -20
+; RV32-NEXT: .cfi_offset s4, -24
+; RV32-NEXT: .cfi_offset s5, -28
+; RV32-NEXT: .cfi_offset s6, -32
+; RV32-NEXT: .cfi_offset s7, -36
+; RV32-NEXT: .cfi_offset s8, -40
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 2 * vlenb
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v9, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v9, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: lui s4, 524288
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s3, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s5, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s6, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s7, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s8, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v11, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v11, 1
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: lui a2, 8
+; RV32-NEXT: addi a3, a2, -1
+; RV32-NEXT: and a1, a1, a3
+; RV32-NEXT: lui a4, 1048568
+; RV32-NEXT: and a0, a0, a4
+; RV32-NEXT: or a0, a1, a0
+; RV32-NEXT: vmv.x.s a1, v11
+; RV32-NEXT: and a1, a1, a3
+; RV32-NEXT: and a5, s8, a2
+; RV32-NEXT: or a1, a1, a5
+; RV32-NEXT: vmv.v.x v8, a1
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslidedown.vi v9, v11, 2
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: and a1, s7, a4
+; RV32-NEXT: or a0, a0, a1
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslidedown.vi v9, v11, 3
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: and a1, s6, a4
+; RV32-NEXT: or a0, a0, a1
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslidedown.vi v9, v11, 5
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: and a1, s5, a4
+; RV32-NEXT: or a0, a0, a1
+; RV32-NEXT: vslidedown.vi v9, v11, 4
+; RV32-NEXT: vmv.x.s a1, v9
+; RV32-NEXT: and a1, a1, a3
+; RV32-NEXT: and a2, s3, a2
+; RV32-NEXT: or a1, a1, a2
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vslide1down.vx v9, v9, a0
+; RV32-NEXT: vslidedown.vi v10, v11, 6
+; RV32-NEXT: vmv.x.s a0, v10
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: and a1, s2, a4
+; RV32-NEXT: or a0, a0, a1
+; RV32-NEXT: vslide1down.vx v9, v9, a0
+; RV32-NEXT: vslidedown.vi v10, v11, 7
+; RV32-NEXT: vmv.x.s a0, v10
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: and a4, s1, a4
+; RV32-NEXT: or a0, a0, a4
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v9, v9, a0
+; RV32-NEXT: vslidedown.vi v9, v8, 4, v0.t
+; RV32-NEXT: vse16.v v9, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s7, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s8, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: ret
+;
+; RV64-LABEL: copysign_neg_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -160
+; RV64-NEXT: .cfi_def_cfa_offset 160
+; RV64-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 144(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 128(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s3, 120(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s4, 112(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s5, 104(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s6, 96(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s7, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs1, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs2, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs3, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs4, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs5, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset s2, -32
+; RV64-NEXT: .cfi_offset s3, -40
+; RV64-NEXT: .cfi_offset s4, -48
+; RV64-NEXT: .cfi_offset s5, -56
+; RV64-NEXT: .cfi_offset s6, -64
+; RV64-NEXT: .cfi_offset s7, -72
+; RV64-NEXT: .cfi_offset fs0, -80
+; RV64-NEXT: .cfi_offset fs1, -88
+; RV64-NEXT: .cfi_offset fs2, -96
+; RV64-NEXT: .cfi_offset fs3, -104
+; RV64-NEXT: .cfi_offset fs4, -112
+; RV64-NEXT: .cfi_offset fs5, -120
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 1
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xa0, 0x01, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 160 + 2 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs2, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs3, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs4, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.x.w s2, fs5
+; RV64-NEXT: fmv.x.w s3, fs4
+; RV64-NEXT: fmv.x.w s4, fs3
+; RV64-NEXT: fmv.x.w s5, fs2
+; RV64-NEXT: fmv.x.w s6, fs1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: fmv.x.w s7, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v11, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v11, 1
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: lui a2, 8
+; RV64-NEXT: addiw a3, a2, -1
+; RV64-NEXT: and a1, a1, a3
+; RV64-NEXT: lui a4, 1048568
+; RV64-NEXT: and a0, a0, a4
+; RV64-NEXT: or a0, a1, a0
+; RV64-NEXT: vmv.x.s a1, v11
+; RV64-NEXT: and a1, a1, a3
+; RV64-NEXT: and a5, s7, a2
+; RV64-NEXT: or a1, a1, a5
+; RV64-NEXT: vmv.v.x v8, a1
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vslidedown.vi v9, v11, 2
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: and a0, a0, a3
+; RV64-NEXT: and a1, s6, a4
+; RV64-NEXT: or a0, a0, a1
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vslidedown.vi v9, v11, 3
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: and a0, a0, a3
+; RV64-NEXT: and a1, s5, a4
+; RV64-NEXT: or a0, a0, a1
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vslidedown.vi v9, v11, 5
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: and a0, a0, a3
+; RV64-NEXT: and a1, s4, a4
+; RV64-NEXT: or a0, a0, a1
+; RV64-NEXT: vslidedown.vi v9, v11, 4
+; RV64-NEXT: vmv.x.s a1, v9
+; RV64-NEXT: and a1, a1, a3
+; RV64-NEXT: and a2, s3, a2
+; RV64-NEXT: or a1, a1, a2
+; RV64-NEXT: vmv.v.x v9, a1
+; RV64-NEXT: vslide1down.vx v9, v9, a0
+; RV64-NEXT: vslidedown.vi v10, v11, 6
+; RV64-NEXT: vmv.x.s a0, v10
+; RV64-NEXT: and a0, a0, a3
+; RV64-NEXT: and a1, s2, a4
+; RV64-NEXT: or a0, a0, a1
+; RV64-NEXT: vslide1down.vx v9, v9, a0
+; RV64-NEXT: vslidedown.vi v10, v11, 7
+; RV64-NEXT: vmv.x.s a0, v10
+; RV64-NEXT: and a0, a0, a3
+; RV64-NEXT: and a4, s1, a4
+; RV64-NEXT: or a0, a0, a4
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: vslide1down.vx v9, v9, a0
+; RV64-NEXT: vslidedown.vi v9, v8, 4, v0.t
+; RV64-NEXT: vse16.v v9, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s2, 128(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s3, 120(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s4, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s5, 104(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s6, 96(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s7, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs1, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs2, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs3, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs4, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs5, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 160
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = fneg <8 x bfloat> %b
+ %d = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> %a, <8 x bfloat> %c)
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @copysign_neg_v6bf16(ptr %x, ptr %y) {
+; RV32-LABEL: copysign_neg_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -64
+; RV32-NEXT: .cfi_def_cfa_offset 64
+; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s7, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s8, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: .cfi_offset s3, -20
+; RV32-NEXT: .cfi_offset s4, -24
+; RV32-NEXT: .cfi_offset s5, -28
+; RV32-NEXT: .cfi_offset s6, -32
+; RV32-NEXT: .cfi_offset s7, -36
+; RV32-NEXT: .cfi_offset s8, -40
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 2 * vlenb
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v9, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v9, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: lui s4, 524288
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s3, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s5, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s6, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s7, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s8, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s4
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v11, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v11, 1
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: lui a2, 8
+; RV32-NEXT: addi a3, a2, -1
+; RV32-NEXT: and a1, a1, a3
+; RV32-NEXT: lui a4, 1048568
+; RV32-NEXT: and a0, a0, a4
+; RV32-NEXT: or a0, a1, a0
+; RV32-NEXT: vmv.x.s a1, v11
+; RV32-NEXT: and a1, a1, a3
+; RV32-NEXT: and a5, s8, a2
+; RV32-NEXT: or a1, a1, a5
+; RV32-NEXT: vmv.v.x v8, a1
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslidedown.vi v9, v11, 2
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: and a1, s7, a4
+; RV32-NEXT: or a0, a0, a1
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslidedown.vi v9, v11, 3
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: and a1, s6, a4
+; RV32-NEXT: or a0, a0, a1
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslidedown.vi v9, v11, 5
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: and a1, s5, a4
+; RV32-NEXT: or a0, a0, a1
+; RV32-NEXT: vslidedown.vi v9, v11, 4
+; RV32-NEXT: vmv.x.s a1, v9
+; RV32-NEXT: and a1, a1, a3
+; RV32-NEXT: and a2, s3, a2
+; RV32-NEXT: or a1, a1, a2
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vslide1down.vx v9, v9, a0
+; RV32-NEXT: vslidedown.vi v10, v11, 6
+; RV32-NEXT: vmv.x.s a0, v10
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: and a1, s2, a4
+; RV32-NEXT: or a0, a0, a1
+; RV32-NEXT: vslide1down.vx v9, v9, a0
+; RV32-NEXT: vslidedown.vi v10, v11, 7
+; RV32-NEXT: vmv.x.s a0, v10
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: and a4, s1, a4
+; RV32-NEXT: or a0, a0, a4
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v9, v9, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v9, v8, 4, v0.t
+; RV32-NEXT: vse16.v v9, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s7, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s8, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: ret
+;
+; RV64-LABEL: copysign_neg_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -160
+; RV64-NEXT: .cfi_def_cfa_offset 160
+; RV64-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 144(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 128(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s3, 120(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s4, 112(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s5, 104(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s6, 96(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s7, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs1, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs2, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs3, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs4, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs5, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset s2, -32
+; RV64-NEXT: .cfi_offset s3, -40
+; RV64-NEXT: .cfi_offset s4, -48
+; RV64-NEXT: .cfi_offset s5, -56
+; RV64-NEXT: .cfi_offset s6, -64
+; RV64-NEXT: .cfi_offset s7, -72
+; RV64-NEXT: .cfi_offset fs0, -80
+; RV64-NEXT: .cfi_offset fs1, -88
+; RV64-NEXT: .cfi_offset fs2, -96
+; RV64-NEXT: .cfi_offset fs3, -104
+; RV64-NEXT: .cfi_offset fs4, -112
+; RV64-NEXT: .cfi_offset fs5, -120
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 1
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xa0, 0x01, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 160 + 2 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs2, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs3, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs4, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.x.w s2, fs5
+; RV64-NEXT: fmv.x.w s3, fs4
+; RV64-NEXT: fmv.x.w s4, fs3
+; RV64-NEXT: fmv.x.w s5, fs2
+; RV64-NEXT: fmv.x.w s6, fs1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: fmv.x.w s7, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v11, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v11, 1
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: lui a2, 8
+; RV64-NEXT: addiw a3, a2, -1
+; RV64-NEXT: and a1, a1, a3
+; RV64-NEXT: lui a4, 1048568
+; RV64-NEXT: and a0, a0, a4
+; RV64-NEXT: or a0, a1, a0
+; RV64-NEXT: vmv.x.s a1, v11
+; RV64-NEXT: and a1, a1, a3
+; RV64-NEXT: and a5, s7, a2
+; RV64-NEXT: or a1, a1, a5
+; RV64-NEXT: vmv.v.x v8, a1
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vslidedown.vi v9, v11, 2
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: and a0, a0, a3
+; RV64-NEXT: and a1, s6, a4
+; RV64-NEXT: or a0, a0, a1
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vslidedown.vi v9, v11, 3
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: and a0, a0, a3
+; RV64-NEXT: and a1, s5, a4
+; RV64-NEXT: or a0, a0, a1
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vslidedown.vi v9, v11, 5
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: and a0, a0, a3
+; RV64-NEXT: and a1, s4, a4
+; RV64-NEXT: or a0, a0, a1
+; RV64-NEXT: vslidedown.vi v9, v11, 4
+; RV64-NEXT: vmv.x.s a1, v9
+; RV64-NEXT: and a1, a1, a3
+; RV64-NEXT: and a2, s3, a2
+; RV64-NEXT: or a1, a1, a2
+; RV64-NEXT: vmv.v.x v9, a1
+; RV64-NEXT: vslide1down.vx v9, v9, a0
+; RV64-NEXT: vslidedown.vi v10, v11, 6
+; RV64-NEXT: vmv.x.s a0, v10
+; RV64-NEXT: and a0, a0, a3
+; RV64-NEXT: and a1, s2, a4
+; RV64-NEXT: or a0, a0, a1
+; RV64-NEXT: vslide1down.vx v9, v9, a0
+; RV64-NEXT: vslidedown.vi v10, v11, 7
+; RV64-NEXT: vmv.x.s a0, v10
+; RV64-NEXT: and a0, a0, a3
+; RV64-NEXT: and a4, s1, a4
+; RV64-NEXT: or a0, a0, a4
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: vslide1down.vx v9, v9, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v9, v8, 4, v0.t
+; RV64-NEXT: vse16.v v9, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s2, 128(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s3, 120(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s4, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s5, 104(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s6, 96(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s7, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs1, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs2, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs3, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs4, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs5, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 160
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = fneg <6 x bfloat> %b
+ %d = call <6 x bfloat> @llvm.copysign.v6bf16(<6 x bfloat> %a, <6 x bfloat> %c)
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @copysign_neg_v8f16(ptr %x, ptr %y) {
; ZVFH-LABEL: copysign_neg_v8f16:
; ZVFH: # %bb.0:
@@ -818,6 +7645,592 @@ define void @copysign_neg_v2f64(ptr %x, ptr %y) {
ret void
}
+define void @copysign_neg_trunc_v4bf16_v4f32(ptr %x, ptr %y) {
+; RV32-LABEL: copysign_neg_trunc_v4bf16_v4f32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s5, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: .cfi_offset s3, -20
+; RV32-NEXT: .cfi_offset s4, -24
+; RV32-NEXT: .cfi_offset s5, -28
+; RV32-NEXT: .cfi_offset s6, -32
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a3, a2, 1
+; RV32-NEXT: add a2, a3, a2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vle32.v v8, (a1)
+; RV32-NEXT: vslidedown.vi v9, v9, 1
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: lui s4, 8
+; RV32-NEXT: addi s1, s4, -1
+; RV32-NEXT: and s5, a0, s1
+; RV32-NEXT: vfncvtbf16.f.f.w v9, v8
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: lui s2, 524288
+; RV32-NEXT: xor a0, a0, s2
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: lui s3, 1048568
+; RV32-NEXT: and a0, a0, s3
+; RV32-NEXT: or s5, s5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: and s6, a0, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s2
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: and a0, a0, s4
+; RV32-NEXT: or a0, s6, a0
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s5
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: and s4, a0, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s2
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: and a0, a0, s3
+; RV32-NEXT: or a0, s4, a0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: and s1, a0, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s2
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: and a0, a0, s3
+; RV32-NEXT: or a0, s1, a0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s4, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s5, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s6, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: copysign_neg_trunc_v4bf16_v4f32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -96
+; RV64-NEXT: .cfi_def_cfa_offset 96
+; RV64-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s3, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s4, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s5, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset s2, -32
+; RV64-NEXT: .cfi_offset s3, -40
+; RV64-NEXT: .cfi_offset s4, -48
+; RV64-NEXT: .cfi_offset s5, -56
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a3, a2, 1
+; RV64-NEXT: add a2, a3, a2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 96 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle32.v v8, (a1)
+; RV64-NEXT: vslidedown.vi v9, v9, 1
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: lui s3, 8
+; RV64-NEXT: addiw s1, s3, -1
+; RV64-NEXT: and s4, a0, s1
+; RV64-NEXT: vfncvtbf16.f.f.w v9, v8
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: lui s2, 1048568
+; RV64-NEXT: and a0, a0, s2
+; RV64-NEXT: or s4, s4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: and s5, a0, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: and a0, a0, s3
+; RV64-NEXT: or a0, s5, a0
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s4
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: and s3, a0, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: and a0, a0, s2
+; RV64-NEXT: or a0, s3, a0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: and s1, a0, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: and a0, a0, s2
+; RV64-NEXT: or a0, s1, a0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s2, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s3, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s4, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s5, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 96
+; RV64-NEXT: ret
+ %a = load <4 x bfloat>, ptr %x
+ %b = load <4 x float>, ptr %y
+ %c = fneg <4 x float> %b
+ %d = fptrunc <4 x float> %c to <4 x bfloat>
+ %e = call <4 x bfloat> @llvm.copysign.v4bf16(<4 x bfloat> %a, <4 x bfloat> %d)
+ store <4 x bfloat> %e, ptr %x
+ ret void
+}
+
+define void @copysign_neg_trunc_v3bf16_v3f32(ptr %x, ptr %y) {
+; RV32-LABEL: copysign_neg_trunc_v3bf16_v3f32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s5, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: .cfi_offset s3, -20
+; RV32-NEXT: .cfi_offset s4, -24
+; RV32-NEXT: .cfi_offset s5, -28
+; RV32-NEXT: .cfi_offset s6, -32
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a3, a2, 1
+; RV32-NEXT: add a2, a3, a2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vle32.v v8, (a1)
+; RV32-NEXT: vslidedown.vi v9, v9, 1
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: lui s4, 8
+; RV32-NEXT: addi s1, s4, -1
+; RV32-NEXT: and s5, a0, s1
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vfncvtbf16.f.f.w v9, v8
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: lui s2, 524288
+; RV32-NEXT: xor a0, a0, s2
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: lui s3, 1048568
+; RV32-NEXT: and a0, a0, s3
+; RV32-NEXT: or s5, s5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: and s6, a0, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s2
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: and a0, a0, s4
+; RV32-NEXT: or a0, s6, a0
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s5
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: and s4, a0, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s2
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: and a0, a0, s3
+; RV32-NEXT: or a0, s4, a0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: and s1, a0, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s2
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: and a0, a0, s3
+; RV32-NEXT: or a0, s1, a0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s4, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s5, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s6, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: copysign_neg_trunc_v3bf16_v3f32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -96
+; RV64-NEXT: .cfi_def_cfa_offset 96
+; RV64-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s3, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s4, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s5, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset s2, -32
+; RV64-NEXT: .cfi_offset s3, -40
+; RV64-NEXT: .cfi_offset s4, -48
+; RV64-NEXT: .cfi_offset s5, -56
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a3, a2, 1
+; RV64-NEXT: add a2, a3, a2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 96 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle32.v v8, (a1)
+; RV64-NEXT: vslidedown.vi v9, v9, 1
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: lui s3, 8
+; RV64-NEXT: addiw s1, s3, -1
+; RV64-NEXT: and s4, a0, s1
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT: vfncvtbf16.f.f.w v9, v8
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: lui s2, 1048568
+; RV64-NEXT: and a0, a0, s2
+; RV64-NEXT: or s4, s4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: and s5, a0, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: and a0, a0, s3
+; RV64-NEXT: or a0, s5, a0
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s4
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: and s3, a0, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: and a0, a0, s2
+; RV64-NEXT: or a0, s3, a0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: and s1, a0, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: and a0, a0, s2
+; RV64-NEXT: or a0, s1, a0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s2, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s3, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s4, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s5, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 96
+; RV64-NEXT: ret
+ %a = load <3 x bfloat>, ptr %x
+ %b = load <3 x float>, ptr %y
+ %c = fneg <3 x float> %b
+ %d = fptrunc <3 x float> %c to <3 x bfloat>
+ %e = call <3 x bfloat> @llvm.copysign.v3bf16(<3 x bfloat> %a, <3 x bfloat> %d)
+ store <3 x bfloat> %e, ptr %x
+ ret void
+}
+
define void @copysign_neg_trunc_v4f16_v4f32(ptr %x, ptr %y) {
; ZVFH-LABEL: copysign_neg_trunc_v4f16_v4f32:
; ZVFH: # %bb.0:
@@ -851,7 +8264,6 @@ define void @copysign_neg_trunc_v4f16_v4f32(ptr %x, ptr %y) {
store <4 x half> %e, ptr %x
ret void
}
-declare <4 x half> @llvm.copysign.v4f16(<4 x half>, <4 x half>)
define void @copysign_neg_trunc_v3f16_v3f32(ptr %x, ptr %y) {
; ZVFH-LABEL: copysign_neg_trunc_v3f16_v3f32:
@@ -890,7 +8302,6 @@ define void @copysign_neg_trunc_v3f16_v3f32(ptr %x, ptr %y) {
store <3 x half> %e, ptr %x
ret void
}
-declare <3 x half> @llvm.copysign.v3f16(<3 x half>, <3 x half>)
define void @copysign_neg_ext_v2f64_v2f32(ptr %x, ptr %y) {
; CHECK-LABEL: copysign_neg_ext_v2f64_v2f32:
@@ -912,6 +8323,604 @@ define void @copysign_neg_ext_v2f64_v2f32(ptr %x, ptr %y) {
ret void
}
+define void @sqrt_v8bf16(ptr %x) {
+; RV32-LABEL: sqrt_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsqrt.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsqrt.s fa5, fa5
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsqrt.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsqrt.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsqrt.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsqrt.s fa5, fa5
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsqrt.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsqrt.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: sqrt_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsqrt.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsqrt.s fa5, fa5
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsqrt.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsqrt.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsqrt.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsqrt.s fa5, fa5
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsqrt.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsqrt.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.sqrt.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
+
+define void @sqrt_v6bf16(ptr %x) {
+; RV32-LABEL: sqrt_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsqrt.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsqrt.s fa5, fa5
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsqrt.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsqrt.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsqrt.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsqrt.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: sqrt_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 2 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsqrt.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsqrt.s fa5, fa5
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsqrt.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsqrt.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsqrt.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsqrt.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = call <6 x bfloat> @llvm.sqrt.v6bf16(<6 x bfloat> %a)
+ store <6 x bfloat> %b, ptr %x
+ ret void
+}
+
define void @sqrt_v8f16(ptr %x) {
; ZVFH-LABEL: sqrt_v8f16:
; ZVFH: # %bb.0:
@@ -937,7 +8946,6 @@ define void @sqrt_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.sqrt.v8f16(<8 x half>)
define void @sqrt_v6f16(ptr %x) {
; ZVFH-LABEL: sqrt_v6f16:
@@ -965,7 +8973,6 @@ define void @sqrt_v6f16(ptr %x) {
store <6 x half> %b, ptr %x
ret void
}
-declare <6 x half> @llvm.sqrt.v6f16(<6 x half>)
define void @sqrt_v4f32(ptr %x) {
; CHECK-LABEL: sqrt_v4f32:
@@ -980,7 +8987,6 @@ define void @sqrt_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
define void @sqrt_v2f64(ptr %x) {
; CHECK-LABEL: sqrt_v2f64:
@@ -995,7 +9001,1300 @@ define void @sqrt_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.sqrt.v2f64(<2 x double>)
+
+define void @fma_v8bf16(ptr %x, ptr %y, ptr %z) {
+; RV32-LABEL: fma_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a4, a3, 2
+; RV32-NEXT: add a3, a4, a3
+; RV32-NEXT: sub sp, sp, a3
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 5 * vlenb
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a2)
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 32
+; RV32-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v10, (a1)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v10, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 2
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fma_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a3, vlenb
+; RV64-NEXT: slli a4, a3, 2
+; RV64-NEXT: add a3, a4, a3
+; RV64-NEXT: sub sp, sp, a3
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 5 * vlenb
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a2)
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 1
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 32
+; RV64-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v10, (a1)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v10, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 2
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = load <8 x bfloat>, ptr %z
+ %d = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c)
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fma_v6bf16(ptr %x, ptr %y, ptr %z) {
+; RV32-LABEL: fma_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a4, a3, 2
+; RV32-NEXT: add a3, a4, a3
+; RV32-NEXT: sub sp, sp, a3
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 5 * vlenb
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a2)
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 32
+; RV32-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v10, (a1)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v10, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 2
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fma_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a3, vlenb
+; RV64-NEXT: slli a4, a3, 2
+; RV64-NEXT: add a3, a4, a3
+; RV64-NEXT: sub sp, sp, a3
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 5 * vlenb
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a2)
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 1
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 32
+; RV64-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v10, (a1)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v10, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 2
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = load <6 x bfloat>, ptr %z
+ %d = call <6 x bfloat> @llvm.fma.v6bf16(<6 x bfloat> %a, <6 x bfloat> %b, <6 x bfloat> %c)
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
define void @fma_v8f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fma_v8f16:
@@ -1030,7 +10329,6 @@ define void @fma_v8f16(ptr %x, ptr %y, ptr %z) {
store <8 x half> %d, ptr %x
ret void
}
-declare <8 x half> @llvm.fma.v8f16(<8 x half>, <8 x half>, <8 x half>)
define void @fma_v6f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fma_v6f16:
@@ -1066,7 +10364,6 @@ define void @fma_v6f16(ptr %x, ptr %y, ptr %z) {
store <6 x half> %d, ptr %x
ret void
}
-declare <6 x half> @llvm.fma.v6f16(<6 x half>, <6 x half>, <6 x half>)
define void @fma_v4f32(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fma_v4f32:
@@ -1085,7 +10382,6 @@ define void @fma_v4f32(ptr %x, ptr %y, ptr %z) {
store <4 x float> %d, ptr %x
ret void
}
-declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
define void @fma_v2f64(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fma_v2f64:
@@ -1104,7 +10400,1634 @@ define void @fma_v2f64(ptr %x, ptr %y, ptr %z) {
store <2 x double> %d, ptr %x
ret void
}
-declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
+
+define void @fmsub_v8bf16(ptr %x, ptr %y, ptr %z) {
+; RV32-LABEL: fmsub_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -64
+; RV32-NEXT: .cfi_def_cfa_offset 64
+; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s7, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s8, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: .cfi_offset s3, -20
+; RV32-NEXT: .cfi_offset s4, -24
+; RV32-NEXT: .cfi_offset s5, -28
+; RV32-NEXT: .cfi_offset s6, -32
+; RV32-NEXT: .cfi_offset s7, -36
+; RV32-NEXT: .cfi_offset s8, -40
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 2
+; RV32-NEXT: sub sp, sp, a3
+; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v9, (a2)
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs1r.v v9, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a2, a0, 1
+; RV32-NEXT: add a0, a2, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v9, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: lui s6, 524288
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s3, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s4, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s5, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s7, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s8, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s6, fa0
+; RV32-NEXT: slli s8, s8, 16
+; RV32-NEXT: fmv.w.x fa5, s8
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s6
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s7, s7, 16
+; RV32-NEXT: fmv.w.x fa5, s7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s5, s5, 16
+; RV32-NEXT: fmv.w.x fa5, s5
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s4, s4, 16
+; RV32-NEXT: fmv.w.x fa5, s4
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s4, fa0
+; RV32-NEXT: slli s3, s3, 16
+; RV32-NEXT: fmv.w.x fa5, s3
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s4
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s2, s2, 16
+; RV32-NEXT: fmv.w.x fa5, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s1, s1, 16
+; RV32-NEXT: fmv.w.x fa5, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s7, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s8, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmsub_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -144
+; RV64-NEXT: .cfi_def_cfa_offset 144
+; RV64-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 128(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 120(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 112(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s3, 104(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s4, 96(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s5, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s6, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s7, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s8, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs3, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs4, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs5, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset s2, -32
+; RV64-NEXT: .cfi_offset s3, -40
+; RV64-NEXT: .cfi_offset s4, -48
+; RV64-NEXT: .cfi_offset s5, -56
+; RV64-NEXT: .cfi_offset s6, -64
+; RV64-NEXT: .cfi_offset s7, -72
+; RV64-NEXT: .cfi_offset s8, -80
+; RV64-NEXT: .cfi_offset fs0, -88
+; RV64-NEXT: .cfi_offset fs1, -96
+; RV64-NEXT: .cfi_offset fs2, -104
+; RV64-NEXT: .cfi_offset fs3, -112
+; RV64-NEXT: .cfi_offset fs4, -120
+; RV64-NEXT: .cfi_offset fs5, -128
+; RV64-NEXT: csrr a3, vlenb
+; RV64-NEXT: slli a3, a3, 2
+; RV64-NEXT: sub sp, sp, a3
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 144 + 4 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a3, a0, 1
+; RV64-NEXT: add a0, a3, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v8, (a2)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs2, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs3, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs4, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.x.w s2, fs5
+; RV64-NEXT: fmv.x.w s3, fs4
+; RV64-NEXT: fmv.x.w s4, fs3
+; RV64-NEXT: fmv.x.w s5, fs2
+; RV64-NEXT: fmv.x.w s6, fs1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: fmv.x.w s7, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa5, a1
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa4, a1
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s8, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: slli s7, s7, 16
+; RV64-NEXT: fmv.w.x fa3, s7
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s8
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: slli s6, s6, 16
+; RV64-NEXT: fmv.w.x fa3, s6
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: slli s5, s5, 16
+; RV64-NEXT: fmv.w.x fa3, s5
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: slli s4, s4, 16
+; RV64-NEXT: fmv.w.x fa3, s4
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s4, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: slli s3, s3, 16
+; RV64-NEXT: fmv.w.x fa3, s3
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s4
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: slli s2, s2, 16
+; RV64-NEXT: fmv.w.x fa3, s2
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: slli s1, s1, 16
+; RV64-NEXT: fmv.w.x fa3, s1
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 120(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s2, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s3, 104(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s4, 96(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s5, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s6, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s7, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s8, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs3, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs4, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs5, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 144
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = load <8 x bfloat>, ptr %z
+ %neg = fneg <8 x bfloat> %c
+ %d = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %neg)
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fmsub_v6bf16(ptr %x, ptr %y, ptr %z) {
+; RV32-LABEL: fmsub_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -64
+; RV32-NEXT: .cfi_def_cfa_offset 64
+; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s7, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s8, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: .cfi_offset s3, -20
+; RV32-NEXT: .cfi_offset s4, -24
+; RV32-NEXT: .cfi_offset s5, -28
+; RV32-NEXT: .cfi_offset s6, -32
+; RV32-NEXT: .cfi_offset s7, -36
+; RV32-NEXT: .cfi_offset s8, -40
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 2
+; RV32-NEXT: sub sp, sp, a3
+; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v9, (a2)
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs1r.v v9, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a2, a0, 1
+; RV32-NEXT: add a0, a2, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v9, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: lui s6, 524288
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s3, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s4, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s5, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s7, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s8, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s6, fa0
+; RV32-NEXT: slli s8, s8, 16
+; RV32-NEXT: fmv.w.x fa5, s8
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s6
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s7, s7, 16
+; RV32-NEXT: fmv.w.x fa5, s7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s5, s5, 16
+; RV32-NEXT: fmv.w.x fa5, s5
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s4, s4, 16
+; RV32-NEXT: fmv.w.x fa5, s4
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s4, fa0
+; RV32-NEXT: slli s3, s3, 16
+; RV32-NEXT: fmv.w.x fa5, s3
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s4
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s2, s2, 16
+; RV32-NEXT: fmv.w.x fa5, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s1, s1, 16
+; RV32-NEXT: fmv.w.x fa5, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s7, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s8, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmsub_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -144
+; RV64-NEXT: .cfi_def_cfa_offset 144
+; RV64-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 128(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 120(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 112(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s3, 104(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s4, 96(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s5, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s6, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s7, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s8, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs3, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs4, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs5, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset s2, -32
+; RV64-NEXT: .cfi_offset s3, -40
+; RV64-NEXT: .cfi_offset s4, -48
+; RV64-NEXT: .cfi_offset s5, -56
+; RV64-NEXT: .cfi_offset s6, -64
+; RV64-NEXT: .cfi_offset s7, -72
+; RV64-NEXT: .cfi_offset s8, -80
+; RV64-NEXT: .cfi_offset fs0, -88
+; RV64-NEXT: .cfi_offset fs1, -96
+; RV64-NEXT: .cfi_offset fs2, -104
+; RV64-NEXT: .cfi_offset fs3, -112
+; RV64-NEXT: .cfi_offset fs4, -120
+; RV64-NEXT: .cfi_offset fs5, -128
+; RV64-NEXT: csrr a3, vlenb
+; RV64-NEXT: slli a3, a3, 2
+; RV64-NEXT: sub sp, sp, a3
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 144 + 4 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a3, a0, 1
+; RV64-NEXT: add a0, a3, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v8, (a2)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs2, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs3, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs4, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.x.w s2, fs5
+; RV64-NEXT: fmv.x.w s3, fs4
+; RV64-NEXT: fmv.x.w s4, fs3
+; RV64-NEXT: fmv.x.w s5, fs2
+; RV64-NEXT: fmv.x.w s6, fs1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: fmv.x.w s7, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa5, a1
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa4, a1
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s8, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: slli s7, s7, 16
+; RV64-NEXT: fmv.w.x fa3, s7
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s8
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: slli s6, s6, 16
+; RV64-NEXT: fmv.w.x fa3, s6
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: slli s5, s5, 16
+; RV64-NEXT: fmv.w.x fa3, s5
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: slli s4, s4, 16
+; RV64-NEXT: fmv.w.x fa3, s4
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s4, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: slli s3, s3, 16
+; RV64-NEXT: fmv.w.x fa3, s3
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s4
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: slli s2, s2, 16
+; RV64-NEXT: fmv.w.x fa3, s2
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: slli s1, s1, 16
+; RV64-NEXT: fmv.w.x fa3, s1
+; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 120(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s2, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s3, 104(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s4, 96(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s5, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s6, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s7, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s8, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs3, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs4, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs5, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 144
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = load <6 x bfloat>, ptr %z
+ %neg = fneg <6 x bfloat> %c
+ %d = call <6 x bfloat> @llvm.fma.v6bf16(<6 x bfloat> %a, <6 x bfloat> %b, <6 x bfloat> %neg)
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
define void @fmsub_v8f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fmsub_v8f16:
@@ -1220,6 +12143,939 @@ define void @fnmadd_v2f64(ptr %x, ptr %y, ptr %z) {
ret void
}
+define void @fadd_v16bf16(ptr %x, ptr %y) {
+; RV32-LABEL: fadd_v16bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: mv a3, a2
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: add a2, a2, a3
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 6 * vlenb
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v10, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v10, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 8
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 8
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 9
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 9
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 10
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 10
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 11
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 11
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 12
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 12
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 14
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 14
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 15
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 15
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fadd_v16bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 1
+; RV64-NEXT: mv a3, a2
+; RV64-NEXT: slli a2, a2, 1
+; RV64-NEXT: add a2, a2, a3
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 6 * vlenb
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v10, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v10, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 8
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 8
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 9
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 9
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 10
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 10
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 11
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 11
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 12
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 12
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 13
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 13
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 14
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 14
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 15
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 15
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <16 x bfloat>, ptr %x
+ %b = load <16 x bfloat>, ptr %y
+ %c = fadd <16 x bfloat> %a, %b
+ store <16 x bfloat> %c, ptr %x
+ ret void
+}
+
define void @fadd_v16f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fadd_v16f16:
; ZVFH: # %bb.0:
@@ -1282,6 +13138,939 @@ define void @fadd_v4f64(ptr %x, ptr %y) {
ret void
}
+define void @fsub_v16bf16(ptr %x, ptr %y) {
+; RV32-LABEL: fsub_v16bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: mv a3, a2
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: add a2, a2, a3
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 6 * vlenb
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v10, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v10, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 8
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 8
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 9
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 9
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 10
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 10
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 11
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 11
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 12
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 12
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 14
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 14
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 15
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 15
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fsub_v16bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 1
+; RV64-NEXT: mv a3, a2
+; RV64-NEXT: slli a2, a2, 1
+; RV64-NEXT: add a2, a2, a3
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 6 * vlenb
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v10, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v10, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 8
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 8
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 9
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 9
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 10
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 10
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 11
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 11
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 12
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 12
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 13
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 13
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 14
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 14
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 15
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 15
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <16 x bfloat>, ptr %x
+ %b = load <16 x bfloat>, ptr %y
+ %c = fsub <16 x bfloat> %a, %b
+ store <16 x bfloat> %c, ptr %x
+ ret void
+}
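+; In the bf16 tests above there is no native bf16 arithmetic, so each element is
+; scalarized: the bf16 bits are extracted with vslidedown/vmv.x.s, widened to f32
+; by shifting them into the upper 16 bits of an FP register (slli 16 + fmv.w.x),
+; the scalar op is applied, the result is narrowed back via the __truncsfbf2
+; libcall, and the lanes are reassembled with vslide1down, spilling and reloading
+; the vectors around each call.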
+
define void @fsub_v16f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fsub_v16f16:
; ZVFH: # %bb.0:
@@ -1344,6 +14133,939 @@ define void @fsub_v4f64(ptr %x, ptr %y) {
ret void
}
+define void @fmul_v16bf16(ptr %x, ptr %y) {
+; RV32-LABEL: fmul_v16bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: mv a3, a2
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: add a2, a2, a3
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 6 * vlenb
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v10, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v10, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 8
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 8
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 9
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 9
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 10
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 10
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 11
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 11
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 12
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 12
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 14
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 14
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 15
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 15
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmul_v16bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 1
+; RV64-NEXT: mv a3, a2
+; RV64-NEXT: slli a2, a2, 1
+; RV64-NEXT: add a2, a2, a3
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 6 * vlenb
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v10, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v10, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 8
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 8
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 9
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 9
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 10
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 10
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 11
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 11
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 12
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 12
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 13
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 13
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 14
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 14
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 15
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 15
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <16 x bfloat>, ptr %x
+ %b = load <16 x bfloat>, ptr %y
+ %c = fmul <16 x bfloat> %a, %b
+ store <16 x bfloat> %c, ptr %x
+ ret void
+}
+
define void @fmul_v16f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fmul_v16f16:
; ZVFH: # %bb.0:
@@ -1406,6 +15128,939 @@ define void @fmul_v4f64(ptr %x, ptr %y) {
ret void
}
+define void @fdiv_v16bf16(ptr %x, ptr %y) {
+; RV32-LABEL: fdiv_v16bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: mv a3, a2
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: add a2, a2, a3
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 6 * vlenb
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v10, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v10, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 8
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 8
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 9
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 9
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 10
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 10
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 11
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 11
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 12
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 12
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 14
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 14
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 15
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 15
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fdiv.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fdiv_v16bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 1
+; RV64-NEXT: mv a3, a2
+; RV64-NEXT: slli a2, a2, 1
+; RV64-NEXT: add a2, a2, a3
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 6 * vlenb
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v10, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v10, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 8
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 8
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 9
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 9
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 10
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 10
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 11
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 11
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 12
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 12
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 13
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 13
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 14
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 14
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 15
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 15
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fdiv.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <16 x bfloat>, ptr %x
+ %b = load <16 x bfloat>, ptr %y
+ %c = fdiv <16 x bfloat> %a, %b
+ store <16 x bfloat> %c, ptr %x
+ ret void
+}
+
define void @fdiv_v16f16(ptr %x, ptr %y) {
; ZVFH-LABEL: fdiv_v16f16:
; ZVFH: # %bb.0:
@@ -1468,6 +16123,643 @@ define void @fdiv_v4f64(ptr %x, ptr %y) {
ret void
}
+define void @fneg_v16bf16(ptr %x) {
+; RV32-LABEL: fneg_v16bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 4 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: lui s1, 524288
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 8
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 9
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 10
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 11
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 12
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 14
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 15
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s1
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fneg_v16bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 2
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa5, fa5
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 8
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 9
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 10
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 11
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 12
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 13
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 14
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 15
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <16 x bfloat>, ptr %x
+ %b = fneg <16 x bfloat> %a
+ store <16 x bfloat> %b, ptr %x
+ ret void
+}
+
define void @fneg_v16f16(ptr %x) {
; ZVFH-LABEL: fneg_v16f16:
; ZVFH: # %bb.0:
@@ -1519,6 +16811,1312 @@ define void @fneg_v4f64(ptr %x) {
ret void
}
+define void @fma_v16bf16(ptr %x, ptr %y, ptr %z) {
+; RV32-LABEL: fma_v16bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: sub sp, sp, a3
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 8 * vlenb
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vle16.v v8, (a2)
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 1
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 32
+; RV32-NEXT: vs2r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v10, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a2, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v12, (a1)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v12, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: vslidedown.vi v8, v10, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 8
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 8
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 8
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 9
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 9
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 9
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 10
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 10
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 10
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 11
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 11
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 11
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 12
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 12
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 12
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 14
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 14
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 14
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 15
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 15
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 15
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa3, a0
+; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fma_v16bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a3, vlenb
+; RV64-NEXT: slli a3, a3, 3
+; RV64-NEXT: sub sp, sp, a3
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 8 * vlenb
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vle16.v v8, (a2)
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 1
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 32
+; RV64-NEXT: vs2r.v v8, (a2) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v10, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a2, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v12, (a1)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v12, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: vslidedown.vi v8, v10, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 8
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 8
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 8
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 9
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 9
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 9
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 10
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 10
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 10
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 11
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 11
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 11
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 12
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 12
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 12
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 13
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 13
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 13
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 14
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 14
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 14
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 15
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 15
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 15
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa3, a0
+; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <16 x bfloat>, ptr %x
+ %b = load <16 x bfloat>, ptr %y
+ %c = load <16 x bfloat>, ptr %z
+ %d = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c)
+ store <16 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fma_v16f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fma_v16f16:
; ZVFH: # %bb.0:
@@ -1552,7 +18150,6 @@ define void @fma_v16f16(ptr %x, ptr %y, ptr %z) {
store <16 x half> %d, ptr %x
ret void
}
-declare <16 x half> @llvm.fma.v16f16(<16 x half>, <16 x half>, <16 x half>)
define void @fma_v8f32(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fma_v8f32:
@@ -1571,7 +18168,6 @@ define void @fma_v8f32(ptr %x, ptr %y, ptr %z) {
store <8 x float> %d, ptr %x
ret void
}
-declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
define void @fma_v4f64(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fma_v4f64:
@@ -1590,7 +18186,734 @@ define void @fma_v4f64(ptr %x, ptr %y, ptr %z) {
store <4 x double> %d, ptr %x
ret void
}
-declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
+
+define void @fadd_vf_v8bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fadd_vf_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fadd_vf_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fadd <8 x bfloat> %a, %c
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fadd_vf_v6bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fadd_vf_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmv.w.x fs0, zero
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fadd_vf_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmv.w.x fs0, zero
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fadd <6 x bfloat> %a, %c
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
define void @fadd_vf_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fadd_vf_v8f16:
@@ -1687,6 +19010,734 @@ define void @fadd_vf_v2f64(ptr %x, double %y) {
ret void
}
+define void @fadd_fv_v8bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fadd_fv_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fadd_fv_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fadd <8 x bfloat> %c, %a
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fadd_fv_v6bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fadd_fv_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmv.w.x fs0, zero
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fadd.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fadd_fv_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmv.w.x fs0, zero
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fadd.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fadd <6 x bfloat> %c, %a
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fadd_fv_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fadd_fv_v8f16:
; ZVFH: # %bb.0:
@@ -1782,6 +19833,644 @@ define void @fadd_fv_v2f64(ptr %x, double %y) {
ret void
}
+define void @fsub_vf_v8bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fsub_vf_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fsub_vf_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fsub <8 x bfloat> %a, %c
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fsub_vf_v6bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fsub_vf_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v10, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v9, v10, 6
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: srli a0, a0, 16
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslidedown.vi v9, v10, 7
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fsub_vf_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v10, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v9, v10, 6
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: slli a0, a0, 48
+; RV64-NEXT: srli a0, a0, 48
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vslidedown.vi v9, v10, 7
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fsub <6 x bfloat> %a, %c
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fsub_vf_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fsub_vf_v8f16:
; ZVFH: # %bb.0:
@@ -1877,6 +20566,734 @@ define void @fsub_vf_v2f64(ptr %x, double %y) {
ret void
}
+define void @fsub_fv_v8bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fsub_fv_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fsub_fv_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fsub <8 x bfloat> %c, %a
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fsub_fv_v6bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fsub_fv_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmv.w.x fs0, zero
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fsub.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fsub_fv_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmv.w.x fs0, zero
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fsub.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fsub <6 x bfloat> %c, %a
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fsub_fv_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fsub_fv_v8f16:
; ZVFH: # %bb.0:
@@ -1972,6 +21389,734 @@ define void @fsub_fv_v2f64(ptr %x, double %y) {
ret void
}
+define void @fmul_vf_v8bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fmul_vf_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmul_vf_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fmul <8 x bfloat> %a, %c
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fmul_vf_v6bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fmul_vf_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmv.w.x fs0, zero
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmul_vf_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmv.w.x fs0, zero
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fmul <6 x bfloat> %a, %c
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fmul_vf_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fmul_vf_v8f16:
; ZVFH: # %bb.0:
@@ -2067,6 +22212,734 @@ define void @fmul_vf_v2f64(ptr %x, double %y) {
ret void
}
+define void @fmul_fv_v8bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fmul_fv_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmul_fv_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fmul <8 x bfloat> %c, %a
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fmul_fv_v6bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fmul_fv_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmv.w.x fs0, zero
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmul.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmul_fv_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmv.w.x fs0, zero
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmul.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fmul <6 x bfloat> %c, %a
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fmul_fv_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fmul_fv_v8f16:
; ZVFH: # %bb.0:
@@ -2162,6 +23035,734 @@ define void @fmul_fv_v2f64(ptr %x, double %y) {
ret void
}
+define void @fdiv_vf_v8bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fdiv_vf_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fdiv_vf_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fdiv <8 x bfloat> %a, %c
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fdiv_vf_v6bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fdiv_vf_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmv.w.x fs0, zero
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fa5, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fdiv_vf_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmv.w.x fs0, zero
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fa5, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fdiv <6 x bfloat> %a, %c
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fdiv_vf_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fdiv_vf_v8f16:
; ZVFH: # %bb.0:
@@ -2257,6 +23858,734 @@ define void @fdiv_vf_v2f64(ptr %x, double %y) {
ret void
}
+define void @fdiv_fv_v8bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fdiv_fv_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fdiv_fv_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %d = fdiv <8 x bfloat> %c, %a
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fdiv_fv_v6bf16(ptr %x, bfloat %y) {
+; RV32-LABEL: fdiv_fv_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fmv.w.x fs0, zero
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fdiv.s fa0, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fdiv_fv_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fmv.w.x fs0, zero
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fdiv.s fa0, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
+ %c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %d = fdiv <6 x bfloat> %c, %a
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
+
define void @fdiv_fv_v8f16(ptr %x, half %y) {
; ZVFH-LABEL: fdiv_fv_v8f16:
; ZVFH: # %bb.0:
@@ -2352,6 +24681,1040 @@ define void @fdiv_fv_v2f64(ptr %x, double %y) {
ret void
}
+define void @fma_vf_v8bf16(ptr %x, ptr %y, bfloat %z) {
+; RV32-LABEL: fma_vf_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v9, (s0)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fma_vf_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v9, (s0)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = insertelement <8 x bfloat> poison, bfloat %z, i32 0
+ %d = shufflevector <8 x bfloat> %c, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %e = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %a, <8 x bfloat> %d, <8 x bfloat> %b)
+ store <8 x bfloat> %e, ptr %x
+ ret void
+}
+
+define void @fma_vf_v6bf16(ptr %x, ptr %y, bfloat %z) {
+; RV32-LABEL: fma_vf_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v9, (s0)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmv.w.x fs0, zero
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fma_vf_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v9, (s0)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmv.w.x fs0, zero
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = insertelement <6 x bfloat> poison, bfloat %z, i32 0
+ %d = shufflevector <6 x bfloat> %c, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %e = call <6 x bfloat> @llvm.fma.v6bf16(<6 x bfloat> %a, <6 x bfloat> %d, <6 x bfloat> %b)
+ store <6 x bfloat> %e, ptr %x
+ ret void
+}
+
define void @fma_vf_v8f16(ptr %x, ptr %y, half %z) {
; ZVFH-LABEL: fma_vf_v8f16:
; ZVFH: # %bb.0:
@@ -2459,6 +25822,1040 @@ define void @fma_vf_v2f64(ptr %x, ptr %y, double %z) {
ret void
}
+define void @fma_fv_v8bf16(ptr %x, ptr %y, bfloat %z) {
+; RV32-LABEL: fma_fv_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v9, (s0)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fma_fv_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v9, (s0)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = insertelement <8 x bfloat> poison, bfloat %z, i32 0
+ %d = shufflevector <8 x bfloat> %c, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %e = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %d, <8 x bfloat> %a, <8 x bfloat> %b)
+ store <8 x bfloat> %e, ptr %x
+ ret void
+}
+
+define void @fma_fv_v6bf16(ptr %x, ptr %y, bfloat %z) {
+; RV32-LABEL: fma_fv_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v9, (s0)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmv.w.x fs0, zero
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fma_fv_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v9, (s0)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmv.w.x fs0, zero
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = insertelement <6 x bfloat> poison, bfloat %z, i32 0
+ %d = shufflevector <6 x bfloat> %c, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %e = call <6 x bfloat> @llvm.fma.v6bf16(<6 x bfloat> %d, <6 x bfloat> %a, <6 x bfloat> %b)
+ store <6 x bfloat> %e, ptr %x
+ ret void
+}
+
define void @fma_fv_v8f16(ptr %x, ptr %y, half %z) {
; ZVFH-LABEL: fma_fv_v8f16:
; ZVFH: # %bb.0:
@@ -2566,6 +26963,1334 @@ define void @fma_fv_v2f64(ptr %x, ptr %y, double %z) {
ret void
}
+define void @fmsub_vf_v8bf16(ptr %x, ptr %y, bfloat %z) {
+; RV32-LABEL: fmsub_vf_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: .cfi_def_cfa_offset 80
+; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 68(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 64(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s3, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s4, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s5, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s6, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s7, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s8, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s9, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: .cfi_offset s3, -20
+; RV32-NEXT: .cfi_offset s4, -24
+; RV32-NEXT: .cfi_offset s5, -28
+; RV32-NEXT: .cfi_offset s6, -32
+; RV32-NEXT: .cfi_offset s7, -36
+; RV32-NEXT: .cfi_offset s8, -40
+; RV32-NEXT: .cfi_offset s9, -44
+; RV32-NEXT: .cfi_offset fs0, -56
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a3, a2, 1
+; RV32-NEXT: add a2, a3, a2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 3 * vlenb
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v9, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: fmv.x.w s5, fa0
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v9, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: lui s6, 524288
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s3, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s4, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s7, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s8, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s9, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: slli s5, s5, 16
+; RV32-NEXT: fmv.w.x fs0, s5
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s5, fa0
+; RV32-NEXT: slli s9, s9, 16
+; RV32-NEXT: fmv.w.x fa5, s9
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s5
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s8, s8, 16
+; RV32-NEXT: fmv.w.x fa5, s8
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s7, s7, 16
+; RV32-NEXT: fmv.w.x fa5, s7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s4, s4, 16
+; RV32-NEXT: fmv.w.x fa5, s4
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s4, fa0
+; RV32-NEXT: slli s3, s3, 16
+; RV32-NEXT: fmv.w.x fa5, s3
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s4
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s2, s2, 16
+; RV32-NEXT: fmv.w.x fa5, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s1, s1, 16
+; RV32-NEXT: fmv.w.x fa5, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 64(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s3, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s4, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s5, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s6, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s7, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s8, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s9, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmsub_vf_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -144
+; RV64-NEXT: .cfi_def_cfa_offset 144
+; RV64-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 128(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 120(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 112(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s3, 104(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s4, 96(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s5, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s6, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s7, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s8, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs3, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs4, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs5, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset s2, -32
+; RV64-NEXT: .cfi_offset s3, -40
+; RV64-NEXT: .cfi_offset s4, -48
+; RV64-NEXT: .cfi_offset s5, -56
+; RV64-NEXT: .cfi_offset s6, -64
+; RV64-NEXT: .cfi_offset s7, -72
+; RV64-NEXT: .cfi_offset s8, -80
+; RV64-NEXT: .cfi_offset fs0, -88
+; RV64-NEXT: .cfi_offset fs1, -96
+; RV64-NEXT: .cfi_offset fs2, -104
+; RV64-NEXT: .cfi_offset fs3, -112
+; RV64-NEXT: .cfi_offset fs4, -120
+; RV64-NEXT: .cfi_offset fs5, -128
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a3, a2, 1
+; RV64-NEXT: add a2, a3, a2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 144 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w s4, fa0
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs2, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs3, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs4, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.x.w s2, fs5
+; RV64-NEXT: fmv.x.w s3, fs4
+; RV64-NEXT: fmv.x.w s5, fs3
+; RV64-NEXT: fmv.x.w s6, fs2
+; RV64-NEXT: fmv.x.w s7, fs1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: fmv.x.w s8, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli s4, s4, 16
+; RV64-NEXT: fmv.w.x fs0, s4
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa5, a1
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s4, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: slli s8, s8, 16
+; RV64-NEXT: fmv.w.x fa4, s8
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s4
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: slli s7, s7, 16
+; RV64-NEXT: fmv.w.x fa4, s7
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: slli s6, s6, 16
+; RV64-NEXT: fmv.w.x fa4, s6
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: slli s5, s5, 16
+; RV64-NEXT: fmv.w.x fa4, s5
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s4, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: slli s3, s3, 16
+; RV64-NEXT: fmv.w.x fa4, s3
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s4
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: slli s2, s2, 16
+; RV64-NEXT: fmv.w.x fa4, s2
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: slli s1, s1, 16
+; RV64-NEXT: fmv.w.x fa4, s1
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 120(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s2, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s3, 104(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s4, 96(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s5, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s6, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s7, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s8, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs3, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs4, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs5, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 144
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = insertelement <8 x bfloat> poison, bfloat %z, i32 0
+ %d = shufflevector <8 x bfloat> %c, <8 x bfloat> poison, <8 x i32> zeroinitializer
+ %neg = fneg <8 x bfloat> %b
+ %e = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %a, <8 x bfloat> %d, <8 x bfloat> %neg)
+ store <8 x bfloat> %e, ptr %x
+ ret void
+}
+
+define void @fmsub_vf_v6bf16(ptr %x, ptr %y, bfloat %z) {
+; RV32-LABEL: fmsub_vf_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -80
+; RV32-NEXT: .cfi_def_cfa_offset 80
+; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 68(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 64(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s3, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s4, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s5, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s6, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s7, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s8, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s9, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: .cfi_offset s3, -20
+; RV32-NEXT: .cfi_offset s4, -24
+; RV32-NEXT: .cfi_offset s5, -28
+; RV32-NEXT: .cfi_offset s6, -32
+; RV32-NEXT: .cfi_offset s7, -36
+; RV32-NEXT: .cfi_offset s8, -40
+; RV32-NEXT: .cfi_offset s9, -44
+; RV32-NEXT: .cfi_offset fs0, -56
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a3, a2, 1
+; RV32-NEXT: add a2, a3, a2
+; RV32-NEXT: sub sp, sp, a2
+; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 3 * vlenb
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v9, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: fmv.x.w s5, fa0
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v9, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: lui s6, 524288
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s3, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s4, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s7, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s8, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s9, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: xor a0, a0, s6
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: slli s5, s5, 16
+; RV32-NEXT: fmv.w.x fs0, s5
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s5, fa0
+; RV32-NEXT: slli s9, s9, 16
+; RV32-NEXT: fmv.w.x fa5, s9
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s5
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s8, s8, 16
+; RV32-NEXT: fmv.w.x fa5, s8
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s7, s7, 16
+; RV32-NEXT: fmv.w.x fa5, s7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s4, s4, 16
+; RV32-NEXT: fmv.w.x fa5, s4
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s4, fa0
+; RV32-NEXT: slli s3, s3, 16
+; RV32-NEXT: fmv.w.x fa5, s3
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s4
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s2, s2, 16
+; RV32-NEXT: fmv.w.x fa5, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmv.w.x fs0, zero
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: slli s1, s1, 16
+; RV32-NEXT: fmv.w.x fa5, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 64(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s3, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s4, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s5, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s6, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s7, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s8, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s9, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 80
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmsub_vf_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -144
+; RV64-NEXT: .cfi_def_cfa_offset 144
+; RV64-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 128(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 120(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 112(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s3, 104(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s4, 96(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s5, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s6, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s7, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s8, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs3, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs4, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs5, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset s2, -32
+; RV64-NEXT: .cfi_offset s3, -40
+; RV64-NEXT: .cfi_offset s4, -48
+; RV64-NEXT: .cfi_offset s5, -56
+; RV64-NEXT: .cfi_offset s6, -64
+; RV64-NEXT: .cfi_offset s7, -72
+; RV64-NEXT: .cfi_offset s8, -80
+; RV64-NEXT: .cfi_offset fs0, -88
+; RV64-NEXT: .cfi_offset fs1, -96
+; RV64-NEXT: .cfi_offset fs2, -104
+; RV64-NEXT: .cfi_offset fs3, -112
+; RV64-NEXT: .cfi_offset fs4, -120
+; RV64-NEXT: .cfi_offset fs5, -128
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a3, a2, 1
+; RV64-NEXT: add a2, a3, a2
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 144 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v8, (a1)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w s4, fa0
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs2, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs3, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs4, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: fmv.x.w s2, fs5
+; RV64-NEXT: fmv.x.w s3, fs4
+; RV64-NEXT: fmv.x.w s5, fs3
+; RV64-NEXT: fmv.x.w s6, fs2
+; RV64-NEXT: fmv.x.w s7, fs1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fneg.s fa0, fa5
+; RV64-NEXT: fmv.x.w s8, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli s4, s4, 16
+; RV64-NEXT: fmv.w.x fs0, s4
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa5, a1
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s4, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: slli s8, s8, 16
+; RV64-NEXT: fmv.w.x fa4, s8
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s4
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: slli s7, s7, 16
+; RV64-NEXT: fmv.w.x fa4, s7
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: slli s6, s6, 16
+; RV64-NEXT: fmv.w.x fa4, s6
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: slli s5, s5, 16
+; RV64-NEXT: fmv.w.x fa4, s5
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s4, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: slli s3, s3, 16
+; RV64-NEXT: fmv.w.x fa4, s3
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s4
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: slli s2, s2, 16
+; RV64-NEXT: fmv.w.x fa4, s2
+; RV64-NEXT: fmv.w.x fs0, zero
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: slli s1, s1, 16
+; RV64-NEXT: fmv.w.x fa4, s1
+; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 120(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s2, 112(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s3, 104(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s4, 96(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s5, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s6, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s7, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s8, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs3, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs4, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs5, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 144
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = insertelement <6 x bfloat> poison, bfloat %z, i32 0
+ %d = shufflevector <6 x bfloat> %c, <6 x bfloat> poison, <6 x i32> zeroinitializer
+ %neg = fneg <6 x bfloat> %b
+ %e = call <6 x bfloat> @llvm.fma.v6bf16(<6 x bfloat> %a, <6 x bfloat> %d, <6 x bfloat> %neg)
+ store <6 x bfloat> %e, ptr %x
+ ret void
+}
+
define void @fmsub_vf_v8f16(ptr %x, ptr %y, half %z) {
; ZVFH-LABEL: fmsub_vf_v8f16:
; ZVFH: # %bb.0:
@@ -2721,13 +28446,899 @@ define void @fnmadd_fv_v2f64(ptr %x, ptr %y, double %z) {
ret void
}
+define void @trunc_v8bf16(ptr %x) {
+; RV32-LABEL: trunc_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -64
+; RV32-NEXT: .cfi_def_cfa_offset 64
+; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
+; RV32-NEXT: fsd fs1, 32(sp) # 8-byte Folded Spill
+; RV32-NEXT: fsd fs2, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: .cfi_offset fs1, -32
+; RV32-NEXT: .cfi_offset fs2, -40
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x fs2, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs2
+; RV32-NEXT: beqz a0, .LBB169_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32-NEXT: fcvt.s.w fa5, a0, rtz
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB169_2:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs0, fa0
+; RV32-NEXT: beqz a0, .LBB169_4
+; RV32-NEXT: # %bb.3:
+; RV32-NEXT: fcvt.w.s a0, fa5, rtz
+; RV32-NEXT: fcvt.s.w fa4, a0, rtz
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB169_4:
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: beqz a0, .LBB169_6
+; RV32-NEXT: # %bb.5:
+; RV32-NEXT: fcvt.w.s a0, fa5, rtz
+; RV32-NEXT: fcvt.s.w fa4, a0, rtz
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB169_6:
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs1, fa0
+; RV32-NEXT: beqz a0, .LBB169_8
+; RV32-NEXT: # %bb.7:
+; RV32-NEXT: fcvt.w.s a0, fa5, rtz
+; RV32-NEXT: fcvt.s.w fa4, a0, rtz
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB169_8:
+; RV32-NEXT: fmv.x.w s1, fs0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs0, fa0
+; RV32-NEXT: beqz a0, .LBB169_10
+; RV32-NEXT: # %bb.9:
+; RV32-NEXT: fcvt.w.s a0, fa5, rtz
+; RV32-NEXT: fcvt.s.w fa4, a0, rtz
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB169_10:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w s2, fs1
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.s fa5, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa4, fa0
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.x.w s1, fa5
+; RV32-NEXT: beqz a0, .LBB169_12
+; RV32-NEXT: # %bb.11:
+; RV32-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32-NEXT: fcvt.s.w fa5, a0, rtz
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB169_12:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w s2, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v9, v9, 6
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs2
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a0, .LBB169_14
+; RV32-NEXT: # %bb.13:
+; RV32-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32-NEXT: fcvt.s.w fa5, a0, rtz
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB169_14:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs2
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB169_16
+; RV32-NEXT: # %bb.15:
+; RV32-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32-NEXT: fcvt.s.w fa5, a0, rtz
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB169_16:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
+; RV32-NEXT: fld fs1, 32(sp) # 8-byte Folded Reload
+; RV32-NEXT: fld fs2, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -96
+; RV64-NEXT: .cfi_def_cfa_offset 96
+; RV64-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset s2, -32
+; RV64-NEXT: .cfi_offset fs0, -40
+; RV64-NEXT: .cfi_offset fs1, -48
+; RV64-NEXT: .cfi_offset fs2, -56
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 96 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x fs2, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs2
+; RV64-NEXT: beqz a0, .LBB169_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: fcvt.w.s a0, fa0, rtz
+; RV64-NEXT: fcvt.s.w fa5, a0, rtz
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB169_2:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: beqz a0, .LBB169_4
+; RV64-NEXT: # %bb.3:
+; RV64-NEXT: fcvt.w.s a0, fa5, rtz
+; RV64-NEXT: fcvt.s.w fa4, a0, rtz
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB169_4:
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: beqz a0, .LBB169_6
+; RV64-NEXT: # %bb.5:
+; RV64-NEXT: fcvt.w.s a0, fa5, rtz
+; RV64-NEXT: fcvt.s.w fa4, a0, rtz
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB169_6:
+; RV64-NEXT: fmv.x.w s2, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs1, fa0
+; RV64-NEXT: beqz a0, .LBB169_8
+; RV64-NEXT: # %bb.7:
+; RV64-NEXT: fcvt.w.s a0, fa5, rtz
+; RV64-NEXT: fcvt.s.w fa4, a0, rtz
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB169_8:
+; RV64-NEXT: fmv.x.w s1, fs0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: beqz a0, .LBB169_10
+; RV64-NEXT: # %bb.9:
+; RV64-NEXT: fcvt.w.s a0, fa5, rtz
+; RV64-NEXT: fcvt.s.w fa4, a0, rtz
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB169_10:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w s2, fs1
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fa5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa4, fa0
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.x.w s1, fa5
+; RV64-NEXT: beqz a0, .LBB169_12
+; RV64-NEXT: # %bb.11:
+; RV64-NEXT: fcvt.w.s a0, fa0, rtz
+; RV64-NEXT: fcvt.s.w fa5, a0, rtz
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB169_12:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w s2, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v9, v9, 6
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs2
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a0, .LBB169_14
+; RV64-NEXT: # %bb.13:
+; RV64-NEXT: fcvt.w.s a0, fa0, rtz
+; RV64-NEXT: fcvt.s.w fa5, a0, rtz
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB169_14:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs2
+; RV64-NEXT: addi a2, sp, 32
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB169_16
+; RV64-NEXT: # %bb.15:
+; RV64-NEXT: fcvt.w.s a0, fa0, rtz
+; RV64-NEXT: fcvt.s.w fa5, a0, rtz
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB169_16:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s2, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 96
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.trunc.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
+
+define void @trunc_v6bf16(ptr %x) {
+; RV32-LABEL: trunc_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs0
+; RV32-NEXT: beqz a0, .LBB170_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32-NEXT: fcvt.s.w fa5, a0, rtz
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB170_2:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.s fa5, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa4, fa0
+; RV32-NEXT: flt.s a0, fa4, fs0
+; RV32-NEXT: fmv.x.w s1, fa5
+; RV32-NEXT: beqz a0, .LBB170_4
+; RV32-NEXT: # %bb.3:
+; RV32-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32-NEXT: fcvt.s.w fa5, a0, rtz
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB170_4:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v9, v9, 2
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a0, .LBB170_6
+; RV32-NEXT: # %bb.5:
+; RV32-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32-NEXT: fcvt.s.w fa5, a0, rtz
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB170_6:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs0
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB170_8
+; RV32-NEXT: # %bb.7:
+; RV32-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32-NEXT: fcvt.s.w fa5, a0, rtz
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB170_8:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs0
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB170_10
+; RV32-NEXT: # %bb.9:
+; RV32-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32-NEXT: fcvt.s.w fa5, a0, rtz
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB170_10:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs0
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB170_12
+; RV32-NEXT: # %bb.11:
+; RV32-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32-NEXT: fcvt.s.w fa5, a0, rtz
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB170_12:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs0
+; RV64-NEXT: beqz a0, .LBB170_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: fcvt.w.s a0, fa0, rtz
+; RV64-NEXT: fcvt.s.w fa5, a0, rtz
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB170_2:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fa5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa4, fa0
+; RV64-NEXT: flt.s a0, fa4, fs0
+; RV64-NEXT: fmv.x.w s1, fa5
+; RV64-NEXT: beqz a0, .LBB170_4
+; RV64-NEXT: # %bb.3:
+; RV64-NEXT: fcvt.w.s a0, fa0, rtz
+; RV64-NEXT: fcvt.s.w fa5, a0, rtz
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB170_4:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v9, v9, 2
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a0, .LBB170_6
+; RV64-NEXT: # %bb.5:
+; RV64-NEXT: fcvt.w.s a0, fa0, rtz
+; RV64-NEXT: fcvt.s.w fa5, a0, rtz
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB170_6:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs0
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB170_8
+; RV64-NEXT: # %bb.7:
+; RV64-NEXT: fcvt.w.s a0, fa0, rtz
+; RV64-NEXT: fcvt.s.w fa5, a0, rtz
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB170_8:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs0
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB170_10
+; RV64-NEXT: # %bb.9:
+; RV64-NEXT: fcvt.w.s a0, fa0, rtz
+; RV64-NEXT: fcvt.s.w fa5, a0, rtz
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB170_10:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs0
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB170_12
+; RV64-NEXT: # %bb.11:
+; RV64-NEXT: fcvt.w.s a0, fa0, rtz
+; RV64-NEXT: fcvt.s.w fa5, a0, rtz
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB170_12:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = call <6 x bfloat> @llvm.trunc.v6bf16(<6 x bfloat> %a)
+ store <6 x bfloat> %b, ptr %x
+ ret void
+}
+
define void @trunc_v8f16(ptr %x) {
; ZVFH-LABEL: trunc_v8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI115_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI115_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI171_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI171_0)(a1)
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -2760,15 +29371,14 @@ define void @trunc_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.trunc.v8f16(<8 x half>)
define void @trunc_v6f16(ptr %x) {
; ZVFH-LABEL: trunc_v6f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI116_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI116_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI172_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI172_0)(a1)
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
@@ -2803,7 +29413,6 @@ define void @trunc_v6f16(ptr %x) {
store <6 x half> %b, ptr %x
ret void
}
-declare <6 x half> @llvm.trunc.v6f16(<6 x half>)
define void @trunc_v4f32(ptr %x) {
; CHECK-LABEL: trunc_v4f32:
@@ -2825,15 +29434,14 @@ define void @trunc_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.trunc.v4f32(<4 x float>)
define void @trunc_v2f64(ptr %x) {
; CHECK-LABEL: trunc_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI118_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI118_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI174_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI174_0)(a1)
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -2847,15 +29455,900 @@ define void @trunc_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
+
+define void @ceil_v8bf16(ptr %x) {
+; RV32-LABEL: ceil_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -64
+; RV32-NEXT: .cfi_def_cfa_offset 64
+; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
+; RV32-NEXT: fsd fs1, 32(sp) # 8-byte Folded Spill
+; RV32-NEXT: fsd fs2, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: .cfi_offset fs1, -32
+; RV32-NEXT: .cfi_offset fs2, -40
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x fs2, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs2
+; RV32-NEXT: beqz a0, .LBB175_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: fcvt.w.s a0, fa0, rup
+; RV32-NEXT: fcvt.s.w fa5, a0, rup
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB175_2:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs0, fa0
+; RV32-NEXT: beqz a0, .LBB175_4
+; RV32-NEXT: # %bb.3:
+; RV32-NEXT: fcvt.w.s a0, fa5, rup
+; RV32-NEXT: fcvt.s.w fa4, a0, rup
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB175_4:
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: beqz a0, .LBB175_6
+; RV32-NEXT: # %bb.5:
+; RV32-NEXT: fcvt.w.s a0, fa5, rup
+; RV32-NEXT: fcvt.s.w fa4, a0, rup
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB175_6:
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs1, fa0
+; RV32-NEXT: beqz a0, .LBB175_8
+; RV32-NEXT: # %bb.7:
+; RV32-NEXT: fcvt.w.s a0, fa5, rup
+; RV32-NEXT: fcvt.s.w fa4, a0, rup
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB175_8:
+; RV32-NEXT: fmv.x.w s1, fs0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs0, fa0
+; RV32-NEXT: beqz a0, .LBB175_10
+; RV32-NEXT: # %bb.9:
+; RV32-NEXT: fcvt.w.s a0, fa5, rup
+; RV32-NEXT: fcvt.s.w fa4, a0, rup
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB175_10:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w s2, fs1
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.s fa5, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa4, fa0
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.x.w s1, fa5
+; RV32-NEXT: beqz a0, .LBB175_12
+; RV32-NEXT: # %bb.11:
+; RV32-NEXT: fcvt.w.s a0, fa0, rup
+; RV32-NEXT: fcvt.s.w fa5, a0, rup
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB175_12:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w s2, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v9, v9, 6
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs2
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a0, .LBB175_14
+; RV32-NEXT: # %bb.13:
+; RV32-NEXT: fcvt.w.s a0, fa0, rup
+; RV32-NEXT: fcvt.s.w fa5, a0, rup
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB175_14:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs2
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB175_16
+; RV32-NEXT: # %bb.15:
+; RV32-NEXT: fcvt.w.s a0, fa0, rup
+; RV32-NEXT: fcvt.s.w fa5, a0, rup
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB175_16:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
+; RV32-NEXT: fld fs1, 32(sp) # 8-byte Folded Reload
+; RV32-NEXT: fld fs2, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -96
+; RV64-NEXT: .cfi_def_cfa_offset 96
+; RV64-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset s2, -32
+; RV64-NEXT: .cfi_offset fs0, -40
+; RV64-NEXT: .cfi_offset fs1, -48
+; RV64-NEXT: .cfi_offset fs2, -56
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 96 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x fs2, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs2
+; RV64-NEXT: beqz a0, .LBB175_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: fcvt.w.s a0, fa0, rup
+; RV64-NEXT: fcvt.s.w fa5, a0, rup
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB175_2:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: beqz a0, .LBB175_4
+; RV64-NEXT: # %bb.3:
+; RV64-NEXT: fcvt.w.s a0, fa5, rup
+; RV64-NEXT: fcvt.s.w fa4, a0, rup
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB175_4:
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: beqz a0, .LBB175_6
+; RV64-NEXT: # %bb.5:
+; RV64-NEXT: fcvt.w.s a0, fa5, rup
+; RV64-NEXT: fcvt.s.w fa4, a0, rup
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB175_6:
+; RV64-NEXT: fmv.x.w s2, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs1, fa0
+; RV64-NEXT: beqz a0, .LBB175_8
+; RV64-NEXT: # %bb.7:
+; RV64-NEXT: fcvt.w.s a0, fa5, rup
+; RV64-NEXT: fcvt.s.w fa4, a0, rup
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB175_8:
+; RV64-NEXT: fmv.x.w s1, fs0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: beqz a0, .LBB175_10
+; RV64-NEXT: # %bb.9:
+; RV64-NEXT: fcvt.w.s a0, fa5, rup
+; RV64-NEXT: fcvt.s.w fa4, a0, rup
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB175_10:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w s2, fs1
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fa5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa4, fa0
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.x.w s1, fa5
+; RV64-NEXT: beqz a0, .LBB175_12
+; RV64-NEXT: # %bb.11:
+; RV64-NEXT: fcvt.w.s a0, fa0, rup
+; RV64-NEXT: fcvt.s.w fa5, a0, rup
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB175_12:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w s2, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v9, v9, 6
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs2
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a0, .LBB175_14
+; RV64-NEXT: # %bb.13:
+; RV64-NEXT: fcvt.w.s a0, fa0, rup
+; RV64-NEXT: fcvt.s.w fa5, a0, rup
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB175_14:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs2
+; RV64-NEXT: addi a2, sp, 32
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB175_16
+; RV64-NEXT: # %bb.15:
+; RV64-NEXT: fcvt.w.s a0, fa0, rup
+; RV64-NEXT: fcvt.s.w fa5, a0, rup
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB175_16:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s2, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 96
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.ceil.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
+
+define void @ceil_v6bf16(ptr %x) {
+; RV32-LABEL: ceil_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs0
+; RV32-NEXT: beqz a0, .LBB176_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: fcvt.w.s a0, fa0, rup
+; RV32-NEXT: fcvt.s.w fa5, a0, rup
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB176_2:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.s fa5, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa4, fa0
+; RV32-NEXT: flt.s a0, fa4, fs0
+; RV32-NEXT: fmv.x.w s1, fa5
+; RV32-NEXT: beqz a0, .LBB176_4
+; RV32-NEXT: # %bb.3:
+; RV32-NEXT: fcvt.w.s a0, fa0, rup
+; RV32-NEXT: fcvt.s.w fa5, a0, rup
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB176_4:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v9, v9, 2
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a0, .LBB176_6
+; RV32-NEXT: # %bb.5:
+; RV32-NEXT: fcvt.w.s a0, fa0, rup
+; RV32-NEXT: fcvt.s.w fa5, a0, rup
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB176_6:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs0
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB176_8
+; RV32-NEXT: # %bb.7:
+; RV32-NEXT: fcvt.w.s a0, fa0, rup
+; RV32-NEXT: fcvt.s.w fa5, a0, rup
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB176_8:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs0
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB176_10
+; RV32-NEXT: # %bb.9:
+; RV32-NEXT: fcvt.w.s a0, fa0, rup
+; RV32-NEXT: fcvt.s.w fa5, a0, rup
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB176_10:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs0
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB176_12
+; RV32-NEXT: # %bb.11:
+; RV32-NEXT: fcvt.w.s a0, fa0, rup
+; RV32-NEXT: fcvt.s.w fa5, a0, rup
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB176_12:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs0
+; RV64-NEXT: beqz a0, .LBB176_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: fcvt.w.s a0, fa0, rup
+; RV64-NEXT: fcvt.s.w fa5, a0, rup
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB176_2:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fa5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa4, fa0
+; RV64-NEXT: flt.s a0, fa4, fs0
+; RV64-NEXT: fmv.x.w s1, fa5
+; RV64-NEXT: beqz a0, .LBB176_4
+; RV64-NEXT: # %bb.3:
+; RV64-NEXT: fcvt.w.s a0, fa0, rup
+; RV64-NEXT: fcvt.s.w fa5, a0, rup
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB176_4:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v9, v9, 2
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a0, .LBB176_6
+; RV64-NEXT: # %bb.5:
+; RV64-NEXT: fcvt.w.s a0, fa0, rup
+; RV64-NEXT: fcvt.s.w fa5, a0, rup
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB176_6:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs0
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB176_8
+; RV64-NEXT: # %bb.7:
+; RV64-NEXT: fcvt.w.s a0, fa0, rup
+; RV64-NEXT: fcvt.s.w fa5, a0, rup
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB176_8:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs0
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB176_10
+; RV64-NEXT: # %bb.9:
+; RV64-NEXT: fcvt.w.s a0, fa0, rup
+; RV64-NEXT: fcvt.s.w fa5, a0, rup
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB176_10:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs0
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB176_12
+; RV64-NEXT: # %bb.11:
+; RV64-NEXT: fcvt.w.s a0, fa0, rup
+; RV64-NEXT: fcvt.s.w fa5, a0, rup
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB176_12:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = call <6 x bfloat> @llvm.ceil.v6bf16(<6 x bfloat> %a)
+ store <6 x bfloat> %b, ptr %x
+ ret void
+}
define void @ceil_v8f16(ptr %x) {
; ZVFH-LABEL: ceil_v8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI119_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI119_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI177_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI177_0)(a1)
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 3
@@ -2892,15 +30385,14 @@ define void @ceil_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.ceil.v8f16(<8 x half>)
define void @ceil_v6f16(ptr %x) {
; ZVFH-LABEL: ceil_v6f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI120_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI120_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI178_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI178_0)(a1)
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
@@ -2939,7 +30431,6 @@ define void @ceil_v6f16(ptr %x) {
store <6 x half> %b, ptr %x
ret void
}
-declare <6 x half> @llvm.ceil.v6f16(<6 x half>)
define void @ceil_v4f32(ptr %x) {
; CHECK-LABEL: ceil_v4f32:
@@ -2963,15 +30454,14 @@ define void @ceil_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.ceil.v4f32(<4 x float>)
define void @ceil_v2f64(ptr %x) {
; CHECK-LABEL: ceil_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI122_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI122_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI180_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI180_0)(a1)
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a1, 3
@@ -2987,15 +30477,900 @@ define void @ceil_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.ceil.v2f64(<2 x double>)
+
+define void @floor_v8bf16(ptr %x) {
+; RV32-LABEL: floor_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -64
+; RV32-NEXT: .cfi_def_cfa_offset 64
+; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
+; RV32-NEXT: fsd fs1, 32(sp) # 8-byte Folded Spill
+; RV32-NEXT: fsd fs2, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: .cfi_offset fs1, -32
+; RV32-NEXT: .cfi_offset fs2, -40
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x fs2, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs2
+; RV32-NEXT: beqz a0, .LBB181_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: fcvt.w.s a0, fa0, rdn
+; RV32-NEXT: fcvt.s.w fa5, a0, rdn
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB181_2:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs0, fa0
+; RV32-NEXT: beqz a0, .LBB181_4
+; RV32-NEXT: # %bb.3:
+; RV32-NEXT: fcvt.w.s a0, fa5, rdn
+; RV32-NEXT: fcvt.s.w fa4, a0, rdn
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB181_4:
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: beqz a0, .LBB181_6
+; RV32-NEXT: # %bb.5:
+; RV32-NEXT: fcvt.w.s a0, fa5, rdn
+; RV32-NEXT: fcvt.s.w fa4, a0, rdn
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB181_6:
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs1, fa0
+; RV32-NEXT: beqz a0, .LBB181_8
+; RV32-NEXT: # %bb.7:
+; RV32-NEXT: fcvt.w.s a0, fa5, rdn
+; RV32-NEXT: fcvt.s.w fa4, a0, rdn
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB181_8:
+; RV32-NEXT: fmv.x.w s1, fs0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs0, fa0
+; RV32-NEXT: beqz a0, .LBB181_10
+; RV32-NEXT: # %bb.9:
+; RV32-NEXT: fcvt.w.s a0, fa5, rdn
+; RV32-NEXT: fcvt.s.w fa4, a0, rdn
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB181_10:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w s2, fs1
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.s fa5, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa4, fa0
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.x.w s1, fa5
+; RV32-NEXT: beqz a0, .LBB181_12
+; RV32-NEXT: # %bb.11:
+; RV32-NEXT: fcvt.w.s a0, fa0, rdn
+; RV32-NEXT: fcvt.s.w fa5, a0, rdn
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB181_12:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w s2, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v9, v9, 6
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs2
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a0, .LBB181_14
+; RV32-NEXT: # %bb.13:
+; RV32-NEXT: fcvt.w.s a0, fa0, rdn
+; RV32-NEXT: fcvt.s.w fa5, a0, rdn
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB181_14:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs2
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB181_16
+; RV32-NEXT: # %bb.15:
+; RV32-NEXT: fcvt.w.s a0, fa0, rdn
+; RV32-NEXT: fcvt.s.w fa5, a0, rdn
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB181_16:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
+; RV32-NEXT: fld fs1, 32(sp) # 8-byte Folded Reload
+; RV32-NEXT: fld fs2, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: ret
+;
+; RV64-LABEL: floor_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -96
+; RV64-NEXT: .cfi_def_cfa_offset 96
+; RV64-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset s2, -32
+; RV64-NEXT: .cfi_offset fs0, -40
+; RV64-NEXT: .cfi_offset fs1, -48
+; RV64-NEXT: .cfi_offset fs2, -56
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 96 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x fs2, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs2
+; RV64-NEXT: beqz a0, .LBB181_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: fcvt.w.s a0, fa0, rdn
+; RV64-NEXT: fcvt.s.w fa5, a0, rdn
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB181_2:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: beqz a0, .LBB181_4
+; RV64-NEXT: # %bb.3:
+; RV64-NEXT: fcvt.w.s a0, fa5, rdn
+; RV64-NEXT: fcvt.s.w fa4, a0, rdn
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB181_4:
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: beqz a0, .LBB181_6
+; RV64-NEXT: # %bb.5:
+; RV64-NEXT: fcvt.w.s a0, fa5, rdn
+; RV64-NEXT: fcvt.s.w fa4, a0, rdn
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB181_6:
+; RV64-NEXT: fmv.x.w s2, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs1, fa0
+; RV64-NEXT: beqz a0, .LBB181_8
+; RV64-NEXT: # %bb.7:
+; RV64-NEXT: fcvt.w.s a0, fa5, rdn
+; RV64-NEXT: fcvt.s.w fa4, a0, rdn
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB181_8:
+; RV64-NEXT: fmv.x.w s1, fs0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: beqz a0, .LBB181_10
+; RV64-NEXT: # %bb.9:
+; RV64-NEXT: fcvt.w.s a0, fa5, rdn
+; RV64-NEXT: fcvt.s.w fa4, a0, rdn
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB181_10:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w s2, fs1
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fa5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa4, fa0
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.x.w s1, fa5
+; RV64-NEXT: beqz a0, .LBB181_12
+; RV64-NEXT: # %bb.11:
+; RV64-NEXT: fcvt.w.s a0, fa0, rdn
+; RV64-NEXT: fcvt.s.w fa5, a0, rdn
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB181_12:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w s2, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v9, v9, 6
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs2
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a0, .LBB181_14
+; RV64-NEXT: # %bb.13:
+; RV64-NEXT: fcvt.w.s a0, fa0, rdn
+; RV64-NEXT: fcvt.s.w fa5, a0, rdn
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB181_14:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs2
+; RV64-NEXT: addi a2, sp, 32
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB181_16
+; RV64-NEXT: # %bb.15:
+; RV64-NEXT: fcvt.w.s a0, fa0, rdn
+; RV64-NEXT: fcvt.s.w fa5, a0, rdn
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB181_16:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s2, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 96
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.floor.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
+
+define void @floor_v6bf16(ptr %x) {
+; RV32-LABEL: floor_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs0
+; RV32-NEXT: beqz a0, .LBB182_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: fcvt.w.s a0, fa0, rdn
+; RV32-NEXT: fcvt.s.w fa5, a0, rdn
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB182_2:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.s fa5, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa4, fa0
+; RV32-NEXT: flt.s a0, fa4, fs0
+; RV32-NEXT: fmv.x.w s1, fa5
+; RV32-NEXT: beqz a0, .LBB182_4
+; RV32-NEXT: # %bb.3:
+; RV32-NEXT: fcvt.w.s a0, fa0, rdn
+; RV32-NEXT: fcvt.s.w fa5, a0, rdn
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB182_4:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v9, v9, 2
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a0, .LBB182_6
+; RV32-NEXT: # %bb.5:
+; RV32-NEXT: fcvt.w.s a0, fa0, rdn
+; RV32-NEXT: fcvt.s.w fa5, a0, rdn
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB182_6:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs0
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB182_8
+; RV32-NEXT: # %bb.7:
+; RV32-NEXT: fcvt.w.s a0, fa0, rdn
+; RV32-NEXT: fcvt.s.w fa5, a0, rdn
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB182_8:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs0
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB182_10
+; RV32-NEXT: # %bb.9:
+; RV32-NEXT: fcvt.w.s a0, fa0, rdn
+; RV32-NEXT: fcvt.s.w fa5, a0, rdn
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB182_10:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs0
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB182_12
+; RV32-NEXT: # %bb.11:
+; RV32-NEXT: fcvt.w.s a0, fa0, rdn
+; RV32-NEXT: fcvt.s.w fa5, a0, rdn
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB182_12:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: floor_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs0
+; RV64-NEXT: beqz a0, .LBB182_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: fcvt.w.s a0, fa0, rdn
+; RV64-NEXT: fcvt.s.w fa5, a0, rdn
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB182_2:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fa5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa4, fa0
+; RV64-NEXT: flt.s a0, fa4, fs0
+; RV64-NEXT: fmv.x.w s1, fa5
+; RV64-NEXT: beqz a0, .LBB182_4
+; RV64-NEXT: # %bb.3:
+; RV64-NEXT: fcvt.w.s a0, fa0, rdn
+; RV64-NEXT: fcvt.s.w fa5, a0, rdn
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB182_4:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v9, v9, 2
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a0, .LBB182_6
+; RV64-NEXT: # %bb.5:
+; RV64-NEXT: fcvt.w.s a0, fa0, rdn
+; RV64-NEXT: fcvt.s.w fa5, a0, rdn
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB182_6:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs0
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB182_8
+; RV64-NEXT: # %bb.7:
+; RV64-NEXT: fcvt.w.s a0, fa0, rdn
+; RV64-NEXT: fcvt.s.w fa5, a0, rdn
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB182_8:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs0
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB182_10
+; RV64-NEXT: # %bb.9:
+; RV64-NEXT: fcvt.w.s a0, fa0, rdn
+; RV64-NEXT: fcvt.s.w fa5, a0, rdn
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB182_10:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs0
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB182_12
+; RV64-NEXT: # %bb.11:
+; RV64-NEXT: fcvt.w.s a0, fa0, rdn
+; RV64-NEXT: fcvt.s.w fa5, a0, rdn
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB182_12:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = call <6 x bfloat> @llvm.floor.v6bf16(<6 x bfloat> %a)
+ store <6 x bfloat> %b, ptr %x
+ ret void
+}
define void @floor_v8f16(ptr %x) {
; ZVFH-LABEL: floor_v8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI123_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI123_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI183_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI183_0)(a1)
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 2
@@ -3032,15 +31407,14 @@ define void @floor_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.floor.v8f16(<8 x half>)
define void @floor_v6f16(ptr %x) {
; ZVFH-LABEL: floor_v6f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI124_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI124_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI184_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI184_0)(a1)
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
@@ -3079,7 +31453,6 @@ define void @floor_v6f16(ptr %x) {
store <6 x half> %b, ptr %x
ret void
}
-declare <6 x half> @llvm.floor.v6f16(<6 x half>)
define void @floor_v4f32(ptr %x) {
; CHECK-LABEL: floor_v4f32:
@@ -3103,15 +31476,14 @@ define void @floor_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.floor.v4f32(<4 x float>)
define void @floor_v2f64(ptr %x) {
; CHECK-LABEL: floor_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI126_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI126_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI186_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI186_0)(a1)
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a1, 2
@@ -3127,15 +31499,900 @@ define void @floor_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.floor.v2f64(<2 x double>)
+
+define void @round_v8bf16(ptr %x) {
+; RV32-LABEL: round_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -64
+; RV32-NEXT: .cfi_def_cfa_offset 64
+; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
+; RV32-NEXT: fsd fs1, 32(sp) # 8-byte Folded Spill
+; RV32-NEXT: fsd fs2, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: .cfi_offset fs1, -32
+; RV32-NEXT: .cfi_offset fs2, -40
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x fs2, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs2
+; RV32-NEXT: beqz a0, .LBB187_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: fcvt.w.s a0, fa0, rmm
+; RV32-NEXT: fcvt.s.w fa5, a0, rmm
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB187_2:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs0, fa0
+; RV32-NEXT: beqz a0, .LBB187_4
+; RV32-NEXT: # %bb.3:
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: fcvt.s.w fa4, a0, rmm
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB187_4:
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: beqz a0, .LBB187_6
+; RV32-NEXT: # %bb.5:
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: fcvt.s.w fa4, a0, rmm
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB187_6:
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs1, fa0
+; RV32-NEXT: beqz a0, .LBB187_8
+; RV32-NEXT: # %bb.7:
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: fcvt.s.w fa4, a0, rmm
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB187_8:
+; RV32-NEXT: fmv.x.w s1, fs0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs0, fa0
+; RV32-NEXT: beqz a0, .LBB187_10
+; RV32-NEXT: # %bb.9:
+; RV32-NEXT: fcvt.w.s a0, fa5, rmm
+; RV32-NEXT: fcvt.s.w fa4, a0, rmm
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB187_10:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w s2, fs1
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.s fa5, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa4, fa0
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.x.w s1, fa5
+; RV32-NEXT: beqz a0, .LBB187_12
+; RV32-NEXT: # %bb.11:
+; RV32-NEXT: fcvt.w.s a0, fa0, rmm
+; RV32-NEXT: fcvt.s.w fa5, a0, rmm
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB187_12:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w s2, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v9, v9, 6
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs2
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a0, .LBB187_14
+; RV32-NEXT: # %bb.13:
+; RV32-NEXT: fcvt.w.s a0, fa0, rmm
+; RV32-NEXT: fcvt.s.w fa5, a0, rmm
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB187_14:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs2
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB187_16
+; RV32-NEXT: # %bb.15:
+; RV32-NEXT: fcvt.w.s a0, fa0, rmm
+; RV32-NEXT: fcvt.s.w fa5, a0, rmm
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB187_16:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
+; RV32-NEXT: fld fs1, 32(sp) # 8-byte Folded Reload
+; RV32-NEXT: fld fs2, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: ret
+;
+; RV64-LABEL: round_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -96
+; RV64-NEXT: .cfi_def_cfa_offset 96
+; RV64-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset s2, -32
+; RV64-NEXT: .cfi_offset fs0, -40
+; RV64-NEXT: .cfi_offset fs1, -48
+; RV64-NEXT: .cfi_offset fs2, -56
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 96 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x fs2, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs2
+; RV64-NEXT: beqz a0, .LBB187_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: fcvt.w.s a0, fa0, rmm
+; RV64-NEXT: fcvt.s.w fa5, a0, rmm
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB187_2:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: beqz a0, .LBB187_4
+; RV64-NEXT: # %bb.3:
+; RV64-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-NEXT: fcvt.s.w fa4, a0, rmm
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB187_4:
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: beqz a0, .LBB187_6
+; RV64-NEXT: # %bb.5:
+; RV64-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-NEXT: fcvt.s.w fa4, a0, rmm
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB187_6:
+; RV64-NEXT: fmv.x.w s2, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs1, fa0
+; RV64-NEXT: beqz a0, .LBB187_8
+; RV64-NEXT: # %bb.7:
+; RV64-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-NEXT: fcvt.s.w fa4, a0, rmm
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB187_8:
+; RV64-NEXT: fmv.x.w s1, fs0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: beqz a0, .LBB187_10
+; RV64-NEXT: # %bb.9:
+; RV64-NEXT: fcvt.w.s a0, fa5, rmm
+; RV64-NEXT: fcvt.s.w fa4, a0, rmm
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB187_10:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w s2, fs1
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fa5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa4, fa0
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.x.w s1, fa5
+; RV64-NEXT: beqz a0, .LBB187_12
+; RV64-NEXT: # %bb.11:
+; RV64-NEXT: fcvt.w.s a0, fa0, rmm
+; RV64-NEXT: fcvt.s.w fa5, a0, rmm
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB187_12:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w s2, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v9, v9, 6
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs2
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a0, .LBB187_14
+; RV64-NEXT: # %bb.13:
+; RV64-NEXT: fcvt.w.s a0, fa0, rmm
+; RV64-NEXT: fcvt.s.w fa5, a0, rmm
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB187_14:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs2
+; RV64-NEXT: addi a2, sp, 32
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB187_16
+; RV64-NEXT: # %bb.15:
+; RV64-NEXT: fcvt.w.s a0, fa0, rmm
+; RV64-NEXT: fcvt.s.w fa5, a0, rmm
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB187_16:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s2, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 96
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.round.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
+
+define void @round_v6bf16(ptr %x) {
+; RV32-LABEL: round_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x fs0, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs0
+; RV32-NEXT: beqz a0, .LBB188_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: fcvt.w.s a0, fa0, rmm
+; RV32-NEXT: fcvt.s.w fa5, a0, rmm
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB188_2:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.s fa5, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa4, fa0
+; RV32-NEXT: flt.s a0, fa4, fs0
+; RV32-NEXT: fmv.x.w s1, fa5
+; RV32-NEXT: beqz a0, .LBB188_4
+; RV32-NEXT: # %bb.3:
+; RV32-NEXT: fcvt.w.s a0, fa0, rmm
+; RV32-NEXT: fcvt.s.w fa5, a0, rmm
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB188_4:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v9, v9, 2
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a0, .LBB188_6
+; RV32-NEXT: # %bb.5:
+; RV32-NEXT: fcvt.w.s a0, fa0, rmm
+; RV32-NEXT: fcvt.s.w fa5, a0, rmm
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB188_6:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs0
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB188_8
+; RV32-NEXT: # %bb.7:
+; RV32-NEXT: fcvt.w.s a0, fa0, rmm
+; RV32-NEXT: fcvt.s.w fa5, a0, rmm
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB188_8:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs0
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB188_10
+; RV32-NEXT: # %bb.9:
+; RV32-NEXT: fcvt.w.s a0, fa0, rmm
+; RV32-NEXT: fcvt.s.w fa5, a0, rmm
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB188_10:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs0
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB188_12
+; RV32-NEXT: # %bb.11:
+; RV32-NEXT: fcvt.w.s a0, fa0, rmm
+; RV32-NEXT: fcvt.s.w fa5, a0, rmm
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB188_12:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: round_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x fs0, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs0
+; RV64-NEXT: beqz a0, .LBB188_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: fcvt.w.s a0, fa0, rmm
+; RV64-NEXT: fcvt.s.w fa5, a0, rmm
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB188_2:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fa5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa4, fa0
+; RV64-NEXT: flt.s a0, fa4, fs0
+; RV64-NEXT: fmv.x.w s1, fa5
+; RV64-NEXT: beqz a0, .LBB188_4
+; RV64-NEXT: # %bb.3:
+; RV64-NEXT: fcvt.w.s a0, fa0, rmm
+; RV64-NEXT: fcvt.s.w fa5, a0, rmm
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB188_4:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v9, v9, 2
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a0, .LBB188_6
+; RV64-NEXT: # %bb.5:
+; RV64-NEXT: fcvt.w.s a0, fa0, rmm
+; RV64-NEXT: fcvt.s.w fa5, a0, rmm
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB188_6:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs0
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB188_8
+; RV64-NEXT: # %bb.7:
+; RV64-NEXT: fcvt.w.s a0, fa0, rmm
+; RV64-NEXT: fcvt.s.w fa5, a0, rmm
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB188_8:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs0
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB188_10
+; RV64-NEXT: # %bb.9:
+; RV64-NEXT: fcvt.w.s a0, fa0, rmm
+; RV64-NEXT: fcvt.s.w fa5, a0, rmm
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB188_10:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs0
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB188_12
+; RV64-NEXT: # %bb.11:
+; RV64-NEXT: fcvt.w.s a0, fa0, rmm
+; RV64-NEXT: fcvt.s.w fa5, a0, rmm
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB188_12:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = call <6 x bfloat> @llvm.round.v6bf16(<6 x bfloat> %a)
+ store <6 x bfloat> %b, ptr %x
+ ret void
+}
define void @round_v8f16(ptr %x) {
; ZVFH-LABEL: round_v8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI127_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI127_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI189_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI189_0)(a1)
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 4
@@ -3172,15 +32429,14 @@ define void @round_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.round.v8f16(<8 x half>)
define void @round_v6f16(ptr %x) {
; ZVFH-LABEL: round_v6f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI128_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI128_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI190_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI190_0)(a1)
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
@@ -3219,7 +32475,6 @@ define void @round_v6f16(ptr %x) {
store <6 x half> %b, ptr %x
ret void
}
-declare <6 x half> @llvm.round.v6f16(<6 x half>)
define void @round_v4f32(ptr %x) {
; CHECK-LABEL: round_v4f32:
@@ -3243,15 +32498,14 @@ define void @round_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.round.v4f32(<4 x float>)
define void @round_v2f64(ptr %x) {
; CHECK-LABEL: round_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI130_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI130_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI192_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI192_0)(a1)
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a1, 4
@@ -3267,15 +32521,552 @@ define void @round_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.round.v2f64(<2 x double>)
+
+define void @rint_v8bf16(ptr %x) {
+; RV32-LABEL: rint_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -64
+; RV32-NEXT: .cfi_def_cfa_offset 64
+; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
+; RV32-NEXT: fsd fs1, 32(sp) # 8-byte Folded Spill
+; RV32-NEXT: fsd fs2, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset s2, -16
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: .cfi_offset fs1, -32
+; RV32-NEXT: .cfi_offset fs2, -40
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: lui a0, 307200
+; RV32-NEXT: fmv.w.x fs2, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs2
+; RV32-NEXT: beqz a0, .LBB193_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: fcvt.w.s a0, fa0
+; RV32-NEXT: fcvt.s.w fa5, a0
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB193_2:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs0, fa0
+; RV32-NEXT: beqz a0, .LBB193_4
+; RV32-NEXT: # %bb.3:
+; RV32-NEXT: fcvt.w.s a0, fa5
+; RV32-NEXT: fcvt.s.w fa4, a0
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB193_4:
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: beqz a0, .LBB193_6
+; RV32-NEXT: # %bb.5:
+; RV32-NEXT: fcvt.w.s a0, fa5
+; RV32-NEXT: fcvt.s.w fa4, a0
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB193_6:
+; RV32-NEXT: fmv.x.w s2, fa0
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs1, fa0
+; RV32-NEXT: beqz a0, .LBB193_8
+; RV32-NEXT: # %bb.7:
+; RV32-NEXT: fcvt.w.s a0, fa5
+; RV32-NEXT: fcvt.s.w fa4, a0
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB193_8:
+; RV32-NEXT: fmv.x.w s1, fs0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: fabs.s fa4, fa5
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.s fs0, fa0
+; RV32-NEXT: beqz a0, .LBB193_10
+; RV32-NEXT: # %bb.9:
+; RV32-NEXT: fcvt.w.s a0, fa5
+; RV32-NEXT: fcvt.s.w fa4, a0
+; RV32-NEXT: fsgnj.s fa5, fa4, fa5
+; RV32-NEXT: .LBB193_10:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w s2, fs1
+; RV32-NEXT: fmv.s fa0, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.s fa5, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa4, fa0
+; RV32-NEXT: flt.s a0, fa4, fs2
+; RV32-NEXT: fmv.x.w s1, fa5
+; RV32-NEXT: beqz a0, .LBB193_12
+; RV32-NEXT: # %bb.11:
+; RV32-NEXT: fcvt.w.s a0, fa0
+; RV32-NEXT: fcvt.s.w fa5, a0
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB193_12:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: fmv.x.w s2, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v9, v9, 6
+; RV32-NEXT: vmv.x.s a0, v9
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a0, fa5, fs2
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a0, .LBB193_14
+; RV32-NEXT: # %bb.13:
+; RV32-NEXT: fcvt.w.s a0, fa0
+; RV32-NEXT: fcvt.s.w fa5, a0
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB193_14:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, s2
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a1, v8
+; RV32-NEXT: slli a1, a1, 16
+; RV32-NEXT: fmv.w.x fa0, a1
+; RV32-NEXT: fabs.s fa5, fa0
+; RV32-NEXT: flt.s a1, fa5, fs2
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: beqz a1, .LBB193_16
+; RV32-NEXT: # %bb.15:
+; RV32-NEXT: fcvt.w.s a0, fa0
+; RV32-NEXT: fcvt.s.w fa5, a0
+; RV32-NEXT: fsgnj.s fa0, fa5, fa0
+; RV32-NEXT: .LBB193_16:
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
+; RV32-NEXT: fld fs1, 32(sp) # 8-byte Folded Reload
+; RV32-NEXT: fld fs2, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: ret
+;
+; RV64-LABEL: rint_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -96
+; RV64-NEXT: .cfi_def_cfa_offset 96
+; RV64-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s2, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset s2, -32
+; RV64-NEXT: .cfi_offset fs0, -40
+; RV64-NEXT: .cfi_offset fs1, -48
+; RV64-NEXT: .cfi_offset fs2, -56
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 96 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: lui a0, 307200
+; RV64-NEXT: fmv.w.x fs2, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs2
+; RV64-NEXT: beqz a0, .LBB193_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: fcvt.w.s a0, fa0
+; RV64-NEXT: fcvt.s.w fa5, a0
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB193_2:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: beqz a0, .LBB193_4
+; RV64-NEXT: # %bb.3:
+; RV64-NEXT: fcvt.w.s a0, fa5
+; RV64-NEXT: fcvt.s.w fa4, a0
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB193_4:
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: beqz a0, .LBB193_6
+; RV64-NEXT: # %bb.5:
+; RV64-NEXT: fcvt.w.s a0, fa5
+; RV64-NEXT: fcvt.s.w fa4, a0
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB193_6:
+; RV64-NEXT: fmv.x.w s2, fa0
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs1, fa0
+; RV64-NEXT: beqz a0, .LBB193_8
+; RV64-NEXT: # %bb.7:
+; RV64-NEXT: fcvt.w.s a0, fa5
+; RV64-NEXT: fcvt.s.w fa4, a0
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB193_8:
+; RV64-NEXT: fmv.x.w s1, fs0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: fabs.s fa4, fa5
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: beqz a0, .LBB193_10
+; RV64-NEXT: # %bb.9:
+; RV64-NEXT: fcvt.w.s a0, fa5
+; RV64-NEXT: fcvt.s.w fa4, a0
+; RV64-NEXT: fsgnj.s fa5, fa4, fa5
+; RV64-NEXT: .LBB193_10:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w s2, fs1
+; RV64-NEXT: fmv.s fa0, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fa5, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa4, fa0
+; RV64-NEXT: flt.s a0, fa4, fs2
+; RV64-NEXT: fmv.x.w s1, fa5
+; RV64-NEXT: beqz a0, .LBB193_12
+; RV64-NEXT: # %bb.11:
+; RV64-NEXT: fcvt.w.s a0, fa0
+; RV64-NEXT: fcvt.s.w fa5, a0
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB193_12:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: fmv.x.w s2, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v9, v9, 6
+; RV64-NEXT: vmv.x.s a0, v9
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a0, fa5, fs2
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a0, .LBB193_14
+; RV64-NEXT: # %bb.13:
+; RV64-NEXT: fcvt.w.s a0, fa0
+; RV64-NEXT: fcvt.s.w fa5, a0
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB193_14:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, s2
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a1, v8
+; RV64-NEXT: slli a1, a1, 16
+; RV64-NEXT: fmv.w.x fa0, a1
+; RV64-NEXT: fabs.s fa5, fa0
+; RV64-NEXT: flt.s a1, fa5, fs2
+; RV64-NEXT: addi a2, sp, 32
+; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: beqz a1, .LBB193_16
+; RV64-NEXT: # %bb.15:
+; RV64-NEXT: fcvt.w.s a0, fa0
+; RV64-NEXT: fcvt.s.w fa5, a0
+; RV64-NEXT: fsgnj.s fa0, fa5, fa0
+; RV64-NEXT: .LBB193_16:
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s2, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 96
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.rint.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
define void @rint_v8f16(ptr %x) {
; ZVFH-LABEL: rint_v8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI131_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI131_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI194_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI194_0)(a1)
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -3308,7 +33099,6 @@ define void @rint_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.rint.v8f16(<8 x half>)
define void @rint_v4f32(ptr %x) {
; CHECK-LABEL: rint_v4f32:
@@ -3330,15 +33120,14 @@ define void @rint_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.rint.v4f32(<4 x float>)
define void @rint_v2f64(ptr %x) {
; CHECK-LABEL: rint_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI133_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI133_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI196_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI196_0)(a1)
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -3352,15 +33141,372 @@ define void @rint_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.rint.v2f64(<2 x double>)
+
+define void @nearbyint_v8bf16(ptr %x) {
+; RV32-LABEL: nearbyint_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: .cfi_offset fs0, -24
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 1
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call nearbyintf
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.s fs0, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call nearbyintf
+; RV32-NEXT: fmv.x.w s1, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call nearbyintf
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call nearbyintf
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call nearbyintf
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.s fs0, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call nearbyintf
+; RV32-NEXT: fmv.x.w s1, fs0
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call nearbyintf
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa0, a0
+; RV32-NEXT: call nearbyintf
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: nearbyint_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -48
+; RV64-NEXT: .cfi_def_cfa_offset 48
+; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: .cfi_offset fs0, -32
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 1
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: call nearbyintf
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: call nearbyintf
+; RV64-NEXT: fmv.x.w s1, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: call nearbyintf
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: call nearbyintf
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: call nearbyintf
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: call nearbyintf
+; RV64-NEXT: fmv.x.w s1, fs0
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: call nearbyintf
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa0, a0
+; RV64-NEXT: call nearbyintf
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 48
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = call <8 x bfloat> @llvm.nearbyint.v8bf16(<8 x bfloat> %a)
+ store <8 x bfloat> %b, ptr %x
+ ret void
+}
define void @nearbyint_v8f16(ptr %x) {
; ZVFH-LABEL: nearbyint_v8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI134_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI134_0)(a1)
+; ZVFH-NEXT: lui a1, %hi(.LCPI198_0)
+; ZVFH-NEXT: flh fa5, %lo(.LCPI198_0)(a1)
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: frflags a1
@@ -3397,7 +33543,6 @@ define void @nearbyint_v8f16(ptr %x) {
store <8 x half> %b, ptr %x
ret void
}
-declare <8 x half> @llvm.nearbyint.v8f16(<8 x half>)
define void @nearbyint_v4f32(ptr %x) {
; CHECK-LABEL: nearbyint_v4f32:
@@ -3421,15 +33566,14 @@ define void @nearbyint_v4f32(ptr %x) {
store <4 x float> %b, ptr %x
ret void
}
-declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>)
define void @nearbyint_v2f64(ptr %x) {
; CHECK-LABEL: nearbyint_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI136_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI136_0)(a1)
+; CHECK-NEXT: lui a1, %hi(.LCPI200_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI200_0)(a1)
; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: frflags a1
@@ -3445,7 +33589,1508 @@ define void @nearbyint_v2f64(ptr %x) {
store <2 x double> %b, ptr %x
ret void
}
-declare <2 x double> @llvm.nearbyint.v2f64(<2 x double>)
+
+define void @fmuladd_v8bf16(ptr %x, ptr %y, ptr %z) {
+; RV32-LABEL: fmuladd_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a4, a3, 2
+; RV32-NEXT: add a3, a4, a3
+; RV32-NEXT: sub sp, sp, a3
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 5 * vlenb
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v10, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v8, (a2)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v10, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 2
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmuladd_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a3, vlenb
+; RV64-NEXT: slli a4, a3, 2
+; RV64-NEXT: add a3, a4, a3
+; RV64-NEXT: sub sp, sp, a3
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 5 * vlenb
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v10, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v8, (a2)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v10, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 2
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = load <8 x bfloat>, ptr %z
+ %d = call <8 x bfloat> @llvm.fmuladd.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c)
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fmuladd_v6bf16(ptr %x, ptr %y, ptr %z) {
+; RV32-LABEL: fmuladd_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a4, a3, 2
+; RV32-NEXT: add a3, a4, a3
+; RV32-NEXT: sub sp, sp, a3
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 5 * vlenb
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v10, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v8, (a2)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v10, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fadd.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 2
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmuladd_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a3, vlenb
+; RV64-NEXT: slli a4, a3, 2
+; RV64-NEXT: add a3, a4, a3
+; RV64-NEXT: sub sp, sp, a3
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 5 * vlenb
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v10, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v8, (a2)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v10, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fadd.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 2
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = load <6 x bfloat>, ptr %z
+ %d = call <6 x bfloat> @llvm.fmuladd.v6bf16(<6 x bfloat> %a, <6 x bfloat> %b, <6 x bfloat> %c)
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
define void @fmuladd_v8f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fmuladd_v8f16:
@@ -3485,7 +35130,6 @@ define void @fmuladd_v8f16(ptr %x, ptr %y, ptr %z) {
store <8 x half> %d, ptr %x
ret void
}
-declare <8 x half> @llvm.fmuladd.v8f16(<8 x half>, <8 x half>, <8 x half>)
define void @fmuladd_v6f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fmuladd_v6f16:
@@ -3526,7 +35170,6 @@ define void @fmuladd_v6f16(ptr %x, ptr %y, ptr %z) {
store <6 x half> %d, ptr %x
ret void
}
-declare <6 x half> @llvm.fmuladd.v6f16(<6 x half>, <6 x half>, <6 x half>)
define void @fmuladd_v4f32(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fmuladd_v4f32:
@@ -3545,7 +35188,6 @@ define void @fmuladd_v4f32(ptr %x, ptr %y, ptr %z) {
store <4 x float> %d, ptr %x
ret void
}
-declare <4 x float> @llvm.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>)
define void @fmuladd_v2f64(ptr %x, ptr %y, ptr %z) {
; CHECK-LABEL: fmuladd_v2f64:
@@ -3564,7 +35206,1510 @@ define void @fmuladd_v2f64(ptr %x, ptr %y, ptr %z) {
store <2 x double> %d, ptr %x
ret void
}
-declare <2 x double> @llvm.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>)
+
+define void @fmsub_fmuladd_v8bf16(ptr %x, ptr %y, ptr %z) {
+; RV32-LABEL: fmsub_fmuladd_v8bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a4, a3, 2
+; RV32-NEXT: add a3, a4, a3
+; RV32-NEXT: sub sp, sp, a3
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 5 * vlenb
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v10, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v8, (a2)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v10, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 2
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmsub_fmuladd_v8bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a3, vlenb
+; RV64-NEXT: slli a4, a3, 2
+; RV64-NEXT: add a3, a4, a3
+; RV64-NEXT: sub sp, sp, a3
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 5 * vlenb
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v10, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v8, (a2)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v10, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 2
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <8 x bfloat>, ptr %x
+ %b = load <8 x bfloat>, ptr %y
+ %c = load <8 x bfloat>, ptr %z
+ %neg = fneg <8 x bfloat> %c
+ %d = call <8 x bfloat> @llvm.fmuladd.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %neg)
+ store <8 x bfloat> %d, ptr %x
+ ret void
+}
+
+define void @fmsub_fmuladd_v6bf16(ptr %x, ptr %y, ptr %z) {
+; RV32-LABEL: fmsub_fmuladd_v6bf16:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset s1, -12
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a4, a3, 2
+; RV32-NEXT: add a3, a4, a3
+; RV32-NEXT: sub sp, sp, a3
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 5 * vlenb
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV32-NEXT: vle16.v v10, (a1)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: vle16.v v9, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v8, (a2)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vslidedown.vi v8, v10, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: vslidedown.vi v8, v9, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 3
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 5
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w s1, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 4
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.x v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, s1
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 6
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: addi a0, sp, 32
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 1
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fmul.s fa0, fa4, fa5
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa5, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 7
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: slli a0, a0, 16
+; RV32-NEXT: fmv.w.x fa4, a0
+; RV32-NEXT: fsub.s fa0, fa5, fa4
+; RV32-NEXT: call __truncsfbf2
+; RV32-NEXT: fmv.x.w a0, fa0
+; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 32
+; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV32-NEXT: vse16.v v8, (s0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a1, a0, 2
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
+; RV32-NEXT: ret
+;
+; RV64-LABEL: fmsub_fmuladd_v6bf16:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset s1, -24
+; RV64-NEXT: csrr a3, vlenb
+; RV64-NEXT: slli a4, a3, 2
+; RV64-NEXT: add a3, a4, a3
+; RV64-NEXT: sub sp, sp, a3
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 5 * vlenb
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; RV64-NEXT: vle16.v v10, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: mv s0, a0
+; RV64-NEXT: vle16.v v9, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vle16.v v8, (a2)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vslidedown.vi v8, v10, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: vslidedown.vi v8, v9, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 1
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 2
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 3
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 5
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w s1, fa0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 4
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, s1
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 6
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: addi a0, sp, 32
+; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 1
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 1
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fmul.s fa0, fa4, fa5
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa5, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 2
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64-NEXT: vslidedown.vi v8, v8, 7
+; RV64-NEXT: vmv.x.s a0, v8
+; RV64-NEXT: slli a0, a0, 16
+; RV64-NEXT: fmv.w.x fa4, a0
+; RV64-NEXT: fsub.s fa0, fa5, fa4
+; RV64-NEXT: call __truncsfbf2
+; RV64-NEXT: fmv.x.w a0, fa0
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 32
+; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
+; RV64-NEXT: vse16.v v8, (s0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a1, a0, 2
+; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
+ %a = load <6 x bfloat>, ptr %x
+ %b = load <6 x bfloat>, ptr %y
+ %c = load <6 x bfloat>, ptr %z
+ %neg = fneg <6 x bfloat> %c
+ %d = call <6 x bfloat> @llvm.fmuladd.v6bf16(<6 x bfloat> %a, <6 x bfloat> %b, <6 x bfloat> %neg)
+ store <6 x bfloat> %d, ptr %x
+ ret void
+}
define void @fmsub_fmuladd_v8f16(ptr %x, ptr %y, ptr %z) {
; ZVFH-LABEL: fmsub_fmuladd_v8f16:
>From 5d0541637fc972e76113931723a3154d0cde03d5 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 15 Oct 2024 17:16:20 +0100
Subject: [PATCH 2/3] [RISCV] Promote fixed-length bf16 arith vector ops with
zvfbfmin
The aim is to have the same set of promotions on fixed-length bf16 vectors as on fixed-length f16 vectors, and then deduplicate them similarly to what was done for scalable vectors.
It looks like fneg/fabs/fcopysign end up getting expanded because fsub is now legal and their default operation action must be Expand.
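For reference, here is a minimal scalar model of what the promotion does per element, assuming the usual bf16-is-the-top-half-of-f32 encoding: widen bf16 to f32 by shifting into the high 16 bits, do the arithmetic in f32, then narrow back. This is only an illustrative sketch, not code from the patch; the helper names are made up, and the round-to-nearest-even narrowing is a simplified stand-in for __truncsfbf2 (or vfncvtbf16.f.f.w in the vector lowering) that does not special-case NaNs.

    // Illustration only, not part of the patch.
    #include <cstdint>
    #include <cstring>

    // Widen bf16 to f32: a bf16 value occupies the high 16 bits of an f32.
    static float bf16_to_f32(uint16_t B) {
      uint32_t Bits = uint32_t(B) << 16;
      float F;
      std::memcpy(&F, &Bits, sizeof(F));
      return F;
    }

    // Narrow f32 back to bf16 with round-to-nearest-even.
    // Simplified: NaN inputs are not special-cased here.
    static uint16_t f32_to_bf16(float F) {
      uint32_t Bits;
      std::memcpy(&Bits, &F, sizeof(Bits));
      Bits += 0x7FFF + ((Bits >> 16) & 1);
      return uint16_t(Bits >> 16);
    }

    // One element of e.g. fadd_v8bf16: promote, add in f32, truncate.
    uint16_t bf16_fadd(uint16_t A, uint16_t B) {
      return f32_to_bf16(bf16_to_f32(A) + bf16_to_f32(B));
    }

With zvfbfmin the vector lowering does the same round trip in-register, as in the updated CHECK lines below: vfwcvtbf16.f.f.v for the widen, the f32 vfadd.vv/vfsub.vv/vfmul.vv for the arithmetic, and vfncvtbf16.f.f.w for the narrow.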
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 7 +
.../CodeGen/RISCV/rvv/fixed-vectors-fp.ll | 33654 +---------------
2 files changed, 1044 insertions(+), 32617 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index cde690793f0702..ae61b03a4aa3b6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1380,6 +1380,13 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
{ISD::VP_MERGE, ISD::VP_SELECT, ISD::VSELECT, ISD::SELECT}, VT,
Custom);
// TODO: Promote to fp32.
+ MVT F32VecVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
+ // Don't promote f16 vector operations to f32 if f32 vector type is
+ // not legal.
+ // TODO: could split the f16 vector into two vectors and do promotion.
+ if (!isTypeLegal(F32VecVT))
+ continue;
+ setOperationPromotedToType(ZvfhminZvfbfminPromoteOps, VT, F32VecVT);
continue;
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index ff00aaf45fcf1d..c24ade1e6d8eff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -1,504 +1,24 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
define void @fadd_v8bf16(ptr %x, ptr %y) {
-; RV32-LABEL: fadd_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fadd_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fadd_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = load <8 x bfloat>, ptr %y
%c = fadd <8 x bfloat> %a, %b
@@ -507,501 +27,20 @@ define void @fadd_v8bf16(ptr %x, ptr %y) {
}
define void @fadd_v6bf16(ptr %x, ptr %y) {
-; RV32-LABEL: fadd_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fadd_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fadd_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = load <6 x bfloat>, ptr %y
%c = fadd <6 x bfloat> %a, %b
@@ -1103,499 +142,19 @@ define void @fadd_v2f64(ptr %x, ptr %y) {
}
define void @fsub_v8bf16(ptr %x, ptr %y) {
-; RV32-LABEL: fsub_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fsub_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fsub_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = load <8 x bfloat>, ptr %y
%c = fsub <8 x bfloat> %a, %b
@@ -1604,501 +163,20 @@ define void @fsub_v8bf16(ptr %x, ptr %y) {
}
define void @fsub_v6bf16(ptr %x, ptr %y) {
-; RV32-LABEL: fsub_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fsub_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fsub_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = load <6 x bfloat>, ptr %y
%c = fsub <6 x bfloat> %a, %b
@@ -2200,499 +278,19 @@ define void @fsub_v2f64(ptr %x, ptr %y) {
}
define void @fmul_v8bf16(ptr %x, ptr %y) {
-; RV32-LABEL: fmul_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmul_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fmul_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = load <8 x bfloat>, ptr %y
%c = fmul <8 x bfloat> %a, %b
@@ -2701,501 +299,20 @@ define void @fmul_v8bf16(ptr %x, ptr %y) {
}
define void @fmul_v6bf16(ptr %x, ptr %y) {
-; RV32-LABEL: fmul_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmul_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fmul_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = load <6 x bfloat>, ptr %y
%c = fmul <6 x bfloat> %a, %b
@@ -3297,499 +414,19 @@ define void @fmul_v2f64(ptr %x, ptr %y) {
}
define void @fdiv_v8bf16(ptr %x, ptr %y) {
-; RV32-LABEL: fdiv_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fdiv_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fdiv_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = load <8 x bfloat>, ptr %y
%c = fdiv <8 x bfloat> %a, %b
@@ -3798,501 +435,20 @@ define void @fdiv_v8bf16(ptr %x, ptr %y) {
}
define void @fdiv_v6bf16(ptr %x, ptr %y) {
-; RV32-LABEL: fdiv_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fdiv_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fdiv_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = load <6 x bfloat>, ptr %y
%c = fdiv <6 x bfloat> %a, %b
@@ -4394,353 +550,14 @@ define void @fdiv_v2f64(ptr %x, ptr %y) {
}
define void @fneg_v8bf16(ptr %x) {
-; RV32-LABEL: fneg_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
-; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: lui s1, 524288
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 32
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fneg_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa5, fa5
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa5, fa5
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fneg_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = fneg <8 x bfloat> %a
store <8 x bfloat> %b, ptr %x
@@ -4748,355 +565,14 @@ define void @fneg_v8bf16(ptr %x) {
}
define void @fneg_v6bf16(ptr %x) {
-; RV32-LABEL: fneg_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
-; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: lui s1, 524288
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 32
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fneg_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa5, fa5
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa5, fa5
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fneg_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = fneg <6 x bfloat> %a
store <6 x bfloat> %b, ptr %x
@@ -5178,351 +654,15 @@ define void @fneg_v2f64(ptr %x) {
}
define void @fabs_v8bf16(ptr %x) {
-; RV32-LABEL: fabs_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fabs_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa5, fa5
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa5, fa5
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fabs_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = call <8 x bfloat> @llvm.fabs.v8bf16(<8 x bfloat> %a)
store <8 x bfloat> %b, ptr %x
@@ -5530,353 +670,15 @@ define void @fabs_v8bf16(ptr %x) {
}
define void @fabs_v6bf16(ptr %x) {
-; RV32-LABEL: fabs_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 17
-; RV32-NEXT: srli a0, a0, 1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fabs_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa5, fa5
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa5, fa5
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fabs_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = call <6 x bfloat> @llvm.fabs.v6bf16(<6 x bfloat> %a)
store <6 x bfloat> %b, ptr %x
@@ -5960,155 +762,18 @@ define void @fabs_v2f64(ptr %x) {
}
define void @copysign_v8bf16(ptr %x, ptr %y) {
-; RV32-LABEL: copysign_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: vslidedown.vi v10, v8, 1
-; RV32-NEXT: vmv.x.s a2, v10
-; RV32-NEXT: lui a1, 1048568
-; RV32-NEXT: and a2, a2, a1
-; RV32-NEXT: vslidedown.vi v10, v9, 1
-; RV32-NEXT: vmv.x.s a3, v10
-; RV32-NEXT: lui a4, 8
-; RV32-NEXT: addi a5, a4, -1
-; RV32-NEXT: and a3, a3, a5
-; RV32-NEXT: or a2, a3, a2
-; RV32-NEXT: vmv.x.s a3, v8
-; RV32-NEXT: and a3, a3, a4
-; RV32-NEXT: vmv.x.s a6, v9
-; RV32-NEXT: and a6, a6, a5
-; RV32-NEXT: or a3, a6, a3
-; RV32-NEXT: vmv.v.x v10, a3
-; RV32-NEXT: vslide1down.vx v10, v10, a2
-; RV32-NEXT: vslidedown.vi v11, v8, 2
-; RV32-NEXT: vmv.x.s a2, v11
-; RV32-NEXT: and a2, a2, a1
-; RV32-NEXT: vslidedown.vi v11, v9, 2
-; RV32-NEXT: vmv.x.s a3, v11
-; RV32-NEXT: and a3, a3, a5
-; RV32-NEXT: or a2, a3, a2
-; RV32-NEXT: vslide1down.vx v10, v10, a2
-; RV32-NEXT: vslidedown.vi v11, v8, 3
-; RV32-NEXT: vmv.x.s a2, v11
-; RV32-NEXT: and a2, a2, a1
-; RV32-NEXT: vslidedown.vi v11, v9, 3
-; RV32-NEXT: vmv.x.s a3, v11
-; RV32-NEXT: and a3, a3, a5
-; RV32-NEXT: or a2, a3, a2
-; RV32-NEXT: vslide1down.vx v10, v10, a2
-; RV32-NEXT: vslidedown.vi v11, v8, 5
-; RV32-NEXT: vmv.x.s a2, v11
-; RV32-NEXT: and a2, a2, a1
-; RV32-NEXT: vslidedown.vi v11, v9, 5
-; RV32-NEXT: vmv.x.s a3, v11
-; RV32-NEXT: and a3, a3, a5
-; RV32-NEXT: or a2, a3, a2
-; RV32-NEXT: vslidedown.vi v11, v8, 4
-; RV32-NEXT: vmv.x.s a3, v11
-; RV32-NEXT: and a3, a3, a4
-; RV32-NEXT: vslidedown.vi v11, v9, 4
-; RV32-NEXT: vmv.x.s a4, v11
-; RV32-NEXT: and a4, a4, a5
-; RV32-NEXT: or a3, a4, a3
-; RV32-NEXT: vmv.v.x v11, a3
-; RV32-NEXT: vslide1down.vx v11, v11, a2
-; RV32-NEXT: vslidedown.vi v12, v8, 6
-; RV32-NEXT: vmv.x.s a2, v12
-; RV32-NEXT: and a2, a2, a1
-; RV32-NEXT: vslidedown.vi v12, v9, 6
-; RV32-NEXT: vmv.x.s a3, v12
-; RV32-NEXT: and a3, a3, a5
-; RV32-NEXT: or a2, a3, a2
-; RV32-NEXT: vslide1down.vx v11, v11, a2
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a2, v8
-; RV32-NEXT: and a1, a2, a1
-; RV32-NEXT: vslidedown.vi v8, v9, 7
-; RV32-NEXT: vmv.x.s a2, v8
-; RV32-NEXT: and a2, a2, a5
-; RV32-NEXT: or a1, a2, a1
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: vslide1down.vx v8, v11, a1
-; RV32-NEXT: vslidedown.vi v8, v10, 4, v0.t
-; RV32-NEXT: vse16.v v8, (a0)
-; RV32-NEXT: ret
-;
-; RV64-LABEL: copysign_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: vslidedown.vi v10, v8, 1
-; RV64-NEXT: vmv.x.s a2, v10
-; RV64-NEXT: lui a1, 1048568
-; RV64-NEXT: and a2, a2, a1
-; RV64-NEXT: vslidedown.vi v10, v9, 1
-; RV64-NEXT: vmv.x.s a3, v10
-; RV64-NEXT: lui a4, 8
-; RV64-NEXT: addiw a5, a4, -1
-; RV64-NEXT: and a3, a3, a5
-; RV64-NEXT: or a2, a3, a2
-; RV64-NEXT: vmv.x.s a3, v8
-; RV64-NEXT: and a3, a3, a4
-; RV64-NEXT: vmv.x.s a6, v9
-; RV64-NEXT: and a6, a6, a5
-; RV64-NEXT: or a3, a6, a3
-; RV64-NEXT: vmv.v.x v10, a3
-; RV64-NEXT: vslide1down.vx v10, v10, a2
-; RV64-NEXT: vslidedown.vi v11, v8, 2
-; RV64-NEXT: vmv.x.s a2, v11
-; RV64-NEXT: and a2, a2, a1
-; RV64-NEXT: vslidedown.vi v11, v9, 2
-; RV64-NEXT: vmv.x.s a3, v11
-; RV64-NEXT: and a3, a3, a5
-; RV64-NEXT: or a2, a3, a2
-; RV64-NEXT: vslide1down.vx v10, v10, a2
-; RV64-NEXT: vslidedown.vi v11, v8, 3
-; RV64-NEXT: vmv.x.s a2, v11
-; RV64-NEXT: and a2, a2, a1
-; RV64-NEXT: vslidedown.vi v11, v9, 3
-; RV64-NEXT: vmv.x.s a3, v11
-; RV64-NEXT: and a3, a3, a5
-; RV64-NEXT: or a2, a3, a2
-; RV64-NEXT: vslide1down.vx v10, v10, a2
-; RV64-NEXT: vslidedown.vi v11, v8, 5
-; RV64-NEXT: vmv.x.s a2, v11
-; RV64-NEXT: and a2, a2, a1
-; RV64-NEXT: vslidedown.vi v11, v9, 5
-; RV64-NEXT: vmv.x.s a3, v11
-; RV64-NEXT: and a3, a3, a5
-; RV64-NEXT: or a2, a3, a2
-; RV64-NEXT: vslidedown.vi v11, v8, 4
-; RV64-NEXT: vmv.x.s a3, v11
-; RV64-NEXT: and a3, a3, a4
-; RV64-NEXT: vslidedown.vi v11, v9, 4
-; RV64-NEXT: vmv.x.s a4, v11
-; RV64-NEXT: and a4, a4, a5
-; RV64-NEXT: or a3, a4, a3
-; RV64-NEXT: vmv.v.x v11, a3
-; RV64-NEXT: vslide1down.vx v11, v11, a2
-; RV64-NEXT: vslidedown.vi v12, v8, 6
-; RV64-NEXT: vmv.x.s a2, v12
-; RV64-NEXT: and a2, a2, a1
-; RV64-NEXT: vslidedown.vi v12, v9, 6
-; RV64-NEXT: vmv.x.s a3, v12
-; RV64-NEXT: and a3, a3, a5
-; RV64-NEXT: or a2, a3, a2
-; RV64-NEXT: vslide1down.vx v11, v11, a2
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a2, v8
-; RV64-NEXT: and a1, a2, a1
-; RV64-NEXT: vslidedown.vi v8, v9, 7
-; RV64-NEXT: vmv.x.s a2, v8
-; RV64-NEXT: and a2, a2, a5
-; RV64-NEXT: or a1, a2, a1
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: vslide1down.vx v8, v11, a1
-; RV64-NEXT: vslidedown.vi v8, v10, 4, v0.t
-; RV64-NEXT: vse16.v v8, (a0)
-; RV64-NEXT: ret
+; CHECK-LABEL: copysign_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vor.vv v8, v9, v8
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = load <8 x bfloat>, ptr %y
%c = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b)
@@ -6117,159 +782,20 @@ define void @copysign_v8bf16(ptr %x, ptr %y) {
}
define void @copysign_v6bf16(ptr %x, ptr %y) {
-; RV32-LABEL: copysign_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: vslidedown.vi v10, v8, 1
-; RV32-NEXT: vmv.x.s a2, v10
-; RV32-NEXT: lui a1, 1048568
-; RV32-NEXT: and a2, a2, a1
-; RV32-NEXT: vslidedown.vi v10, v9, 1
-; RV32-NEXT: vmv.x.s a3, v10
-; RV32-NEXT: lui a4, 8
-; RV32-NEXT: addi a5, a4, -1
-; RV32-NEXT: and a3, a3, a5
-; RV32-NEXT: or a2, a3, a2
-; RV32-NEXT: vmv.x.s a3, v8
-; RV32-NEXT: and a3, a3, a4
-; RV32-NEXT: vmv.x.s a6, v9
-; RV32-NEXT: and a6, a6, a5
-; RV32-NEXT: or a3, a6, a3
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v10, a3
-; RV32-NEXT: vslide1down.vx v10, v10, a2
-; RV32-NEXT: vslidedown.vi v11, v8, 2
-; RV32-NEXT: vmv.x.s a2, v11
-; RV32-NEXT: and a2, a2, a1
-; RV32-NEXT: vslidedown.vi v11, v9, 2
-; RV32-NEXT: vmv.x.s a3, v11
-; RV32-NEXT: and a3, a3, a5
-; RV32-NEXT: or a2, a3, a2
-; RV32-NEXT: vslide1down.vx v10, v10, a2
-; RV32-NEXT: vslidedown.vi v11, v8, 3
-; RV32-NEXT: vmv.x.s a2, v11
-; RV32-NEXT: and a2, a2, a1
-; RV32-NEXT: vslidedown.vi v11, v9, 3
-; RV32-NEXT: vmv.x.s a3, v11
-; RV32-NEXT: and a3, a3, a5
-; RV32-NEXT: or a2, a3, a2
-; RV32-NEXT: vslide1down.vx v10, v10, a2
-; RV32-NEXT: vslidedown.vi v11, v8, 5
-; RV32-NEXT: vmv.x.s a2, v11
-; RV32-NEXT: and a2, a2, a1
-; RV32-NEXT: vslidedown.vi v11, v9, 5
-; RV32-NEXT: vmv.x.s a3, v11
-; RV32-NEXT: and a3, a3, a5
-; RV32-NEXT: or a2, a3, a2
-; RV32-NEXT: vslidedown.vi v11, v8, 4
-; RV32-NEXT: vmv.x.s a3, v11
-; RV32-NEXT: and a3, a3, a4
-; RV32-NEXT: vslidedown.vi v11, v9, 4
-; RV32-NEXT: vmv.x.s a4, v11
-; RV32-NEXT: and a4, a4, a5
-; RV32-NEXT: or a3, a4, a3
-; RV32-NEXT: vmv.v.x v11, a3
-; RV32-NEXT: vslide1down.vx v11, v11, a2
-; RV32-NEXT: vslidedown.vi v12, v8, 6
-; RV32-NEXT: vmv.x.s a2, v12
-; RV32-NEXT: and a2, a2, a1
-; RV32-NEXT: vslidedown.vi v12, v9, 6
-; RV32-NEXT: vmv.x.s a3, v12
-; RV32-NEXT: and a3, a3, a5
-; RV32-NEXT: or a2, a3, a2
-; RV32-NEXT: vslide1down.vx v11, v11, a2
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a2, v8
-; RV32-NEXT: and a1, a2, a1
-; RV32-NEXT: vslidedown.vi v8, v9, 7
-; RV32-NEXT: vmv.x.s a2, v8
-; RV32-NEXT: and a2, a2, a5
-; RV32-NEXT: or a1, a2, a1
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: vslide1down.vx v8, v11, a1
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v10, 4, v0.t
-; RV32-NEXT: vse16.v v8, (a0)
-; RV32-NEXT: ret
-;
-; RV64-LABEL: copysign_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: vslidedown.vi v10, v8, 1
-; RV64-NEXT: vmv.x.s a2, v10
-; RV64-NEXT: lui a1, 1048568
-; RV64-NEXT: and a2, a2, a1
-; RV64-NEXT: vslidedown.vi v10, v9, 1
-; RV64-NEXT: vmv.x.s a3, v10
-; RV64-NEXT: lui a4, 8
-; RV64-NEXT: addiw a5, a4, -1
-; RV64-NEXT: and a3, a3, a5
-; RV64-NEXT: or a2, a3, a2
-; RV64-NEXT: vmv.x.s a3, v8
-; RV64-NEXT: and a3, a3, a4
-; RV64-NEXT: vmv.x.s a6, v9
-; RV64-NEXT: and a6, a6, a5
-; RV64-NEXT: or a3, a6, a3
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v10, a3
-; RV64-NEXT: vslide1down.vx v10, v10, a2
-; RV64-NEXT: vslidedown.vi v11, v8, 2
-; RV64-NEXT: vmv.x.s a2, v11
-; RV64-NEXT: and a2, a2, a1
-; RV64-NEXT: vslidedown.vi v11, v9, 2
-; RV64-NEXT: vmv.x.s a3, v11
-; RV64-NEXT: and a3, a3, a5
-; RV64-NEXT: or a2, a3, a2
-; RV64-NEXT: vslide1down.vx v10, v10, a2
-; RV64-NEXT: vslidedown.vi v11, v8, 3
-; RV64-NEXT: vmv.x.s a2, v11
-; RV64-NEXT: and a2, a2, a1
-; RV64-NEXT: vslidedown.vi v11, v9, 3
-; RV64-NEXT: vmv.x.s a3, v11
-; RV64-NEXT: and a3, a3, a5
-; RV64-NEXT: or a2, a3, a2
-; RV64-NEXT: vslide1down.vx v10, v10, a2
-; RV64-NEXT: vslidedown.vi v11, v8, 5
-; RV64-NEXT: vmv.x.s a2, v11
-; RV64-NEXT: and a2, a2, a1
-; RV64-NEXT: vslidedown.vi v11, v9, 5
-; RV64-NEXT: vmv.x.s a3, v11
-; RV64-NEXT: and a3, a3, a5
-; RV64-NEXT: or a2, a3, a2
-; RV64-NEXT: vslidedown.vi v11, v8, 4
-; RV64-NEXT: vmv.x.s a3, v11
-; RV64-NEXT: and a3, a3, a4
-; RV64-NEXT: vslidedown.vi v11, v9, 4
-; RV64-NEXT: vmv.x.s a4, v11
-; RV64-NEXT: and a4, a4, a5
-; RV64-NEXT: or a3, a4, a3
-; RV64-NEXT: vmv.v.x v11, a3
-; RV64-NEXT: vslide1down.vx v11, v11, a2
-; RV64-NEXT: vslidedown.vi v12, v8, 6
-; RV64-NEXT: vmv.x.s a2, v12
-; RV64-NEXT: and a2, a2, a1
-; RV64-NEXT: vslidedown.vi v12, v9, 6
-; RV64-NEXT: vmv.x.s a3, v12
-; RV64-NEXT: and a3, a3, a5
-; RV64-NEXT: or a2, a3, a2
-; RV64-NEXT: vslide1down.vx v11, v11, a2
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a2, v8
-; RV64-NEXT: and a1, a2, a1
-; RV64-NEXT: vslidedown.vi v8, v9, 7
-; RV64-NEXT: vmv.x.s a2, v8
-; RV64-NEXT: and a2, a2, a5
-; RV64-NEXT: or a1, a2, a1
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: vslide1down.vx v8, v11, a1
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v10, 4, v0.t
-; RV64-NEXT: vse16.v v8, (a0)
-; RV64-NEXT: ret
+; CHECK-LABEL: copysign_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vor.vv v8, v9, v8
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = load <6 x bfloat>, ptr %y
%c = call <6 x bfloat> @llvm.copysign.v6bf16(<6 x bfloat> %a, <6 x bfloat> %b)
@@ -6370,111 +896,19 @@ define void @copysign_v2f64(ptr %x, ptr %y) {
}
define void @copysign_vf_v8bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: copysign_vf_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: fmv.x.w a1, fa0
-; RV32-NEXT: lui a2, 1048568
-; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vslidedown.vi v9, v8, 1
-; RV32-NEXT: vmv.x.s a2, v9
-; RV32-NEXT: lui a3, 8
-; RV32-NEXT: addi a3, a3, -1
-; RV32-NEXT: and a2, a2, a3
-; RV32-NEXT: or a2, a2, a1
-; RV32-NEXT: vmv.x.s a4, v8
-; RV32-NEXT: and a4, a4, a3
-; RV32-NEXT: or a4, a4, a1
-; RV32-NEXT: vmv.v.x v9, a4
-; RV32-NEXT: vslide1down.vx v9, v9, a2
-; RV32-NEXT: vslidedown.vi v10, v8, 2
-; RV32-NEXT: vmv.x.s a2, v10
-; RV32-NEXT: and a2, a2, a3
-; RV32-NEXT: or a2, a2, a1
-; RV32-NEXT: vslide1down.vx v9, v9, a2
-; RV32-NEXT: vslidedown.vi v10, v8, 3
-; RV32-NEXT: vmv.x.s a2, v10
-; RV32-NEXT: and a2, a2, a3
-; RV32-NEXT: or a2, a2, a1
-; RV32-NEXT: vslide1down.vx v9, v9, a2
-; RV32-NEXT: vslidedown.vi v10, v8, 5
-; RV32-NEXT: vmv.x.s a2, v10
-; RV32-NEXT: and a2, a2, a3
-; RV32-NEXT: or a2, a2, a1
-; RV32-NEXT: vslidedown.vi v10, v8, 4
-; RV32-NEXT: vmv.x.s a4, v10
-; RV32-NEXT: and a4, a4, a3
-; RV32-NEXT: or a4, a4, a1
-; RV32-NEXT: vmv.v.x v10, a4
-; RV32-NEXT: vslide1down.vx v10, v10, a2
-; RV32-NEXT: vslidedown.vi v11, v8, 6
-; RV32-NEXT: vmv.x.s a2, v11
-; RV32-NEXT: and a2, a2, a3
-; RV32-NEXT: or a2, a2, a1
-; RV32-NEXT: vslide1down.vx v10, v10, a2
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a2, v8
-; RV32-NEXT: and a2, a2, a3
-; RV32-NEXT: or a1, a2, a1
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: vslide1down.vx v8, v10, a1
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (a0)
-; RV32-NEXT: ret
-;
-; RV64-LABEL: copysign_vf_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: fmv.x.w a1, fa0
-; RV64-NEXT: lui a2, 1048568
-; RV64-NEXT: and a1, a1, a2
-; RV64-NEXT: vslidedown.vi v9, v8, 1
-; RV64-NEXT: vmv.x.s a2, v9
-; RV64-NEXT: lui a3, 8
-; RV64-NEXT: addiw a3, a3, -1
-; RV64-NEXT: and a2, a2, a3
-; RV64-NEXT: or a2, a2, a1
-; RV64-NEXT: vmv.x.s a4, v8
-; RV64-NEXT: and a4, a4, a3
-; RV64-NEXT: or a4, a4, a1
-; RV64-NEXT: vmv.v.x v9, a4
-; RV64-NEXT: vslide1down.vx v9, v9, a2
-; RV64-NEXT: vslidedown.vi v10, v8, 2
-; RV64-NEXT: vmv.x.s a2, v10
-; RV64-NEXT: and a2, a2, a3
-; RV64-NEXT: or a2, a2, a1
-; RV64-NEXT: vslide1down.vx v9, v9, a2
-; RV64-NEXT: vslidedown.vi v10, v8, 3
-; RV64-NEXT: vmv.x.s a2, v10
-; RV64-NEXT: and a2, a2, a3
-; RV64-NEXT: or a2, a2, a1
-; RV64-NEXT: vslide1down.vx v9, v9, a2
-; RV64-NEXT: vslidedown.vi v10, v8, 5
-; RV64-NEXT: vmv.x.s a2, v10
-; RV64-NEXT: and a2, a2, a3
-; RV64-NEXT: or a2, a2, a1
-; RV64-NEXT: vslidedown.vi v10, v8, 4
-; RV64-NEXT: vmv.x.s a4, v10
-; RV64-NEXT: and a4, a4, a3
-; RV64-NEXT: or a4, a4, a1
-; RV64-NEXT: vmv.v.x v10, a4
-; RV64-NEXT: vslide1down.vx v10, v10, a2
-; RV64-NEXT: vslidedown.vi v11, v8, 6
-; RV64-NEXT: vmv.x.s a2, v11
-; RV64-NEXT: and a2, a2, a3
-; RV64-NEXT: or a2, a2, a1
-; RV64-NEXT: vslide1down.vx v10, v10, a2
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a2, v8
-; RV64-NEXT: and a2, a2, a3
-; RV64-NEXT: or a1, a2, a1
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: vslide1down.vx v8, v10, a1
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (a0)
-; RV64-NEXT: ret
+; CHECK-LABEL: copysign_vf_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a2, a1, -1
+; CHECK-NEXT: vand.vx v8, v8, a2
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vor.vv v8, v8, v9
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
@@ -6484,111 +918,21 @@ define void @copysign_vf_v8bf16(ptr %x, bfloat %y) {
}
define void @copysign_vf_v6bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: copysign_vf_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: fmv.x.w a1, fa0
-; RV32-NEXT: lui a2, 1048568
-; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vslidedown.vi v9, v8, 1
-; RV32-NEXT: vmv.x.s a2, v9
-; RV32-NEXT: lui a3, 8
-; RV32-NEXT: addi a3, a3, -1
-; RV32-NEXT: and a2, a2, a3
-; RV32-NEXT: or a2, a2, a1
-; RV32-NEXT: vmv.x.s a4, v8
-; RV32-NEXT: and a4, a4, a3
-; RV32-NEXT: or a4, a4, a1
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v9, a4
-; RV32-NEXT: vslide1down.vx v9, v9, a2
-; RV32-NEXT: vslidedown.vi v10, v8, 2
-; RV32-NEXT: vmv.x.s a2, v10
-; RV32-NEXT: and a2, a2, a3
-; RV32-NEXT: or a2, a2, a1
-; RV32-NEXT: vslide1down.vx v9, v9, a2
-; RV32-NEXT: vslidedown.vi v10, v8, 3
-; RV32-NEXT: vmv.x.s a2, v10
-; RV32-NEXT: and a2, a2, a3
-; RV32-NEXT: or a2, a2, a1
-; RV32-NEXT: vslide1down.vx v9, v9, a2
-; RV32-NEXT: vslidedown.vi v10, v8, 5
-; RV32-NEXT: vmv.x.s a2, v10
-; RV32-NEXT: and a2, a2, a3
-; RV32-NEXT: or a2, a2, a1
-; RV32-NEXT: vslidedown.vi v10, v8, 4
-; RV32-NEXT: vmv.x.s a4, v10
-; RV32-NEXT: and a4, a4, a3
-; RV32-NEXT: or a1, a4, a1
-; RV32-NEXT: vmv.v.x v10, a1
-; RV32-NEXT: vslide1down.vx v10, v10, a2
-; RV32-NEXT: vslidedown.vi v11, v8, 6
-; RV32-NEXT: vmv.x.s a1, v11
-; RV32-NEXT: and a1, a1, a3
-; RV32-NEXT: vslide1down.vx v10, v10, a1
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: and a1, a1, a3
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: vslide1down.vx v8, v10, a1
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (a0)
-; RV32-NEXT: ret
-;
-; RV64-LABEL: copysign_vf_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: fmv.x.w a1, fa0
-; RV64-NEXT: lui a2, 1048568
-; RV64-NEXT: and a1, a1, a2
-; RV64-NEXT: vslidedown.vi v9, v8, 1
-; RV64-NEXT: vmv.x.s a2, v9
-; RV64-NEXT: lui a3, 8
-; RV64-NEXT: addiw a3, a3, -1
-; RV64-NEXT: and a2, a2, a3
-; RV64-NEXT: or a2, a2, a1
-; RV64-NEXT: vmv.x.s a4, v8
-; RV64-NEXT: and a4, a4, a3
-; RV64-NEXT: or a4, a4, a1
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v9, a4
-; RV64-NEXT: vslide1down.vx v9, v9, a2
-; RV64-NEXT: vslidedown.vi v10, v8, 2
-; RV64-NEXT: vmv.x.s a2, v10
-; RV64-NEXT: and a2, a2, a3
-; RV64-NEXT: or a2, a2, a1
-; RV64-NEXT: vslide1down.vx v9, v9, a2
-; RV64-NEXT: vslidedown.vi v10, v8, 3
-; RV64-NEXT: vmv.x.s a2, v10
-; RV64-NEXT: and a2, a2, a3
-; RV64-NEXT: or a2, a2, a1
-; RV64-NEXT: vslide1down.vx v9, v9, a2
-; RV64-NEXT: vslidedown.vi v10, v8, 5
-; RV64-NEXT: vmv.x.s a2, v10
-; RV64-NEXT: and a2, a2, a3
-; RV64-NEXT: or a2, a2, a1
-; RV64-NEXT: vslidedown.vi v10, v8, 4
-; RV64-NEXT: vmv.x.s a4, v10
-; RV64-NEXT: and a4, a4, a3
-; RV64-NEXT: or a1, a4, a1
-; RV64-NEXT: vmv.v.x v10, a1
-; RV64-NEXT: vslide1down.vx v10, v10, a2
-; RV64-NEXT: vslidedown.vi v11, v8, 6
-; RV64-NEXT: vmv.x.s a1, v11
-; RV64-NEXT: and a1, a1, a3
-; RV64-NEXT: vslide1down.vx v10, v10, a1
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: and a1, a1, a3
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: vslide1down.vx v8, v10, a1
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (a0)
-; RV64-NEXT: ret
+; CHECK-LABEL: copysign_vf_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a2, a1, -1
+; CHECK-NEXT: vand.vx v8, v8, a2
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vor.vv v8, v8, v9
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
@@ -6692,424 +1036,19 @@ define void @copysign_vf_v2f64(ptr %x, double %y) {
}
define void @copysign_neg_v8bf16(ptr %x, ptr %y) {
-; RV32-LABEL: copysign_neg_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -64
-; RV32-NEXT: .cfi_def_cfa_offset 64
-; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s7, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s8, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset s3, -20
-; RV32-NEXT: .cfi_offset s4, -24
-; RV32-NEXT: .cfi_offset s5, -28
-; RV32-NEXT: .cfi_offset s6, -32
-; RV32-NEXT: .cfi_offset s7, -36
-; RV32-NEXT: .cfi_offset s8, -40
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 1
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 2 * vlenb
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v9, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v9, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: lui s4, 524288
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s3, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s5, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s6, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s7, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s8, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v11, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v11, 1
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: lui a2, 8
-; RV32-NEXT: addi a3, a2, -1
-; RV32-NEXT: and a1, a1, a3
-; RV32-NEXT: lui a4, 1048568
-; RV32-NEXT: and a0, a0, a4
-; RV32-NEXT: or a0, a1, a0
-; RV32-NEXT: vmv.x.s a1, v11
-; RV32-NEXT: and a1, a1, a3
-; RV32-NEXT: and a5, s8, a2
-; RV32-NEXT: or a1, a1, a5
-; RV32-NEXT: vmv.v.x v8, a1
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vslidedown.vi v9, v11, 2
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: and a1, s7, a4
-; RV32-NEXT: or a0, a0, a1
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vslidedown.vi v9, v11, 3
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: and a1, s6, a4
-; RV32-NEXT: or a0, a0, a1
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vslidedown.vi v9, v11, 5
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: and a1, s5, a4
-; RV32-NEXT: or a0, a0, a1
-; RV32-NEXT: vslidedown.vi v9, v11, 4
-; RV32-NEXT: vmv.x.s a1, v9
-; RV32-NEXT: and a1, a1, a3
-; RV32-NEXT: and a2, s3, a2
-; RV32-NEXT: or a1, a1, a2
-; RV32-NEXT: vmv.v.x v9, a1
-; RV32-NEXT: vslide1down.vx v9, v9, a0
-; RV32-NEXT: vslidedown.vi v10, v11, 6
-; RV32-NEXT: vmv.x.s a0, v10
-; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: and a1, s2, a4
-; RV32-NEXT: or a0, a0, a1
-; RV32-NEXT: vslide1down.vx v9, v9, a0
-; RV32-NEXT: vslidedown.vi v10, v11, 7
-; RV32-NEXT: vmv.x.s a0, v10
-; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: and a4, s1, a4
-; RV32-NEXT: or a0, a0, a4
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: vslide1down.vx v9, v9, a0
-; RV32-NEXT: vslidedown.vi v9, v8, 4, v0.t
-; RV32-NEXT: vse16.v v9, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s7, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s8, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 64
-; RV32-NEXT: ret
-;
-; RV64-LABEL: copysign_neg_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -160
-; RV64-NEXT: .cfi_def_cfa_offset 160
-; RV64-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 144(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 136(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s2, 128(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s3, 120(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s4, 112(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s5, 104(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s6, 96(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s7, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs1, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs2, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs3, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs4, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs5, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: .cfi_offset s3, -40
-; RV64-NEXT: .cfi_offset s4, -48
-; RV64-NEXT: .cfi_offset s5, -56
-; RV64-NEXT: .cfi_offset s6, -64
-; RV64-NEXT: .cfi_offset s7, -72
-; RV64-NEXT: .cfi_offset fs0, -80
-; RV64-NEXT: .cfi_offset fs1, -88
-; RV64-NEXT: .cfi_offset fs2, -96
-; RV64-NEXT: .cfi_offset fs3, -104
-; RV64-NEXT: .cfi_offset fs4, -112
-; RV64-NEXT: .cfi_offset fs5, -120
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xa0, 0x01, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 160 + 2 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs2, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs3, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs4, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.x.w s2, fs5
-; RV64-NEXT: fmv.x.w s3, fs4
-; RV64-NEXT: fmv.x.w s4, fs3
-; RV64-NEXT: fmv.x.w s5, fs2
-; RV64-NEXT: fmv.x.w s6, fs1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: fmv.x.w s7, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v11, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v11, 1
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: lui a2, 8
-; RV64-NEXT: addiw a3, a2, -1
-; RV64-NEXT: and a1, a1, a3
-; RV64-NEXT: lui a4, 1048568
-; RV64-NEXT: and a0, a0, a4
-; RV64-NEXT: or a0, a1, a0
-; RV64-NEXT: vmv.x.s a1, v11
-; RV64-NEXT: and a1, a1, a3
-; RV64-NEXT: and a5, s7, a2
-; RV64-NEXT: or a1, a1, a5
-; RV64-NEXT: vmv.v.x v8, a1
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vslidedown.vi v9, v11, 2
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: and a0, a0, a3
-; RV64-NEXT: and a1, s6, a4
-; RV64-NEXT: or a0, a0, a1
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vslidedown.vi v9, v11, 3
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: and a0, a0, a3
-; RV64-NEXT: and a1, s5, a4
-; RV64-NEXT: or a0, a0, a1
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vslidedown.vi v9, v11, 5
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: and a0, a0, a3
-; RV64-NEXT: and a1, s4, a4
-; RV64-NEXT: or a0, a0, a1
-; RV64-NEXT: vslidedown.vi v9, v11, 4
-; RV64-NEXT: vmv.x.s a1, v9
-; RV64-NEXT: and a1, a1, a3
-; RV64-NEXT: and a2, s3, a2
-; RV64-NEXT: or a1, a1, a2
-; RV64-NEXT: vmv.v.x v9, a1
-; RV64-NEXT: vslide1down.vx v9, v9, a0
-; RV64-NEXT: vslidedown.vi v10, v11, 6
-; RV64-NEXT: vmv.x.s a0, v10
-; RV64-NEXT: and a0, a0, a3
-; RV64-NEXT: and a1, s2, a4
-; RV64-NEXT: or a0, a0, a1
-; RV64-NEXT: vslide1down.vx v9, v9, a0
-; RV64-NEXT: vslidedown.vi v10, v11, 7
-; RV64-NEXT: vmv.x.s a0, v10
-; RV64-NEXT: and a0, a0, a3
-; RV64-NEXT: and a4, s1, a4
-; RV64-NEXT: or a0, a0, a4
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: vslide1down.vx v9, v9, a0
-; RV64-NEXT: vslidedown.vi v9, v8, 4, v0.t
-; RV64-NEXT: vse16.v v9, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s2, 128(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s3, 120(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s4, 112(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s5, 104(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s6, 96(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s7, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs1, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs2, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs3, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs4, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs5, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 160
-; RV64-NEXT: ret
+; CHECK-LABEL: copysign_neg_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: addi a2, a1, -1
+; CHECK-NEXT: vand.vx v9, v9, a2
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vor.vv v8, v9, v8
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = load <8 x bfloat>, ptr %y
%c = fneg <8 x bfloat> %b
@@ -7119,426 +1058,21 @@ define void @copysign_neg_v8bf16(ptr %x, ptr %y) {
}
define void @copysign_neg_v6bf16(ptr %x, ptr %y) {
-; RV32-LABEL: copysign_neg_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -64
-; RV32-NEXT: .cfi_def_cfa_offset 64
-; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s7, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s8, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset s3, -20
-; RV32-NEXT: .cfi_offset s4, -24
-; RV32-NEXT: .cfi_offset s5, -28
-; RV32-NEXT: .cfi_offset s6, -32
-; RV32-NEXT: .cfi_offset s7, -36
-; RV32-NEXT: .cfi_offset s8, -40
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 1
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 2 * vlenb
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v9, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v9, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: lui s4, 524288
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s3, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s5, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s6, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s7, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s8, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s4
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v11, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v11, 1
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: lui a2, 8
-; RV32-NEXT: addi a3, a2, -1
-; RV32-NEXT: and a1, a1, a3
-; RV32-NEXT: lui a4, 1048568
-; RV32-NEXT: and a0, a0, a4
-; RV32-NEXT: or a0, a1, a0
-; RV32-NEXT: vmv.x.s a1, v11
-; RV32-NEXT: and a1, a1, a3
-; RV32-NEXT: and a5, s8, a2
-; RV32-NEXT: or a1, a1, a5
-; RV32-NEXT: vmv.v.x v8, a1
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vslidedown.vi v9, v11, 2
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: and a1, s7, a4
-; RV32-NEXT: or a0, a0, a1
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vslidedown.vi v9, v11, 3
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: and a1, s6, a4
-; RV32-NEXT: or a0, a0, a1
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vslidedown.vi v9, v11, 5
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: and a1, s5, a4
-; RV32-NEXT: or a0, a0, a1
-; RV32-NEXT: vslidedown.vi v9, v11, 4
-; RV32-NEXT: vmv.x.s a1, v9
-; RV32-NEXT: and a1, a1, a3
-; RV32-NEXT: and a2, s3, a2
-; RV32-NEXT: or a1, a1, a2
-; RV32-NEXT: vmv.v.x v9, a1
-; RV32-NEXT: vslide1down.vx v9, v9, a0
-; RV32-NEXT: vslidedown.vi v10, v11, 6
-; RV32-NEXT: vmv.x.s a0, v10
-; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: and a1, s2, a4
-; RV32-NEXT: or a0, a0, a1
-; RV32-NEXT: vslide1down.vx v9, v9, a0
-; RV32-NEXT: vslidedown.vi v10, v11, 7
-; RV32-NEXT: vmv.x.s a0, v10
-; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: and a4, s1, a4
-; RV32-NEXT: or a0, a0, a4
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: vslide1down.vx v9, v9, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v9, v8, 4, v0.t
-; RV32-NEXT: vse16.v v9, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s7, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s8, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 64
-; RV32-NEXT: ret
-;
-; RV64-LABEL: copysign_neg_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -160
-; RV64-NEXT: .cfi_def_cfa_offset 160
-; RV64-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 144(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 136(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s2, 128(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s3, 120(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s4, 112(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s5, 104(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s6, 96(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s7, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs1, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs2, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs3, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs4, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs5, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: .cfi_offset s3, -40
-; RV64-NEXT: .cfi_offset s4, -48
-; RV64-NEXT: .cfi_offset s5, -56
-; RV64-NEXT: .cfi_offset s6, -64
-; RV64-NEXT: .cfi_offset s7, -72
-; RV64-NEXT: .cfi_offset fs0, -80
-; RV64-NEXT: .cfi_offset fs1, -88
-; RV64-NEXT: .cfi_offset fs2, -96
-; RV64-NEXT: .cfi_offset fs3, -104
-; RV64-NEXT: .cfi_offset fs4, -112
-; RV64-NEXT: .cfi_offset fs5, -120
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xa0, 0x01, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 160 + 2 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs2, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs3, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs4, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.x.w s2, fs5
-; RV64-NEXT: fmv.x.w s3, fs4
-; RV64-NEXT: fmv.x.w s4, fs3
-; RV64-NEXT: fmv.x.w s5, fs2
-; RV64-NEXT: fmv.x.w s6, fs1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: fmv.x.w s7, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v11, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v11, 1
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: lui a2, 8
-; RV64-NEXT: addiw a3, a2, -1
-; RV64-NEXT: and a1, a1, a3
-; RV64-NEXT: lui a4, 1048568
-; RV64-NEXT: and a0, a0, a4
-; RV64-NEXT: or a0, a1, a0
-; RV64-NEXT: vmv.x.s a1, v11
-; RV64-NEXT: and a1, a1, a3
-; RV64-NEXT: and a5, s7, a2
-; RV64-NEXT: or a1, a1, a5
-; RV64-NEXT: vmv.v.x v8, a1
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vslidedown.vi v9, v11, 2
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: and a0, a0, a3
-; RV64-NEXT: and a1, s6, a4
-; RV64-NEXT: or a0, a0, a1
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vslidedown.vi v9, v11, 3
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: and a0, a0, a3
-; RV64-NEXT: and a1, s5, a4
-; RV64-NEXT: or a0, a0, a1
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vslidedown.vi v9, v11, 5
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: and a0, a0, a3
-; RV64-NEXT: and a1, s4, a4
-; RV64-NEXT: or a0, a0, a1
-; RV64-NEXT: vslidedown.vi v9, v11, 4
-; RV64-NEXT: vmv.x.s a1, v9
-; RV64-NEXT: and a1, a1, a3
-; RV64-NEXT: and a2, s3, a2
-; RV64-NEXT: or a1, a1, a2
-; RV64-NEXT: vmv.v.x v9, a1
-; RV64-NEXT: vslide1down.vx v9, v9, a0
-; RV64-NEXT: vslidedown.vi v10, v11, 6
-; RV64-NEXT: vmv.x.s a0, v10
-; RV64-NEXT: and a0, a0, a3
-; RV64-NEXT: and a1, s2, a4
-; RV64-NEXT: or a0, a0, a1
-; RV64-NEXT: vslide1down.vx v9, v9, a0
-; RV64-NEXT: vslidedown.vi v10, v11, 7
-; RV64-NEXT: vmv.x.s a0, v10
-; RV64-NEXT: and a0, a0, a3
-; RV64-NEXT: and a4, s1, a4
-; RV64-NEXT: or a0, a0, a4
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: vslide1down.vx v9, v9, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v9, v8, 4, v0.t
-; RV64-NEXT: vse16.v v9, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s2, 128(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s3, 120(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s4, 112(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s5, 104(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s6, 96(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s7, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs1, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs2, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs3, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs4, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs5, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 160
-; RV64-NEXT: ret
+; CHECK-LABEL: copysign_neg_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: addi a2, a1, -1
+; CHECK-NEXT: vand.vx v9, v9, a2
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vor.vv v8, v9, v8
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = load <6 x bfloat>, ptr %y
%c = fneg <6 x bfloat> %b
@@ -7646,287 +1180,20 @@ define void @copysign_neg_v2f64(ptr %x, ptr %y) {
}
define void @copysign_neg_trunc_v4bf16_v4f32(ptr %x, ptr %y) {
-; RV32-LABEL: copysign_neg_trunc_v4bf16_v4f32:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s5, 20(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset s3, -20
-; RV32-NEXT: .cfi_offset s4, -24
-; RV32-NEXT: .cfi_offset s5, -28
-; RV32-NEXT: .cfi_offset s6, -32
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a3, a2, 1
-; RV32-NEXT: add a2, a3, a2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vle32.v v8, (a1)
-; RV32-NEXT: vslidedown.vi v9, v9, 1
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: lui s4, 8
-; RV32-NEXT: addi s1, s4, -1
-; RV32-NEXT: and s5, a0, s1
-; RV32-NEXT: vfncvtbf16.f.f.w v9, v8
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: lui s2, 524288
-; RV32-NEXT: xor a0, a0, s2
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: lui s3, 1048568
-; RV32-NEXT: and a0, a0, s3
-; RV32-NEXT: or s5, s5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: and s6, a0, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s2
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: and a0, a0, s4
-; RV32-NEXT: or a0, s6, a0
-; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s5
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: and s4, a0, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s2
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: and a0, a0, s3
-; RV32-NEXT: or a0, s4, a0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: and s1, a0, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s2
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: and a0, a0, s3
-; RV32-NEXT: or a0, s1, a0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s4, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s5, 20(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s6, 16(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: copysign_neg_trunc_v4bf16_v4f32:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -96
-; RV64-NEXT: .cfi_def_cfa_offset 96
-; RV64-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s2, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s3, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s4, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s5, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: .cfi_offset s3, -40
-; RV64-NEXT: .cfi_offset s4, -48
-; RV64-NEXT: .cfi_offset s5, -56
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a3, a2, 1
-; RV64-NEXT: add a2, a3, a2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 96 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle32.v v8, (a1)
-; RV64-NEXT: vslidedown.vi v9, v9, 1
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: lui s3, 8
-; RV64-NEXT: addiw s1, s3, -1
-; RV64-NEXT: and s4, a0, s1
-; RV64-NEXT: vfncvtbf16.f.f.w v9, v8
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: lui s2, 1048568
-; RV64-NEXT: and a0, a0, s2
-; RV64-NEXT: or s4, s4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: and s5, a0, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: and a0, a0, s3
-; RV64-NEXT: or a0, s5, a0
-; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s4
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: and s3, a0, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: and a0, a0, s2
-; RV64-NEXT: or a0, s3, a0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: and s1, a0, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: and a0, a0, s2
-; RV64-NEXT: or a0, s1, a0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s2, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s3, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s4, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s5, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 96
-; RV64-NEXT: ret
+; CHECK-LABEL: copysign_neg_trunc_v4bf16_v4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle32.v v9, (a1)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a2, a1, -1
+; CHECK-NEXT: vand.vx v8, v8, a2
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v9
+; CHECK-NEXT: vxor.vx v9, v10, a1
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vor.vv v8, v8, v9
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <4 x bfloat>, ptr %x
%b = load <4 x float>, ptr %y
%c = fneg <4 x float> %b
@@ -7937,291 +1204,22 @@ define void @copysign_neg_trunc_v4bf16_v4f32(ptr %x, ptr %y) {
}
define void @copysign_neg_trunc_v3bf16_v3f32(ptr %x, ptr %y) {
-; RV32-LABEL: copysign_neg_trunc_v3bf16_v3f32:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s5, 20(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset s3, -20
-; RV32-NEXT: .cfi_offset s4, -24
-; RV32-NEXT: .cfi_offset s5, -28
-; RV32-NEXT: .cfi_offset s6, -32
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a3, a2, 1
-; RV32-NEXT: add a2, a3, a2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vle32.v v8, (a1)
-; RV32-NEXT: vslidedown.vi v9, v9, 1
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: lui s4, 8
-; RV32-NEXT: addi s1, s4, -1
-; RV32-NEXT: and s5, a0, s1
-; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT: vfncvtbf16.f.f.w v9, v8
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: lui s2, 524288
-; RV32-NEXT: xor a0, a0, s2
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: lui s3, 1048568
-; RV32-NEXT: and a0, a0, s3
-; RV32-NEXT: or s5, s5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: and s6, a0, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s2
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: and a0, a0, s4
-; RV32-NEXT: or a0, s6, a0
-; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s5
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: and s4, a0, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s2
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: and a0, a0, s3
-; RV32-NEXT: or a0, s4, a0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: and s1, a0, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s2
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: and a0, a0, s3
-; RV32-NEXT: or a0, s1, a0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s4, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s5, 20(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s6, 16(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: copysign_neg_trunc_v3bf16_v3f32:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -96
-; RV64-NEXT: .cfi_def_cfa_offset 96
-; RV64-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s2, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s3, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s4, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s5, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: .cfi_offset s3, -40
-; RV64-NEXT: .cfi_offset s4, -48
-; RV64-NEXT: .cfi_offset s5, -56
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a3, a2, 1
-; RV64-NEXT: add a2, a3, a2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 96 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle32.v v8, (a1)
-; RV64-NEXT: vslidedown.vi v9, v9, 1
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: lui s3, 8
-; RV64-NEXT: addiw s1, s3, -1
-; RV64-NEXT: and s4, a0, s1
-; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV64-NEXT: vfncvtbf16.f.f.w v9, v8
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: lui s2, 1048568
-; RV64-NEXT: and a0, a0, s2
-; RV64-NEXT: or s4, s4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: and s5, a0, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: and a0, a0, s3
-; RV64-NEXT: or a0, s5, a0
-; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s4
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: and s3, a0, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: and a0, a0, s2
-; RV64-NEXT: or a0, s3, a0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: and s1, a0, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: and a0, a0, s2
-; RV64-NEXT: or a0, s1, a0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s2, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s3, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s4, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s5, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 96
-; RV64-NEXT: ret
+; CHECK-LABEL: copysign_neg_trunc_v3bf16_v3f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle32.v v9, (a1)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a2, a1, -1
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vand.vx v8, v8, a2
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v9
+; CHECK-NEXT: vxor.vx v9, v10, a1
+; CHECK-NEXT: vand.vx v9, v9, a1
+; CHECK-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
+; CHECK-NEXT: vor.vv v8, v8, v9
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <3 x bfloat>, ptr %x
%b = load <3 x float>, ptr %y
%c = fneg <3 x float> %b
@@ -8324,351 +1322,17 @@ define void @copysign_neg_ext_v2f64_v2f32(ptr %x, ptr %y) {
}
define void @sqrt_v8bf16(ptr %x) {
-; RV32-LABEL: sqrt_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsqrt.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsqrt.s fa5, fa5
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsqrt.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsqrt.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsqrt.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsqrt.s fa5, fa5
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsqrt.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsqrt.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: sqrt_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsqrt.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsqrt.s fa5, fa5
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsqrt.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsqrt.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsqrt.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsqrt.s fa5, fa5
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsqrt.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsqrt.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: sqrt_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsqrt.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = call <8 x bfloat> @llvm.sqrt.v8bf16(<8 x bfloat> %a)
store <8 x bfloat> %b, ptr %x
@@ -8676,245 +1340,18 @@ define void @sqrt_v8bf16(ptr %x) {
}
define void @sqrt_v6bf16(ptr %x) {
-; RV32-LABEL: sqrt_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsqrt.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsqrt.s fa5, fa5
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsqrt.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsqrt.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsqrt.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsqrt.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: sqrt_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 2 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsqrt.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsqrt.s fa5, fa5
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsqrt.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsqrt.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsqrt.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsqrt.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: sqrt_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsqrt.v v8, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = call <6 x bfloat> @llvm.sqrt.v6bf16(<6 x bfloat> %a)
store <6 x bfloat> %b, ptr %x
@@ -9003,643 +1440,21 @@ define void @sqrt_v2f64(ptr %x) {
}
define void @fma_v8bf16(ptr %x, ptr %y, ptr %z) {
-; RV32-LABEL: fma_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a4, a3, 2
-; RV32-NEXT: add a3, a4, a3
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 5 * vlenb
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a2)
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 1
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 32
-; RV32-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vle16.v v10, (a1)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v10, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 2
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fma_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a3, vlenb
-; RV64-NEXT: slli a4, a3, 2
-; RV64-NEXT: add a3, a4, a3
-; RV64-NEXT: sub sp, sp, a3
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 5 * vlenb
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a2)
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: add a2, sp, a2
-; RV64-NEXT: addi a2, a2, 32
-; RV64-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v10, (a1)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v10, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 2
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fma_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a2)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = load <8 x bfloat>, ptr %y
%c = load <8 x bfloat>, ptr %z
@@ -9649,645 +1464,22 @@ define void @fma_v8bf16(ptr %x, ptr %y, ptr %z) {
}
define void @fma_v6bf16(ptr %x, ptr %y, ptr %z) {
-; RV32-LABEL: fma_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a4, a3, 2
-; RV32-NEXT: add a3, a4, a3
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 5 * vlenb
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a2)
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 1
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 32
-; RV32-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vle16.v v10, (a1)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v10, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 2
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fma_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a3, vlenb
-; RV64-NEXT: slli a4, a3, 2
-; RV64-NEXT: add a3, a4, a3
-; RV64-NEXT: sub sp, sp, a3
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 5 * vlenb
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a2)
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: add a2, sp, a2
-; RV64-NEXT: addi a2, a2, 32
-; RV64-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v10, (a1)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v10, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 2
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fma_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a2)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = load <6 x bfloat>, ptr %y
%c = load <6 x bfloat>, ptr %z
@@ -10402,809 +1594,23 @@ define void @fma_v2f64(ptr %x, ptr %y, ptr %z) {
}
define void @fmsub_v8bf16(ptr %x, ptr %y, ptr %z) {
-; RV32-LABEL: fmsub_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -64
-; RV32-NEXT: .cfi_def_cfa_offset 64
-; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s7, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s8, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset s3, -20
-; RV32-NEXT: .cfi_offset s4, -24
-; RV32-NEXT: .cfi_offset s5, -28
-; RV32-NEXT: .cfi_offset s6, -32
-; RV32-NEXT: .cfi_offset s7, -36
-; RV32-NEXT: .cfi_offset s8, -40
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 2
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v9, (a2)
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 16
-; RV32-NEXT: vs1r.v v9, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a2, a0, 1
-; RV32-NEXT: add a0, a2, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v9, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: lui s6, 524288
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s3, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s4, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s5, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s7, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s8, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s6, fa0
-; RV32-NEXT: slli s8, s8, 16
-; RV32-NEXT: fmv.w.x fa5, s8
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s6
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s7, s7, 16
-; RV32-NEXT: fmv.w.x fa5, s7
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s5, s5, 16
-; RV32-NEXT: fmv.w.x fa5, s5
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s4, s4, 16
-; RV32-NEXT: fmv.w.x fa5, s4
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s4, fa0
-; RV32-NEXT: slli s3, s3, 16
-; RV32-NEXT: fmv.w.x fa5, s3
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s4
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s2, s2, 16
-; RV32-NEXT: fmv.w.x fa5, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s1, s1, 16
-; RV32-NEXT: fmv.w.x fa5, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s7, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s8, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 64
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmsub_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -144
-; RV64-NEXT: .cfi_def_cfa_offset 144
-; RV64-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 128(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 120(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s2, 112(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s3, 104(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s4, 96(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s5, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s6, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s7, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s8, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs3, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs4, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs5, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: .cfi_offset s3, -40
-; RV64-NEXT: .cfi_offset s4, -48
-; RV64-NEXT: .cfi_offset s5, -56
-; RV64-NEXT: .cfi_offset s6, -64
-; RV64-NEXT: .cfi_offset s7, -72
-; RV64-NEXT: .cfi_offset s8, -80
-; RV64-NEXT: .cfi_offset fs0, -88
-; RV64-NEXT: .cfi_offset fs1, -96
-; RV64-NEXT: .cfi_offset fs2, -104
-; RV64-NEXT: .cfi_offset fs3, -112
-; RV64-NEXT: .cfi_offset fs4, -120
-; RV64-NEXT: .cfi_offset fs5, -128
-; RV64-NEXT: csrr a3, vlenb
-; RV64-NEXT: slli a3, a3, 2
-; RV64-NEXT: sub sp, sp, a3
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 144 + 4 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a3, a0, 1
-; RV64-NEXT: add a0, a3, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v8, (a2)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs2, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs3, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs4, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.x.w s2, fs5
-; RV64-NEXT: fmv.x.w s3, fs4
-; RV64-NEXT: fmv.x.w s4, fs3
-; RV64-NEXT: fmv.x.w s5, fs2
-; RV64-NEXT: fmv.x.w s6, fs1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: fmv.x.w s7, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa5, a1
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa4, a1
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s8, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: slli s7, s7, 16
-; RV64-NEXT: fmv.w.x fa3, s7
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s8
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: slli s6, s6, 16
-; RV64-NEXT: fmv.w.x fa3, s6
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: slli s5, s5, 16
-; RV64-NEXT: fmv.w.x fa3, s5
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: slli s4, s4, 16
-; RV64-NEXT: fmv.w.x fa3, s4
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s4, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: slli s3, s3, 16
-; RV64-NEXT: fmv.w.x fa3, s3
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s4
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: slli s2, s2, 16
-; RV64-NEXT: fmv.w.x fa3, s2
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: slli s1, s1, 16
-; RV64-NEXT: fmv.w.x fa3, s1
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 120(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s2, 112(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s3, 104(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s4, 96(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s5, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s6, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s7, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s8, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs3, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs4, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs5, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 144
-; RV64-NEXT: ret
+; CHECK-LABEL: fmsub_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a2)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = load <8 x bfloat>, ptr %y
%c = load <8 x bfloat>, ptr %z
@@ -11215,811 +1621,24 @@ define void @fmsub_v8bf16(ptr %x, ptr %y, ptr %z) {
}
define void @fmsub_v6bf16(ptr %x, ptr %y, ptr %z) {
-; RV32-LABEL: fmsub_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -64
-; RV32-NEXT: .cfi_def_cfa_offset 64
-; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s7, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s8, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset s3, -20
-; RV32-NEXT: .cfi_offset s4, -24
-; RV32-NEXT: .cfi_offset s5, -28
-; RV32-NEXT: .cfi_offset s6, -32
-; RV32-NEXT: .cfi_offset s7, -36
-; RV32-NEXT: .cfi_offset s8, -40
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 2
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v9, (a2)
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 16
-; RV32-NEXT: vs1r.v v9, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a2, a0, 1
-; RV32-NEXT: add a0, a2, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v9, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: lui s6, 524288
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s3, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s4, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s5, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s7, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s8, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s6, fa0
-; RV32-NEXT: slli s8, s8, 16
-; RV32-NEXT: fmv.w.x fa5, s8
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s6
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s7, s7, 16
-; RV32-NEXT: fmv.w.x fa5, s7
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s5, s5, 16
-; RV32-NEXT: fmv.w.x fa5, s5
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s4, s4, 16
-; RV32-NEXT: fmv.w.x fa5, s4
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s4, fa0
-; RV32-NEXT: slli s3, s3, 16
-; RV32-NEXT: fmv.w.x fa5, s3
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s4
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s2, s2, 16
-; RV32-NEXT: fmv.w.x fa5, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s1, s1, 16
-; RV32-NEXT: fmv.w.x fa5, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s7, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s8, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 64
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmsub_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -144
-; RV64-NEXT: .cfi_def_cfa_offset 144
-; RV64-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 128(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 120(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s2, 112(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s3, 104(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s4, 96(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s5, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s6, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s7, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s8, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs3, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs4, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs5, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: .cfi_offset s3, -40
-; RV64-NEXT: .cfi_offset s4, -48
-; RV64-NEXT: .cfi_offset s5, -56
-; RV64-NEXT: .cfi_offset s6, -64
-; RV64-NEXT: .cfi_offset s7, -72
-; RV64-NEXT: .cfi_offset s8, -80
-; RV64-NEXT: .cfi_offset fs0, -88
-; RV64-NEXT: .cfi_offset fs1, -96
-; RV64-NEXT: .cfi_offset fs2, -104
-; RV64-NEXT: .cfi_offset fs3, -112
-; RV64-NEXT: .cfi_offset fs4, -120
-; RV64-NEXT: .cfi_offset fs5, -128
-; RV64-NEXT: csrr a3, vlenb
-; RV64-NEXT: slli a3, a3, 2
-; RV64-NEXT: sub sp, sp, a3
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 144 + 4 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a3, a0, 1
-; RV64-NEXT: add a0, a3, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v8, (a2)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs2, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs3, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs4, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.x.w s2, fs5
-; RV64-NEXT: fmv.x.w s3, fs4
-; RV64-NEXT: fmv.x.w s4, fs3
-; RV64-NEXT: fmv.x.w s5, fs2
-; RV64-NEXT: fmv.x.w s6, fs1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: fmv.x.w s7, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa5, a1
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa4, a1
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s8, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: slli s7, s7, 16
-; RV64-NEXT: fmv.w.x fa3, s7
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s8
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: slli s6, s6, 16
-; RV64-NEXT: fmv.w.x fa3, s6
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: slli s5, s5, 16
-; RV64-NEXT: fmv.w.x fa3, s5
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: slli s4, s4, 16
-; RV64-NEXT: fmv.w.x fa3, s4
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s4, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: slli s3, s3, 16
-; RV64-NEXT: fmv.w.x fa3, s3
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s4
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: slli s2, s2, 16
-; RV64-NEXT: fmv.w.x fa3, s2
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: slli s1, s1, 16
-; RV64-NEXT: fmv.w.x fa3, s1
-; RV64-NEXT: fmadd.s fa0, fa4, fa5, fa3
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 120(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s2, 112(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s3, 104(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s4, 96(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s5, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s6, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s7, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s8, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs3, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs4, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs5, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 144
-; RV64-NEXT: ret
+; CHECK-LABEL: fmsub_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a2)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a1)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = load <6 x bfloat>, ptr %y
%c = load <6 x bfloat>, ptr %z
@@ -12144,931 +1763,19 @@ define void @fnmadd_v2f64(ptr %x, ptr %y, ptr %z) {
}
define void @fadd_v16bf16(ptr %x, ptr %y) {
-; RV32-LABEL: fadd_v16bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 1
-; RV32-NEXT: mv a3, a2
-; RV32-NEXT: slli a2, a2, 1
-; RV32-NEXT: add a2, a2, a3
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 6 * vlenb
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v10, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v10, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 8
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 8
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 9
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 9
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 10
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 10
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 11
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 11
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 12
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 12
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 13
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 13
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 14
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 14
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 15
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 15
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fadd_v16bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: mv a3, a2
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: add a2, a2, a3
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 6 * vlenb
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v10, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v10, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 8
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 8
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 9
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 9
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 10
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 10
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 11
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 11
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 12
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 12
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 13
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 13
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 14
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 14
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 15
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 15
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fadd_v16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v16, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v12, v8
+; CHECK-NEXT: vse16.v v12, (a0)
+; CHECK-NEXT: ret
%a = load <16 x bfloat>, ptr %x
%b = load <16 x bfloat>, ptr %y
%c = fadd <16 x bfloat> %a, %b
@@ -13139,931 +1846,19 @@ define void @fadd_v4f64(ptr %x, ptr %y) {
}
define void @fsub_v16bf16(ptr %x, ptr %y) {
-; RV32-LABEL: fsub_v16bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 1
-; RV32-NEXT: mv a3, a2
-; RV32-NEXT: slli a2, a2, 1
-; RV32-NEXT: add a2, a2, a3
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 6 * vlenb
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v10, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v10, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 8
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 8
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 9
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 9
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 10
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 10
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 11
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 11
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 12
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 12
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 13
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 13
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 14
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 14
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 15
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 15
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fsub_v16bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: mv a3, a2
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: add a2, a2, a3
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 6 * vlenb
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v10, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v10, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 8
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 8
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 9
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 9
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 10
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 10
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 11
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 11
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 12
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 12
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 13
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 13
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 14
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 14
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 15
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 15
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fsub_v16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v16, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v12, v8
+; CHECK-NEXT: vse16.v v12, (a0)
+; CHECK-NEXT: ret
%a = load <16 x bfloat>, ptr %x
%b = load <16 x bfloat>, ptr %y
%c = fsub <16 x bfloat> %a, %b
@@ -14134,931 +1929,19 @@ define void @fsub_v4f64(ptr %x, ptr %y) {
}
define void @fmul_v16bf16(ptr %x, ptr %y) {
-; RV32-LABEL: fmul_v16bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 1
-; RV32-NEXT: mv a3, a2
-; RV32-NEXT: slli a2, a2, 1
-; RV32-NEXT: add a2, a2, a3
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 6 * vlenb
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v10, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v10, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 8
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 8
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 9
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 9
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 10
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 10
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 11
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 11
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 12
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 12
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 13
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 13
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 14
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 14
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 15
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 15
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmul_v16bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: mv a3, a2
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: add a2, a2, a3
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 6 * vlenb
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v10, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v10, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 8
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 8
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 9
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 9
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 10
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 10
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 11
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 11
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 12
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 12
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 13
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 13
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 14
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 14
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 15
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 15
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fmul_v16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v16, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v12, v8
+; CHECK-NEXT: vse16.v v12, (a0)
+; CHECK-NEXT: ret
%a = load <16 x bfloat>, ptr %x
%b = load <16 x bfloat>, ptr %y
%c = fmul <16 x bfloat> %a, %b
@@ -15129,931 +2012,19 @@ define void @fmul_v4f64(ptr %x, ptr %y) {
}
define void @fdiv_v16bf16(ptr %x, ptr %y) {
-; RV32-LABEL: fdiv_v16bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 1
-; RV32-NEXT: mv a3, a2
-; RV32-NEXT: slli a2, a2, 1
-; RV32-NEXT: add a2, a2, a3
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 6 * vlenb
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v10, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v10, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 8
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 8
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 9
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 9
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 10
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 10
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 11
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 11
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 12
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 12
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 13
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 13
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 14
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 14
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 15
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 15
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fdiv.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fdiv_v16bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: mv a3, a2
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: add a2, a2, a3
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x06, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 6 * vlenb
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v10, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v10, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 8
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 8
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 9
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 9
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 10
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 10
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 11
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 11
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 12
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 12
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 13
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 13
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 14
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 14
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 15
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 15
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fdiv.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fdiv_v16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v16, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v12, v8
+; CHECK-NEXT: vse16.v v12, (a0)
+; CHECK-NEXT: ret
%a = load <16 x bfloat>, ptr %x
%b = load <16 x bfloat>, ptr %y
%c = fdiv <16 x bfloat> %a, %b
@@ -16124,636 +2095,14 @@ define void @fdiv_v4f64(ptr %x, ptr %y) {
}
define void @fneg_v16bf16(ptr %x) {
-; RV32-LABEL: fneg_v16bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
-; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 2
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 4 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: lui s1, 524288
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 8
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 9
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 10
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 11
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 12
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 13
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 14
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 15
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s1
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 32
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fneg_v16bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 2
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 4 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa5, fa5
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 8
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 9
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 10
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 11
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 12
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 13
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 14
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 15
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fneg_v16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <16 x bfloat>, ptr %x
%b = fneg <16 x bfloat> %a
store <16 x bfloat> %b, ptr %x
@@ -16812,1303 +2161,21 @@ define void @fneg_v4f64(ptr %x) {
}
define void @fma_v16bf16(ptr %x, ptr %y, ptr %z) {
-; RV32-LABEL: fma_v16bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 8 * vlenb
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vle16.v v8, (a2)
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 1
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 32
-; RV32-NEXT: vs2r.v v8, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v10, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a2, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vle16.v v12, (a1)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v12, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: vslidedown.vi v8, v10, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 8
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 8
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 8
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 9
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 9
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 9
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 10
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 10
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 10
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 11
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 11
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 11
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 12
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 12
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 12
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 13
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 13
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 13
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 14
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 14
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 14
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 15
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 15
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: mv a1, a0
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 15
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa3, a0
-; RV32-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fma_v16bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a3, vlenb
-; RV64-NEXT: slli a3, a3, 3
-; RV64-NEXT: sub sp, sp, a3
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 8 * vlenb
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vle16.v v8, (a2)
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: add a2, sp, a2
-; RV64-NEXT: addi a2, a2, 32
-; RV64-NEXT: vs2r.v v8, (a2) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v10, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a2, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v12, (a1)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v12, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: vslidedown.vi v8, v10, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 8
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 8
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 8
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 9
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 9
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 9
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 10
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 10
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 10
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 11
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 11
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 11
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 12
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 12
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 12
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 13
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 13
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 13
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 14
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 14
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 14
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 15
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 15
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: mv a1, a0
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 15
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa3, a0
-; RV64-NEXT: fmadd.s fa0, fa3, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl2r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fma_v16bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; CHECK-NEXT: vle16.v v8, (a2)
+; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vle16.v v12, (a1)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v20, v10
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v20, v16
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v12, v8
+; CHECK-NEXT: vse16.v v12, (a0)
+; CHECK-NEXT: ret
%a = load <16 x bfloat>, ptr %x
%b = load <16 x bfloat>, ptr %y
%c = load <16 x bfloat>, ptr %z
@@ -18188,359 +2255,20 @@ define void @fma_v4f64(ptr %x, ptr %y, ptr %z) {
}
define void @fadd_vf_v8bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fadd_vf_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fadd_vf_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fadd_vf_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
@@ -18550,363 +2278,21 @@ define void @fadd_vf_v8bf16(ptr %x, bfloat %y) {
}
define void @fadd_vf_v6bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fadd_vf_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmv.w.x fs0, zero
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fadd_vf_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmv.w.x fs0, zero
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fadd_vf_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v10, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
@@ -19011,359 +2397,20 @@ define void @fadd_vf_v2f64(ptr %x, double %y) {
}
define void @fadd_fv_v8bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fadd_fv_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fadd_fv_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fadd_fv_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
@@ -19373,363 +2420,21 @@ define void @fadd_fv_v8bf16(ptr %x, bfloat %y) {
}
define void @fadd_fv_v6bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fadd_fv_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmv.w.x fs0, zero
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fadd.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fadd_fv_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmv.w.x fs0, zero
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fadd.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fadd_fv_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
@@ -19834,359 +2539,20 @@ define void @fadd_fv_v2f64(ptr %x, double %y) {
}
define void @fsub_vf_v8bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fsub_vf_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fsub_vf_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fsub_vf_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
@@ -20196,273 +2562,21 @@ define void @fsub_vf_v8bf16(ptr %x, bfloat %y) {
}
define void @fsub_vf_v6bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fsub_vf_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v10, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v9, v10, 6
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: srli a0, a0, 16
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vslidedown.vi v9, v10, 7
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fsub_vf_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v10, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v9, v10, 6
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: slli a0, a0, 48
-; RV64-NEXT: srli a0, a0, 48
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vslidedown.vi v9, v10, 7
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fsub_vf_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v10, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
@@ -20567,359 +2681,20 @@ define void @fsub_vf_v2f64(ptr %x, double %y) {
}
define void @fsub_fv_v8bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fsub_fv_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fsub_fv_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fsub_fv_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
@@ -20929,363 +2704,21 @@ define void @fsub_fv_v8bf16(ptr %x, bfloat %y) {
}
define void @fsub_fv_v6bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fsub_fv_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmv.w.x fs0, zero
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fsub.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fsub_fv_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmv.w.x fs0, zero
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fsub.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fsub_fv_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
@@ -21390,359 +2823,20 @@ define void @fsub_fv_v2f64(ptr %x, double %y) {
}
define void @fmul_vf_v8bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fmul_vf_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmul_vf_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fmul_vf_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
@@ -21752,363 +2846,21 @@ define void @fmul_vf_v8bf16(ptr %x, bfloat %y) {
}
define void @fmul_vf_v6bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fmul_vf_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmv.w.x fs0, zero
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmul_vf_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmv.w.x fs0, zero
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fmul_vf_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v10, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
@@ -22213,359 +2965,20 @@ define void @fmul_vf_v2f64(ptr %x, double %y) {
}
define void @fmul_fv_v8bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fmul_fv_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmul_fv_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fmul_fv_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
@@ -22575,363 +2988,21 @@ define void @fmul_fv_v8bf16(ptr %x, bfloat %y) {
}
define void @fmul_fv_v6bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fmul_fv_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmv.w.x fs0, zero
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmul.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmul_fv_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmv.w.x fs0, zero
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmul.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fmul_fv_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
@@ -23036,359 +3107,20 @@ define void @fmul_fv_v2f64(ptr %x, double %y) {
}
define void @fdiv_vf_v8bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fdiv_vf_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fdiv_vf_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fdiv_vf_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v10, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
@@ -23398,363 +3130,21 @@ define void @fdiv_vf_v8bf16(ptr %x, bfloat %y) {
}
define void @fdiv_vf_v6bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fdiv_vf_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmv.w.x fs0, zero
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fa5, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fdiv_vf_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmv.w.x fs0, zero
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fa5, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fdiv_vf_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v10, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
@@ -23859,359 +3249,20 @@ define void @fdiv_vf_v2f64(ptr %x, double %y) {
}
define void @fdiv_fv_v8bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fdiv_fv_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fdiv_fv_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fdiv_fv_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = insertelement <8 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <8 x bfloat> %b, <8 x bfloat> poison, <8 x i32> zeroinitializer
@@ -24221,363 +3272,21 @@ define void @fdiv_fv_v8bf16(ptr %x, bfloat %y) {
}
define void @fdiv_fv_v6bf16(ptr %x, bfloat %y) {
-; RV32-LABEL: fdiv_fv_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fmv.w.x fs0, zero
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fdiv.s fa0, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fdiv_fv_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fmv.w.x fs0, zero
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fdiv.s fa0, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fdiv_fv_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfdiv.vv v8, v12, v10
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = insertelement <6 x bfloat> poison, bfloat %y, i32 0
%c = shufflevector <6 x bfloat> %b, <6 x bfloat> poison, <6 x i32> zeroinitializer
@@ -24682,511 +3391,22 @@ define void @fdiv_fv_v2f64(ptr %x, double %y) {
}
define void @fma_vf_v8bf16(ptr %x, ptr %y, bfloat %z) {
-; RV32-LABEL: fma_vf_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vle16.v v9, (s0)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fma_vf_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v9, (s0)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fma_vf_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = load <8 x bfloat>, ptr %y
%c = insertelement <8 x bfloat> poison, bfloat %z, i32 0
@@ -25197,515 +3417,23 @@ define void @fma_vf_v8bf16(ptr %x, ptr %y, bfloat %z) {
}
define void @fma_vf_v6bf16(ptr %x, ptr %y, bfloat %z) {
-; RV32-LABEL: fma_vf_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vle16.v v9, (s0)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmv.w.x fs0, zero
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fma_vf_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v9, (s0)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmv.w.x fs0, zero
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fma_vf_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = load <6 x bfloat>, ptr %y
%c = insertelement <6 x bfloat> poison, bfloat %z, i32 0
@@ -25823,511 +3551,22 @@ define void @fma_vf_v2f64(ptr %x, ptr %y, double %z) {
}
define void @fma_fv_v8bf16(ptr %x, ptr %y, bfloat %z) {
-; RV32-LABEL: fma_fv_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vle16.v v9, (s0)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fma_fv_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v9, (s0)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fma_fv_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = load <8 x bfloat>, ptr %y
%c = insertelement <8 x bfloat> poison, bfloat %z, i32 0
@@ -26338,515 +3577,23 @@ define void @fma_fv_v8bf16(ptr %x, ptr %y, bfloat %z) {
}
define void @fma_fv_v6bf16(ptr %x, ptr %y, bfloat %z) {
-; RV32-LABEL: fma_fv_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vle16.v v9, (s0)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmv.w.x fs0, zero
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fma_fv_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 4 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v9, (s0)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fs0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmv.w.x fs0, zero
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: fma_fv_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: fmv.x.w a1, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = load <6 x bfloat>, ptr %y
%c = insertelement <6 x bfloat> poison, bfloat %z, i32 0
@@ -26964,657 +3711,24 @@ define void @fma_fv_v2f64(ptr %x, ptr %y, double %z) {
}
define void @fmsub_vf_v8bf16(ptr %x, ptr %y, bfloat %z) {
-; RV32-LABEL: fmsub_vf_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -80
-; RV32-NEXT: .cfi_def_cfa_offset 80
-; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 68(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 64(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s3, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s4, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s5, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s6, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s7, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s8, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s9, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset s3, -20
-; RV32-NEXT: .cfi_offset s4, -24
-; RV32-NEXT: .cfi_offset s5, -28
-; RV32-NEXT: .cfi_offset s6, -32
-; RV32-NEXT: .cfi_offset s7, -36
-; RV32-NEXT: .cfi_offset s8, -40
-; RV32-NEXT: .cfi_offset s9, -44
-; RV32-NEXT: .cfi_offset fs0, -56
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a3, a2, 1
-; RV32-NEXT: add a2, a3, a2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 3 * vlenb
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v9, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: fmv.x.w s5, fa0
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v9, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: lui s6, 524288
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s3, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s4, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s7, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s8, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s9, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: slli s5, s5, 16
-; RV32-NEXT: fmv.w.x fs0, s5
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s5, fa0
-; RV32-NEXT: slli s9, s9, 16
-; RV32-NEXT: fmv.w.x fa5, s9
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s5
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s8, s8, 16
-; RV32-NEXT: fmv.w.x fa5, s8
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s7, s7, 16
-; RV32-NEXT: fmv.w.x fa5, s7
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s4, s4, 16
-; RV32-NEXT: fmv.w.x fa5, s4
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s4, fa0
-; RV32-NEXT: slli s3, s3, 16
-; RV32-NEXT: fmv.w.x fa5, s3
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s4
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s2, s2, 16
-; RV32-NEXT: fmv.w.x fa5, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s1, s1, 16
-; RV32-NEXT: fmv.w.x fa5, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 64(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s3, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s4, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s5, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s6, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s7, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s8, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s9, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 80
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmsub_vf_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -144
-; RV64-NEXT: .cfi_def_cfa_offset 144
-; RV64-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 128(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 120(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s2, 112(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s3, 104(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s4, 96(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s5, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s6, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s7, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s8, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs3, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs4, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs5, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: .cfi_offset s3, -40
-; RV64-NEXT: .cfi_offset s4, -48
-; RV64-NEXT: .cfi_offset s5, -56
-; RV64-NEXT: .cfi_offset s6, -64
-; RV64-NEXT: .cfi_offset s7, -72
-; RV64-NEXT: .cfi_offset s8, -80
-; RV64-NEXT: .cfi_offset fs0, -88
-; RV64-NEXT: .cfi_offset fs1, -96
-; RV64-NEXT: .cfi_offset fs2, -104
-; RV64-NEXT: .cfi_offset fs3, -112
-; RV64-NEXT: .cfi_offset fs4, -120
-; RV64-NEXT: .cfi_offset fs5, -128
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a3, a2, 1
-; RV64-NEXT: add a2, a3, a2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 144 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w s4, fa0
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs2, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs3, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs4, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.x.w s2, fs5
-; RV64-NEXT: fmv.x.w s3, fs4
-; RV64-NEXT: fmv.x.w s5, fs3
-; RV64-NEXT: fmv.x.w s6, fs2
-; RV64-NEXT: fmv.x.w s7, fs1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: fmv.x.w s8, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli s4, s4, 16
-; RV64-NEXT: fmv.w.x fs0, s4
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa5, a1
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s4, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: slli s8, s8, 16
-; RV64-NEXT: fmv.w.x fa4, s8
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s4
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: slli s7, s7, 16
-; RV64-NEXT: fmv.w.x fa4, s7
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: slli s6, s6, 16
-; RV64-NEXT: fmv.w.x fa4, s6
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: slli s5, s5, 16
-; RV64-NEXT: fmv.w.x fa4, s5
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s4, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: slli s3, s3, 16
-; RV64-NEXT: fmv.w.x fa4, s3
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s4
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: slli s2, s2, 16
-; RV64-NEXT: fmv.w.x fa4, s2
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: slli s1, s1, 16
-; RV64-NEXT: fmv.w.x fa4, s1
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 120(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s2, 112(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s3, 104(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s4, 96(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s5, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s6, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s7, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s8, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs3, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs4, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs5, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 144
-; RV64-NEXT: ret
+; CHECK-LABEL: fmsub_vf_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmv.x.w a2, fa0
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vmv.v.x v10, a2
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = load <8 x bfloat>, ptr %y
%c = insertelement <8 x bfloat> poison, bfloat %z, i32 0
@@ -27626,661 +3740,25 @@ define void @fmsub_vf_v8bf16(ptr %x, ptr %y, bfloat %z) {
}
define void @fmsub_vf_v6bf16(ptr %x, ptr %y, bfloat %z) {
-; RV32-LABEL: fmsub_vf_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -80
-; RV32-NEXT: .cfi_def_cfa_offset 80
-; RV32-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 68(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 64(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s3, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s4, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s5, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s6, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s7, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s8, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s9, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset s3, -20
-; RV32-NEXT: .cfi_offset s4, -24
-; RV32-NEXT: .cfi_offset s5, -28
-; RV32-NEXT: .cfi_offset s6, -32
-; RV32-NEXT: .cfi_offset s7, -36
-; RV32-NEXT: .cfi_offset s8, -40
-; RV32-NEXT: .cfi_offset s9, -44
-; RV32-NEXT: .cfi_offset fs0, -56
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a3, a2, 1
-; RV32-NEXT: add a2, a3, a2
-; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xd0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 80 + 3 * vlenb
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v9, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs1r.v v9, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: fmv.x.w s5, fa0
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v9, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: lui s6, 524288
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s3, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s4, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s7, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s8, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s9, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: xor a0, a0, s6
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: slli s5, s5, 16
-; RV32-NEXT: fmv.w.x fs0, s5
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s5, fa0
-; RV32-NEXT: slli s9, s9, 16
-; RV32-NEXT: fmv.w.x fa5, s9
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s5
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s8, s8, 16
-; RV32-NEXT: fmv.w.x fa5, s8
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s7, s7, 16
-; RV32-NEXT: fmv.w.x fa5, s7
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s4, s4, 16
-; RV32-NEXT: fmv.w.x fa5, s4
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s4, fa0
-; RV32-NEXT: slli s3, s3, 16
-; RV32-NEXT: fmv.w.x fa5, s3
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s4
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s2, s2, 16
-; RV32-NEXT: fmv.w.x fa5, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmv.w.x fs0, zero
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: slli s1, s1, 16
-; RV32-NEXT: fmv.w.x fa5, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmadd.s fa0, fa4, fs0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 64(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s3, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s4, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s5, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s6, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s7, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s8, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s9, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 80
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmsub_vf_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -144
-; RV64-NEXT: .cfi_def_cfa_offset 144
-; RV64-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 128(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 120(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s2, 112(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s3, 104(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s4, 96(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s5, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s6, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s7, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s8, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs3, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs4, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs5, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: .cfi_offset s3, -40
-; RV64-NEXT: .cfi_offset s4, -48
-; RV64-NEXT: .cfi_offset s5, -56
-; RV64-NEXT: .cfi_offset s6, -64
-; RV64-NEXT: .cfi_offset s7, -72
-; RV64-NEXT: .cfi_offset s8, -80
-; RV64-NEXT: .cfi_offset fs0, -88
-; RV64-NEXT: .cfi_offset fs1, -96
-; RV64-NEXT: .cfi_offset fs2, -104
-; RV64-NEXT: .cfi_offset fs3, -112
-; RV64-NEXT: .cfi_offset fs4, -120
-; RV64-NEXT: .cfi_offset fs5, -128
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a3, a2, 1
-; RV64-NEXT: add a2, a3, a2
-; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x01, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 144 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v8, (a1)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w s4, fa0
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs2, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs3, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs4, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: fmv.x.w s2, fs5
-; RV64-NEXT: fmv.x.w s3, fs4
-; RV64-NEXT: fmv.x.w s5, fs3
-; RV64-NEXT: fmv.x.w s6, fs2
-; RV64-NEXT: fmv.x.w s7, fs1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fneg.s fa0, fa5
-; RV64-NEXT: fmv.x.w s8, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli s4, s4, 16
-; RV64-NEXT: fmv.w.x fs0, s4
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa5, a1
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s4, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: slli s8, s8, 16
-; RV64-NEXT: fmv.w.x fa4, s8
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s4
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: slli s7, s7, 16
-; RV64-NEXT: fmv.w.x fa4, s7
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: slli s6, s6, 16
-; RV64-NEXT: fmv.w.x fa4, s6
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: slli s5, s5, 16
-; RV64-NEXT: fmv.w.x fa4, s5
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s4, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: slli s3, s3, 16
-; RV64-NEXT: fmv.w.x fa4, s3
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s4
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: slli s2, s2, 16
-; RV64-NEXT: fmv.w.x fa4, s2
-; RV64-NEXT: fmv.w.x fs0, zero
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: slli s1, s1, 16
-; RV64-NEXT: fmv.w.x fa4, s1
-; RV64-NEXT: fmadd.s fa0, fa5, fs0, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 128(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 120(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s2, 112(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s3, 104(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s4, 96(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s5, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s6, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s7, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s8, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs3, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs4, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs5, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 144
-; RV64-NEXT: ret
+; CHECK-LABEL: fmsub_vf_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmv.x.w a2, fa0
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v10, a2
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmadd.vv v8, v14, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = load <6 x bfloat>, ptr %y
%c = insertelement <6 x bfloat> poison, bfloat %z, i32 0
@@ -28447,537 +3925,24 @@ define void @fnmadd_fv_v2f64(ptr %x, ptr %y, double %z) {
}
define void @trunc_v8bf16(ptr %x) {
-; RV32-LABEL: trunc_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -64
-; RV32-NEXT: .cfi_def_cfa_offset 64
-; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
-; RV32-NEXT: fsd fs1, 32(sp) # 8-byte Folded Spill
-; RV32-NEXT: fsd fs2, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: .cfi_offset fs1, -32
-; RV32-NEXT: .cfi_offset fs2, -40
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: lui a0, 307200
-; RV32-NEXT: fmv.w.x fs2, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs2
-; RV32-NEXT: beqz a0, .LBB169_2
-; RV32-NEXT: # %bb.1:
-; RV32-NEXT: fcvt.w.s a0, fa0, rtz
-; RV32-NEXT: fcvt.s.w fa5, a0, rtz
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB169_2:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs0, fa0
-; RV32-NEXT: beqz a0, .LBB169_4
-; RV32-NEXT: # %bb.3:
-; RV32-NEXT: fcvt.w.s a0, fa5, rtz
-; RV32-NEXT: fcvt.s.w fa4, a0, rtz
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB169_4:
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: beqz a0, .LBB169_6
-; RV32-NEXT: # %bb.5:
-; RV32-NEXT: fcvt.w.s a0, fa5, rtz
-; RV32-NEXT: fcvt.s.w fa4, a0, rtz
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB169_6:
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs1, fa0
-; RV32-NEXT: beqz a0, .LBB169_8
-; RV32-NEXT: # %bb.7:
-; RV32-NEXT: fcvt.w.s a0, fa5, rtz
-; RV32-NEXT: fcvt.s.w fa4, a0, rtz
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB169_8:
-; RV32-NEXT: fmv.x.w s1, fs0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs0, fa0
-; RV32-NEXT: beqz a0, .LBB169_10
-; RV32-NEXT: # %bb.9:
-; RV32-NEXT: fcvt.w.s a0, fa5, rtz
-; RV32-NEXT: fcvt.s.w fa4, a0, rtz
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB169_10:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w s2, fs1
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.s fa5, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa4, fa0
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.x.w s1, fa5
-; RV32-NEXT: beqz a0, .LBB169_12
-; RV32-NEXT: # %bb.11:
-; RV32-NEXT: fcvt.w.s a0, fa0, rtz
-; RV32-NEXT: fcvt.s.w fa5, a0, rtz
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB169_12:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w s2, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v9, v9, 6
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs2
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a0, .LBB169_14
-; RV32-NEXT: # %bb.13:
-; RV32-NEXT: fcvt.w.s a0, fa0, rtz
-; RV32-NEXT: fcvt.s.w fa5, a0, rtz
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB169_14:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs2
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB169_16
-; RV32-NEXT: # %bb.15:
-; RV32-NEXT: fcvt.w.s a0, fa0, rtz
-; RV32-NEXT: fcvt.s.w fa5, a0, rtz
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB169_16:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
-; RV32-NEXT: fld fs1, 32(sp) # 8-byte Folded Reload
-; RV32-NEXT: fld fs2, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 64
-; RV32-NEXT: ret
-;
-; RV64-LABEL: trunc_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -96
-; RV64-NEXT: .cfi_def_cfa_offset 96
-; RV64-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s2, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: .cfi_offset fs0, -40
-; RV64-NEXT: .cfi_offset fs1, -48
-; RV64-NEXT: .cfi_offset fs2, -56
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 96 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: lui a0, 307200
-; RV64-NEXT: fmv.w.x fs2, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs2
-; RV64-NEXT: beqz a0, .LBB169_2
-; RV64-NEXT: # %bb.1:
-; RV64-NEXT: fcvt.w.s a0, fa0, rtz
-; RV64-NEXT: fcvt.s.w fa5, a0, rtz
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB169_2:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: beqz a0, .LBB169_4
-; RV64-NEXT: # %bb.3:
-; RV64-NEXT: fcvt.w.s a0, fa5, rtz
-; RV64-NEXT: fcvt.s.w fa4, a0, rtz
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB169_4:
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: beqz a0, .LBB169_6
-; RV64-NEXT: # %bb.5:
-; RV64-NEXT: fcvt.w.s a0, fa5, rtz
-; RV64-NEXT: fcvt.s.w fa4, a0, rtz
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB169_6:
-; RV64-NEXT: fmv.x.w s2, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs1, fa0
-; RV64-NEXT: beqz a0, .LBB169_8
-; RV64-NEXT: # %bb.7:
-; RV64-NEXT: fcvt.w.s a0, fa5, rtz
-; RV64-NEXT: fcvt.s.w fa4, a0, rtz
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB169_8:
-; RV64-NEXT: fmv.x.w s1, fs0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: beqz a0, .LBB169_10
-; RV64-NEXT: # %bb.9:
-; RV64-NEXT: fcvt.w.s a0, fa5, rtz
-; RV64-NEXT: fcvt.s.w fa4, a0, rtz
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB169_10:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w s2, fs1
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fa5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa4, fa0
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.x.w s1, fa5
-; RV64-NEXT: beqz a0, .LBB169_12
-; RV64-NEXT: # %bb.11:
-; RV64-NEXT: fcvt.w.s a0, fa0, rtz
-; RV64-NEXT: fcvt.s.w fa5, a0, rtz
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB169_12:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w s2, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v9, v9, 6
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs2
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a0, .LBB169_14
-; RV64-NEXT: # %bb.13:
-; RV64-NEXT: fcvt.w.s a0, fa0, rtz
-; RV64-NEXT: fcvt.s.w fa5, a0, rtz
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB169_14:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs2
-; RV64-NEXT: addi a2, sp, 32
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB169_16
-; RV64-NEXT: # %bb.15:
-; RV64-NEXT: fcvt.w.s a0, fa0, rtz
-; RV64-NEXT: fcvt.s.w fa5, a0, rtz
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB169_16:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s2, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 96
-; RV64-NEXT: ret
+; CHECK-LABEL: trunc_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = call <8 x bfloat> @llvm.trunc.v8bf16(<8 x bfloat> %a)
store <8 x bfloat> %b, ptr %x
@@ -28985,347 +3950,25 @@ define void @trunc_v8bf16(ptr %x) {
}
define void @trunc_v6bf16(ptr %x) {
-; RV32-LABEL: trunc_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: lui a0, 307200
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs0
-; RV32-NEXT: beqz a0, .LBB170_2
-; RV32-NEXT: # %bb.1:
-; RV32-NEXT: fcvt.w.s a0, fa0, rtz
-; RV32-NEXT: fcvt.s.w fa5, a0, rtz
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB170_2:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.s fa5, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa4, fa0
-; RV32-NEXT: flt.s a0, fa4, fs0
-; RV32-NEXT: fmv.x.w s1, fa5
-; RV32-NEXT: beqz a0, .LBB170_4
-; RV32-NEXT: # %bb.3:
-; RV32-NEXT: fcvt.w.s a0, fa0, rtz
-; RV32-NEXT: fcvt.s.w fa5, a0, rtz
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB170_4:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v9, v9, 2
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a0, .LBB170_6
-; RV32-NEXT: # %bb.5:
-; RV32-NEXT: fcvt.w.s a0, fa0, rtz
-; RV32-NEXT: fcvt.s.w fa5, a0, rtz
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB170_6:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs0
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB170_8
-; RV32-NEXT: # %bb.7:
-; RV32-NEXT: fcvt.w.s a0, fa0, rtz
-; RV32-NEXT: fcvt.s.w fa5, a0, rtz
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB170_8:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs0
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB170_10
-; RV32-NEXT: # %bb.9:
-; RV32-NEXT: fcvt.w.s a0, fa0, rtz
-; RV32-NEXT: fcvt.s.w fa5, a0, rtz
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB170_10:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs0
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB170_12
-; RV32-NEXT: # %bb.11:
-; RV32-NEXT: fcvt.w.s a0, fa0, rtz
-; RV32-NEXT: fcvt.s.w fa5, a0, rtz
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB170_12:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: trunc_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: lui a0, 307200
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs0
-; RV64-NEXT: beqz a0, .LBB170_2
-; RV64-NEXT: # %bb.1:
-; RV64-NEXT: fcvt.w.s a0, fa0, rtz
-; RV64-NEXT: fcvt.s.w fa5, a0, rtz
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB170_2:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fa5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa4, fa0
-; RV64-NEXT: flt.s a0, fa4, fs0
-; RV64-NEXT: fmv.x.w s1, fa5
-; RV64-NEXT: beqz a0, .LBB170_4
-; RV64-NEXT: # %bb.3:
-; RV64-NEXT: fcvt.w.s a0, fa0, rtz
-; RV64-NEXT: fcvt.s.w fa5, a0, rtz
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB170_4:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v9, v9, 2
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a0, .LBB170_6
-; RV64-NEXT: # %bb.5:
-; RV64-NEXT: fcvt.w.s a0, fa0, rtz
-; RV64-NEXT: fcvt.s.w fa5, a0, rtz
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB170_6:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs0
-; RV64-NEXT: addi a2, sp, 16
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB170_8
-; RV64-NEXT: # %bb.7:
-; RV64-NEXT: fcvt.w.s a0, fa0, rtz
-; RV64-NEXT: fcvt.s.w fa5, a0, rtz
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB170_8:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs0
-; RV64-NEXT: addi a2, sp, 16
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB170_10
-; RV64-NEXT: # %bb.9:
-; RV64-NEXT: fcvt.w.s a0, fa0, rtz
-; RV64-NEXT: fcvt.s.w fa5, a0, rtz
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB170_10:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs0
-; RV64-NEXT: addi a2, sp, 16
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB170_12
-; RV64-NEXT: # %bb.11:
-; RV64-NEXT: fcvt.w.s a0, fa0, rtz
-; RV64-NEXT: fcvt.s.w fa5, a0, rtz
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB170_12:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: trunc_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v10, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = call <6 x bfloat> @llvm.trunc.v6bf16(<6 x bfloat> %a)
store <6 x bfloat> %b, ptr %x
@@ -29457,537 +4100,26 @@ define void @trunc_v2f64(ptr %x) {
}
define void @ceil_v8bf16(ptr %x) {
-; RV32-LABEL: ceil_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -64
-; RV32-NEXT: .cfi_def_cfa_offset 64
-; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
-; RV32-NEXT: fsd fs1, 32(sp) # 8-byte Folded Spill
-; RV32-NEXT: fsd fs2, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: .cfi_offset fs1, -32
-; RV32-NEXT: .cfi_offset fs2, -40
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: lui a0, 307200
-; RV32-NEXT: fmv.w.x fs2, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs2
-; RV32-NEXT: beqz a0, .LBB175_2
-; RV32-NEXT: # %bb.1:
-; RV32-NEXT: fcvt.w.s a0, fa0, rup
-; RV32-NEXT: fcvt.s.w fa5, a0, rup
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB175_2:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs0, fa0
-; RV32-NEXT: beqz a0, .LBB175_4
-; RV32-NEXT: # %bb.3:
-; RV32-NEXT: fcvt.w.s a0, fa5, rup
-; RV32-NEXT: fcvt.s.w fa4, a0, rup
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB175_4:
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: beqz a0, .LBB175_6
-; RV32-NEXT: # %bb.5:
-; RV32-NEXT: fcvt.w.s a0, fa5, rup
-; RV32-NEXT: fcvt.s.w fa4, a0, rup
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB175_6:
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs1, fa0
-; RV32-NEXT: beqz a0, .LBB175_8
-; RV32-NEXT: # %bb.7:
-; RV32-NEXT: fcvt.w.s a0, fa5, rup
-; RV32-NEXT: fcvt.s.w fa4, a0, rup
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB175_8:
-; RV32-NEXT: fmv.x.w s1, fs0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs0, fa0
-; RV32-NEXT: beqz a0, .LBB175_10
-; RV32-NEXT: # %bb.9:
-; RV32-NEXT: fcvt.w.s a0, fa5, rup
-; RV32-NEXT: fcvt.s.w fa4, a0, rup
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB175_10:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w s2, fs1
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.s fa5, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa4, fa0
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.x.w s1, fa5
-; RV32-NEXT: beqz a0, .LBB175_12
-; RV32-NEXT: # %bb.11:
-; RV32-NEXT: fcvt.w.s a0, fa0, rup
-; RV32-NEXT: fcvt.s.w fa5, a0, rup
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB175_12:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w s2, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v9, v9, 6
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs2
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a0, .LBB175_14
-; RV32-NEXT: # %bb.13:
-; RV32-NEXT: fcvt.w.s a0, fa0, rup
-; RV32-NEXT: fcvt.s.w fa5, a0, rup
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB175_14:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs2
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB175_16
-; RV32-NEXT: # %bb.15:
-; RV32-NEXT: fcvt.w.s a0, fa0, rup
-; RV32-NEXT: fcvt.s.w fa5, a0, rup
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB175_16:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
-; RV32-NEXT: fld fs1, 32(sp) # 8-byte Folded Reload
-; RV32-NEXT: fld fs2, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 64
-; RV32-NEXT: ret
-;
-; RV64-LABEL: ceil_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -96
-; RV64-NEXT: .cfi_def_cfa_offset 96
-; RV64-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s2, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: .cfi_offset fs0, -40
-; RV64-NEXT: .cfi_offset fs1, -48
-; RV64-NEXT: .cfi_offset fs2, -56
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 96 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: lui a0, 307200
-; RV64-NEXT: fmv.w.x fs2, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs2
-; RV64-NEXT: beqz a0, .LBB175_2
-; RV64-NEXT: # %bb.1:
-; RV64-NEXT: fcvt.w.s a0, fa0, rup
-; RV64-NEXT: fcvt.s.w fa5, a0, rup
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB175_2:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: beqz a0, .LBB175_4
-; RV64-NEXT: # %bb.3:
-; RV64-NEXT: fcvt.w.s a0, fa5, rup
-; RV64-NEXT: fcvt.s.w fa4, a0, rup
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB175_4:
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: beqz a0, .LBB175_6
-; RV64-NEXT: # %bb.5:
-; RV64-NEXT: fcvt.w.s a0, fa5, rup
-; RV64-NEXT: fcvt.s.w fa4, a0, rup
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB175_6:
-; RV64-NEXT: fmv.x.w s2, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs1, fa0
-; RV64-NEXT: beqz a0, .LBB175_8
-; RV64-NEXT: # %bb.7:
-; RV64-NEXT: fcvt.w.s a0, fa5, rup
-; RV64-NEXT: fcvt.s.w fa4, a0, rup
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB175_8:
-; RV64-NEXT: fmv.x.w s1, fs0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: beqz a0, .LBB175_10
-; RV64-NEXT: # %bb.9:
-; RV64-NEXT: fcvt.w.s a0, fa5, rup
-; RV64-NEXT: fcvt.s.w fa4, a0, rup
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB175_10:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w s2, fs1
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fa5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa4, fa0
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.x.w s1, fa5
-; RV64-NEXT: beqz a0, .LBB175_12
-; RV64-NEXT: # %bb.11:
-; RV64-NEXT: fcvt.w.s a0, fa0, rup
-; RV64-NEXT: fcvt.s.w fa5, a0, rup
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB175_12:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w s2, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v9, v9, 6
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs2
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a0, .LBB175_14
-; RV64-NEXT: # %bb.13:
-; RV64-NEXT: fcvt.w.s a0, fa0, rup
-; RV64-NEXT: fcvt.s.w fa5, a0, rup
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB175_14:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs2
-; RV64-NEXT: addi a2, sp, 32
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB175_16
-; RV64-NEXT: # %bb.15:
-; RV64-NEXT: fcvt.w.s a0, fa0, rup
-; RV64-NEXT: fcvt.s.w fa5, a0, rup
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB175_16:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s2, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 96
-; RV64-NEXT: ret
+; CHECK-LABEL: ceil_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: fsrmi a1, 3
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = call <8 x bfloat> @llvm.ceil.v8bf16(<8 x bfloat> %a)
store <8 x bfloat> %b, ptr %x
@@ -29995,347 +4127,27 @@ define void @ceil_v8bf16(ptr %x) {
}
define void @ceil_v6bf16(ptr %x) {
-; RV32-LABEL: ceil_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: lui a0, 307200
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs0
-; RV32-NEXT: beqz a0, .LBB176_2
-; RV32-NEXT: # %bb.1:
-; RV32-NEXT: fcvt.w.s a0, fa0, rup
-; RV32-NEXT: fcvt.s.w fa5, a0, rup
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB176_2:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.s fa5, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa4, fa0
-; RV32-NEXT: flt.s a0, fa4, fs0
-; RV32-NEXT: fmv.x.w s1, fa5
-; RV32-NEXT: beqz a0, .LBB176_4
-; RV32-NEXT: # %bb.3:
-; RV32-NEXT: fcvt.w.s a0, fa0, rup
-; RV32-NEXT: fcvt.s.w fa5, a0, rup
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB176_4:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v9, v9, 2
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a0, .LBB176_6
-; RV32-NEXT: # %bb.5:
-; RV32-NEXT: fcvt.w.s a0, fa0, rup
-; RV32-NEXT: fcvt.s.w fa5, a0, rup
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB176_6:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs0
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB176_8
-; RV32-NEXT: # %bb.7:
-; RV32-NEXT: fcvt.w.s a0, fa0, rup
-; RV32-NEXT: fcvt.s.w fa5, a0, rup
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB176_8:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs0
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB176_10
-; RV32-NEXT: # %bb.9:
-; RV32-NEXT: fcvt.w.s a0, fa0, rup
-; RV32-NEXT: fcvt.s.w fa5, a0, rup
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB176_10:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs0
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB176_12
-; RV32-NEXT: # %bb.11:
-; RV32-NEXT: fcvt.w.s a0, fa0, rup
-; RV32-NEXT: fcvt.s.w fa5, a0, rup
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB176_12:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: ceil_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: lui a0, 307200
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs0
-; RV64-NEXT: beqz a0, .LBB176_2
-; RV64-NEXT: # %bb.1:
-; RV64-NEXT: fcvt.w.s a0, fa0, rup
-; RV64-NEXT: fcvt.s.w fa5, a0, rup
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB176_2:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fa5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa4, fa0
-; RV64-NEXT: flt.s a0, fa4, fs0
-; RV64-NEXT: fmv.x.w s1, fa5
-; RV64-NEXT: beqz a0, .LBB176_4
-; RV64-NEXT: # %bb.3:
-; RV64-NEXT: fcvt.w.s a0, fa0, rup
-; RV64-NEXT: fcvt.s.w fa5, a0, rup
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB176_4:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v9, v9, 2
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a0, .LBB176_6
-; RV64-NEXT: # %bb.5:
-; RV64-NEXT: fcvt.w.s a0, fa0, rup
-; RV64-NEXT: fcvt.s.w fa5, a0, rup
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB176_6:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs0
-; RV64-NEXT: addi a2, sp, 16
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB176_8
-; RV64-NEXT: # %bb.7:
-; RV64-NEXT: fcvt.w.s a0, fa0, rup
-; RV64-NEXT: fcvt.s.w fa5, a0, rup
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB176_8:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs0
-; RV64-NEXT: addi a2, sp, 16
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB176_10
-; RV64-NEXT: # %bb.9:
-; RV64-NEXT: fcvt.w.s a0, fa0, rup
-; RV64-NEXT: fcvt.s.w fa5, a0, rup
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB176_10:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs0
-; RV64-NEXT: addi a2, sp, 16
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB176_12
-; RV64-NEXT: # %bb.11:
-; RV64-NEXT: fcvt.w.s a0, fa0, rup
-; RV64-NEXT: fcvt.s.w fa5, a0, rup
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB176_12:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: ceil_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: fsrmi a1, 3
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = call <6 x bfloat> @llvm.ceil.v6bf16(<6 x bfloat> %a)
store <6 x bfloat> %b, ptr %x
@@ -30479,537 +4291,26 @@ define void @ceil_v2f64(ptr %x) {
}
define void @floor_v8bf16(ptr %x) {
-; RV32-LABEL: floor_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -64
-; RV32-NEXT: .cfi_def_cfa_offset 64
-; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
-; RV32-NEXT: fsd fs1, 32(sp) # 8-byte Folded Spill
-; RV32-NEXT: fsd fs2, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: .cfi_offset fs1, -32
-; RV32-NEXT: .cfi_offset fs2, -40
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: lui a0, 307200
-; RV32-NEXT: fmv.w.x fs2, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs2
-; RV32-NEXT: beqz a0, .LBB181_2
-; RV32-NEXT: # %bb.1:
-; RV32-NEXT: fcvt.w.s a0, fa0, rdn
-; RV32-NEXT: fcvt.s.w fa5, a0, rdn
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB181_2:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs0, fa0
-; RV32-NEXT: beqz a0, .LBB181_4
-; RV32-NEXT: # %bb.3:
-; RV32-NEXT: fcvt.w.s a0, fa5, rdn
-; RV32-NEXT: fcvt.s.w fa4, a0, rdn
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB181_4:
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: beqz a0, .LBB181_6
-; RV32-NEXT: # %bb.5:
-; RV32-NEXT: fcvt.w.s a0, fa5, rdn
-; RV32-NEXT: fcvt.s.w fa4, a0, rdn
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB181_6:
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs1, fa0
-; RV32-NEXT: beqz a0, .LBB181_8
-; RV32-NEXT: # %bb.7:
-; RV32-NEXT: fcvt.w.s a0, fa5, rdn
-; RV32-NEXT: fcvt.s.w fa4, a0, rdn
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB181_8:
-; RV32-NEXT: fmv.x.w s1, fs0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs0, fa0
-; RV32-NEXT: beqz a0, .LBB181_10
-; RV32-NEXT: # %bb.9:
-; RV32-NEXT: fcvt.w.s a0, fa5, rdn
-; RV32-NEXT: fcvt.s.w fa4, a0, rdn
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB181_10:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w s2, fs1
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.s fa5, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa4, fa0
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.x.w s1, fa5
-; RV32-NEXT: beqz a0, .LBB181_12
-; RV32-NEXT: # %bb.11:
-; RV32-NEXT: fcvt.w.s a0, fa0, rdn
-; RV32-NEXT: fcvt.s.w fa5, a0, rdn
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB181_12:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w s2, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v9, v9, 6
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs2
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a0, .LBB181_14
-; RV32-NEXT: # %bb.13:
-; RV32-NEXT: fcvt.w.s a0, fa0, rdn
-; RV32-NEXT: fcvt.s.w fa5, a0, rdn
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB181_14:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs2
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB181_16
-; RV32-NEXT: # %bb.15:
-; RV32-NEXT: fcvt.w.s a0, fa0, rdn
-; RV32-NEXT: fcvt.s.w fa5, a0, rdn
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB181_16:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
-; RV32-NEXT: fld fs1, 32(sp) # 8-byte Folded Reload
-; RV32-NEXT: fld fs2, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 64
-; RV32-NEXT: ret
-;
-; RV64-LABEL: floor_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -96
-; RV64-NEXT: .cfi_def_cfa_offset 96
-; RV64-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s2, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: .cfi_offset fs0, -40
-; RV64-NEXT: .cfi_offset fs1, -48
-; RV64-NEXT: .cfi_offset fs2, -56
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 96 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: lui a0, 307200
-; RV64-NEXT: fmv.w.x fs2, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs2
-; RV64-NEXT: beqz a0, .LBB181_2
-; RV64-NEXT: # %bb.1:
-; RV64-NEXT: fcvt.w.s a0, fa0, rdn
-; RV64-NEXT: fcvt.s.w fa5, a0, rdn
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB181_2:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: beqz a0, .LBB181_4
-; RV64-NEXT: # %bb.3:
-; RV64-NEXT: fcvt.w.s a0, fa5, rdn
-; RV64-NEXT: fcvt.s.w fa4, a0, rdn
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB181_4:
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: beqz a0, .LBB181_6
-; RV64-NEXT: # %bb.5:
-; RV64-NEXT: fcvt.w.s a0, fa5, rdn
-; RV64-NEXT: fcvt.s.w fa4, a0, rdn
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB181_6:
-; RV64-NEXT: fmv.x.w s2, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs1, fa0
-; RV64-NEXT: beqz a0, .LBB181_8
-; RV64-NEXT: # %bb.7:
-; RV64-NEXT: fcvt.w.s a0, fa5, rdn
-; RV64-NEXT: fcvt.s.w fa4, a0, rdn
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB181_8:
-; RV64-NEXT: fmv.x.w s1, fs0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: beqz a0, .LBB181_10
-; RV64-NEXT: # %bb.9:
-; RV64-NEXT: fcvt.w.s a0, fa5, rdn
-; RV64-NEXT: fcvt.s.w fa4, a0, rdn
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB181_10:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w s2, fs1
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fa5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa4, fa0
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.x.w s1, fa5
-; RV64-NEXT: beqz a0, .LBB181_12
-; RV64-NEXT: # %bb.11:
-; RV64-NEXT: fcvt.w.s a0, fa0, rdn
-; RV64-NEXT: fcvt.s.w fa5, a0, rdn
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB181_12:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w s2, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v9, v9, 6
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs2
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a0, .LBB181_14
-; RV64-NEXT: # %bb.13:
-; RV64-NEXT: fcvt.w.s a0, fa0, rdn
-; RV64-NEXT: fcvt.s.w fa5, a0, rdn
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB181_14:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs2
-; RV64-NEXT: addi a2, sp, 32
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB181_16
-; RV64-NEXT: # %bb.15:
-; RV64-NEXT: fcvt.w.s a0, fa0, rdn
-; RV64-NEXT: fcvt.s.w fa5, a0, rdn
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB181_16:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s2, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 96
-; RV64-NEXT: ret
+; CHECK-LABEL: floor_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: fsrmi a1, 2
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = call <8 x bfloat> @llvm.floor.v8bf16(<8 x bfloat> %a)
store <8 x bfloat> %b, ptr %x
@@ -31017,347 +4318,27 @@ define void @floor_v8bf16(ptr %x) {
}
define void @floor_v6bf16(ptr %x) {
-; RV32-LABEL: floor_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: lui a0, 307200
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs0
-; RV32-NEXT: beqz a0, .LBB182_2
-; RV32-NEXT: # %bb.1:
-; RV32-NEXT: fcvt.w.s a0, fa0, rdn
-; RV32-NEXT: fcvt.s.w fa5, a0, rdn
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB182_2:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.s fa5, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa4, fa0
-; RV32-NEXT: flt.s a0, fa4, fs0
-; RV32-NEXT: fmv.x.w s1, fa5
-; RV32-NEXT: beqz a0, .LBB182_4
-; RV32-NEXT: # %bb.3:
-; RV32-NEXT: fcvt.w.s a0, fa0, rdn
-; RV32-NEXT: fcvt.s.w fa5, a0, rdn
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB182_4:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v9, v9, 2
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a0, .LBB182_6
-; RV32-NEXT: # %bb.5:
-; RV32-NEXT: fcvt.w.s a0, fa0, rdn
-; RV32-NEXT: fcvt.s.w fa5, a0, rdn
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB182_6:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs0
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB182_8
-; RV32-NEXT: # %bb.7:
-; RV32-NEXT: fcvt.w.s a0, fa0, rdn
-; RV32-NEXT: fcvt.s.w fa5, a0, rdn
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB182_8:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs0
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB182_10
-; RV32-NEXT: # %bb.9:
-; RV32-NEXT: fcvt.w.s a0, fa0, rdn
-; RV32-NEXT: fcvt.s.w fa5, a0, rdn
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB182_10:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs0
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB182_12
-; RV32-NEXT: # %bb.11:
-; RV32-NEXT: fcvt.w.s a0, fa0, rdn
-; RV32-NEXT: fcvt.s.w fa5, a0, rdn
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB182_12:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: floor_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: lui a0, 307200
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs0
-; RV64-NEXT: beqz a0, .LBB182_2
-; RV64-NEXT: # %bb.1:
-; RV64-NEXT: fcvt.w.s a0, fa0, rdn
-; RV64-NEXT: fcvt.s.w fa5, a0, rdn
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB182_2:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fa5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa4, fa0
-; RV64-NEXT: flt.s a0, fa4, fs0
-; RV64-NEXT: fmv.x.w s1, fa5
-; RV64-NEXT: beqz a0, .LBB182_4
-; RV64-NEXT: # %bb.3:
-; RV64-NEXT: fcvt.w.s a0, fa0, rdn
-; RV64-NEXT: fcvt.s.w fa5, a0, rdn
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB182_4:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v9, v9, 2
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a0, .LBB182_6
-; RV64-NEXT: # %bb.5:
-; RV64-NEXT: fcvt.w.s a0, fa0, rdn
-; RV64-NEXT: fcvt.s.w fa5, a0, rdn
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB182_6:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs0
-; RV64-NEXT: addi a2, sp, 16
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB182_8
-; RV64-NEXT: # %bb.7:
-; RV64-NEXT: fcvt.w.s a0, fa0, rdn
-; RV64-NEXT: fcvt.s.w fa5, a0, rdn
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB182_8:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs0
-; RV64-NEXT: addi a2, sp, 16
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB182_10
-; RV64-NEXT: # %bb.9:
-; RV64-NEXT: fcvt.w.s a0, fa0, rdn
-; RV64-NEXT: fcvt.s.w fa5, a0, rdn
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB182_10:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs0
-; RV64-NEXT: addi a2, sp, 16
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB182_12
-; RV64-NEXT: # %bb.11:
-; RV64-NEXT: fcvt.w.s a0, fa0, rdn
-; RV64-NEXT: fcvt.s.w fa5, a0, rdn
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB182_12:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: floor_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: fsrmi a1, 2
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = call <6 x bfloat> @llvm.floor.v6bf16(<6 x bfloat> %a)
store <6 x bfloat> %b, ptr %x
@@ -31501,537 +4482,26 @@ define void @floor_v2f64(ptr %x) {
}
define void @round_v8bf16(ptr %x) {
-; RV32-LABEL: round_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -64
-; RV32-NEXT: .cfi_def_cfa_offset 64
-; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
-; RV32-NEXT: fsd fs1, 32(sp) # 8-byte Folded Spill
-; RV32-NEXT: fsd fs2, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: .cfi_offset fs1, -32
-; RV32-NEXT: .cfi_offset fs2, -40
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: lui a0, 307200
-; RV32-NEXT: fmv.w.x fs2, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs2
-; RV32-NEXT: beqz a0, .LBB187_2
-; RV32-NEXT: # %bb.1:
-; RV32-NEXT: fcvt.w.s a0, fa0, rmm
-; RV32-NEXT: fcvt.s.w fa5, a0, rmm
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB187_2:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs0, fa0
-; RV32-NEXT: beqz a0, .LBB187_4
-; RV32-NEXT: # %bb.3:
-; RV32-NEXT: fcvt.w.s a0, fa5, rmm
-; RV32-NEXT: fcvt.s.w fa4, a0, rmm
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB187_4:
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: beqz a0, .LBB187_6
-; RV32-NEXT: # %bb.5:
-; RV32-NEXT: fcvt.w.s a0, fa5, rmm
-; RV32-NEXT: fcvt.s.w fa4, a0, rmm
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB187_6:
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs1, fa0
-; RV32-NEXT: beqz a0, .LBB187_8
-; RV32-NEXT: # %bb.7:
-; RV32-NEXT: fcvt.w.s a0, fa5, rmm
-; RV32-NEXT: fcvt.s.w fa4, a0, rmm
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB187_8:
-; RV32-NEXT: fmv.x.w s1, fs0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs0, fa0
-; RV32-NEXT: beqz a0, .LBB187_10
-; RV32-NEXT: # %bb.9:
-; RV32-NEXT: fcvt.w.s a0, fa5, rmm
-; RV32-NEXT: fcvt.s.w fa4, a0, rmm
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB187_10:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w s2, fs1
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.s fa5, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa4, fa0
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.x.w s1, fa5
-; RV32-NEXT: beqz a0, .LBB187_12
-; RV32-NEXT: # %bb.11:
-; RV32-NEXT: fcvt.w.s a0, fa0, rmm
-; RV32-NEXT: fcvt.s.w fa5, a0, rmm
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB187_12:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w s2, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v9, v9, 6
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs2
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a0, .LBB187_14
-; RV32-NEXT: # %bb.13:
-; RV32-NEXT: fcvt.w.s a0, fa0, rmm
-; RV32-NEXT: fcvt.s.w fa5, a0, rmm
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB187_14:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs2
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB187_16
-; RV32-NEXT: # %bb.15:
-; RV32-NEXT: fcvt.w.s a0, fa0, rmm
-; RV32-NEXT: fcvt.s.w fa5, a0, rmm
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB187_16:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
-; RV32-NEXT: fld fs1, 32(sp) # 8-byte Folded Reload
-; RV32-NEXT: fld fs2, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 64
-; RV32-NEXT: ret
-;
-; RV64-LABEL: round_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -96
-; RV64-NEXT: .cfi_def_cfa_offset 96
-; RV64-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s2, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: .cfi_offset fs0, -40
-; RV64-NEXT: .cfi_offset fs1, -48
-; RV64-NEXT: .cfi_offset fs2, -56
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 96 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: lui a0, 307200
-; RV64-NEXT: fmv.w.x fs2, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs2
-; RV64-NEXT: beqz a0, .LBB187_2
-; RV64-NEXT: # %bb.1:
-; RV64-NEXT: fcvt.w.s a0, fa0, rmm
-; RV64-NEXT: fcvt.s.w fa5, a0, rmm
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB187_2:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: beqz a0, .LBB187_4
-; RV64-NEXT: # %bb.3:
-; RV64-NEXT: fcvt.w.s a0, fa5, rmm
-; RV64-NEXT: fcvt.s.w fa4, a0, rmm
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB187_4:
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: beqz a0, .LBB187_6
-; RV64-NEXT: # %bb.5:
-; RV64-NEXT: fcvt.w.s a0, fa5, rmm
-; RV64-NEXT: fcvt.s.w fa4, a0, rmm
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB187_6:
-; RV64-NEXT: fmv.x.w s2, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs1, fa0
-; RV64-NEXT: beqz a0, .LBB187_8
-; RV64-NEXT: # %bb.7:
-; RV64-NEXT: fcvt.w.s a0, fa5, rmm
-; RV64-NEXT: fcvt.s.w fa4, a0, rmm
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB187_8:
-; RV64-NEXT: fmv.x.w s1, fs0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: beqz a0, .LBB187_10
-; RV64-NEXT: # %bb.9:
-; RV64-NEXT: fcvt.w.s a0, fa5, rmm
-; RV64-NEXT: fcvt.s.w fa4, a0, rmm
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB187_10:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w s2, fs1
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fa5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa4, fa0
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.x.w s1, fa5
-; RV64-NEXT: beqz a0, .LBB187_12
-; RV64-NEXT: # %bb.11:
-; RV64-NEXT: fcvt.w.s a0, fa0, rmm
-; RV64-NEXT: fcvt.s.w fa5, a0, rmm
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB187_12:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w s2, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v9, v9, 6
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs2
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a0, .LBB187_14
-; RV64-NEXT: # %bb.13:
-; RV64-NEXT: fcvt.w.s a0, fa0, rmm
-; RV64-NEXT: fcvt.s.w fa5, a0, rmm
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB187_14:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs2
-; RV64-NEXT: addi a2, sp, 32
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB187_16
-; RV64-NEXT: # %bb.15:
-; RV64-NEXT: fcvt.w.s a0, fa0, rmm
-; RV64-NEXT: fcvt.s.w fa5, a0, rmm
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB187_16:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s2, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 96
-; RV64-NEXT: ret
+; CHECK-LABEL: round_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: fsrmi a1, 4
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = call <8 x bfloat> @llvm.round.v8bf16(<8 x bfloat> %a)
store <8 x bfloat> %b, ptr %x
@@ -32039,347 +4509,27 @@ define void @round_v8bf16(ptr %x) {
}
define void @round_v6bf16(ptr %x) {
-; RV32-LABEL: round_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: lui a0, 307200
-; RV32-NEXT: fmv.w.x fs0, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs0
-; RV32-NEXT: beqz a0, .LBB188_2
-; RV32-NEXT: # %bb.1:
-; RV32-NEXT: fcvt.w.s a0, fa0, rmm
-; RV32-NEXT: fcvt.s.w fa5, a0, rmm
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB188_2:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.s fa5, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa4, fa0
-; RV32-NEXT: flt.s a0, fa4, fs0
-; RV32-NEXT: fmv.x.w s1, fa5
-; RV32-NEXT: beqz a0, .LBB188_4
-; RV32-NEXT: # %bb.3:
-; RV32-NEXT: fcvt.w.s a0, fa0, rmm
-; RV32-NEXT: fcvt.s.w fa5, a0, rmm
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB188_4:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v9, v9, 2
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a0, .LBB188_6
-; RV32-NEXT: # %bb.5:
-; RV32-NEXT: fcvt.w.s a0, fa0, rmm
-; RV32-NEXT: fcvt.s.w fa5, a0, rmm
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB188_6:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs0
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB188_8
-; RV32-NEXT: # %bb.7:
-; RV32-NEXT: fcvt.w.s a0, fa0, rmm
-; RV32-NEXT: fcvt.s.w fa5, a0, rmm
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB188_8:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs0
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB188_10
-; RV32-NEXT: # %bb.9:
-; RV32-NEXT: fcvt.w.s a0, fa0, rmm
-; RV32-NEXT: fcvt.s.w fa5, a0, rmm
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB188_10:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs0
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB188_12
-; RV32-NEXT: # %bb.11:
-; RV32-NEXT: fcvt.w.s a0, fa0, rmm
-; RV32-NEXT: fcvt.s.w fa5, a0, rmm
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB188_12:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: round_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 2 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: lui a0, 307200
-; RV64-NEXT: fmv.w.x fs0, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs0
-; RV64-NEXT: beqz a0, .LBB188_2
-; RV64-NEXT: # %bb.1:
-; RV64-NEXT: fcvt.w.s a0, fa0, rmm
-; RV64-NEXT: fcvt.s.w fa5, a0, rmm
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB188_2:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fa5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa4, fa0
-; RV64-NEXT: flt.s a0, fa4, fs0
-; RV64-NEXT: fmv.x.w s1, fa5
-; RV64-NEXT: beqz a0, .LBB188_4
-; RV64-NEXT: # %bb.3:
-; RV64-NEXT: fcvt.w.s a0, fa0, rmm
-; RV64-NEXT: fcvt.s.w fa5, a0, rmm
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB188_4:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v9, v9, 2
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a0, .LBB188_6
-; RV64-NEXT: # %bb.5:
-; RV64-NEXT: fcvt.w.s a0, fa0, rmm
-; RV64-NEXT: fcvt.s.w fa5, a0, rmm
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB188_6:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs0
-; RV64-NEXT: addi a2, sp, 16
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB188_8
-; RV64-NEXT: # %bb.7:
-; RV64-NEXT: fcvt.w.s a0, fa0, rmm
-; RV64-NEXT: fcvt.s.w fa5, a0, rmm
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB188_8:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs0
-; RV64-NEXT: addi a2, sp, 16
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB188_10
-; RV64-NEXT: # %bb.9:
-; RV64-NEXT: fcvt.w.s a0, fa0, rmm
-; RV64-NEXT: fcvt.s.w fa5, a0, rmm
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB188_10:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs0
-; RV64-NEXT: addi a2, sp, 16
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB188_12
-; RV64-NEXT: # %bb.11:
-; RV64-NEXT: fcvt.w.s a0, fa0, rmm
-; RV64-NEXT: fcvt.s.w fa5, a0, rmm
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB188_12:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: round_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: fsrmi a1, 4
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = call <6 x bfloat> @llvm.round.v6bf16(<6 x bfloat> %a)
store <6 x bfloat> %b, ptr %x
@@ -32523,537 +4673,24 @@ define void @round_v2f64(ptr %x) {
}
define void @rint_v8bf16(ptr %x) {
-; RV32-LABEL: rint_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -64
-; RV32-NEXT: .cfi_def_cfa_offset 64
-; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
-; RV32-NEXT: fsd fs1, 32(sp) # 8-byte Folded Spill
-; RV32-NEXT: fsd fs2, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: .cfi_offset fs1, -32
-; RV32-NEXT: .cfi_offset fs2, -40
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: lui a0, 307200
-; RV32-NEXT: fmv.w.x fs2, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs2
-; RV32-NEXT: beqz a0, .LBB193_2
-; RV32-NEXT: # %bb.1:
-; RV32-NEXT: fcvt.w.s a0, fa0
-; RV32-NEXT: fcvt.s.w fa5, a0
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB193_2:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs0, fa0
-; RV32-NEXT: beqz a0, .LBB193_4
-; RV32-NEXT: # %bb.3:
-; RV32-NEXT: fcvt.w.s a0, fa5
-; RV32-NEXT: fcvt.s.w fa4, a0
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB193_4:
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: beqz a0, .LBB193_6
-; RV32-NEXT: # %bb.5:
-; RV32-NEXT: fcvt.w.s a0, fa5
-; RV32-NEXT: fcvt.s.w fa4, a0
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB193_6:
-; RV32-NEXT: fmv.x.w s2, fa0
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs1, fa0
-; RV32-NEXT: beqz a0, .LBB193_8
-; RV32-NEXT: # %bb.7:
-; RV32-NEXT: fcvt.w.s a0, fa5
-; RV32-NEXT: fcvt.s.w fa4, a0
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB193_8:
-; RV32-NEXT: fmv.x.w s1, fs0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: fabs.s fa4, fa5
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.s fs0, fa0
-; RV32-NEXT: beqz a0, .LBB193_10
-; RV32-NEXT: # %bb.9:
-; RV32-NEXT: fcvt.w.s a0, fa5
-; RV32-NEXT: fcvt.s.w fa4, a0
-; RV32-NEXT: fsgnj.s fa5, fa4, fa5
-; RV32-NEXT: .LBB193_10:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w s2, fs1
-; RV32-NEXT: fmv.s fa0, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.s fa5, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa4, fa0
-; RV32-NEXT: flt.s a0, fa4, fs2
-; RV32-NEXT: fmv.x.w s1, fa5
-; RV32-NEXT: beqz a0, .LBB193_12
-; RV32-NEXT: # %bb.11:
-; RV32-NEXT: fcvt.w.s a0, fa0
-; RV32-NEXT: fcvt.s.w fa5, a0
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB193_12:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: fmv.x.w s2, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v9, v9, 6
-; RV32-NEXT: vmv.x.s a0, v9
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a0, fa5, fs2
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a0, .LBB193_14
-; RV32-NEXT: # %bb.13:
-; RV32-NEXT: fcvt.w.s a0, fa0
-; RV32-NEXT: fcvt.s.w fa5, a0
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB193_14:
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, s2
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: slli a1, a1, 16
-; RV32-NEXT: fmv.w.x fa0, a1
-; RV32-NEXT: fabs.s fa5, fa0
-; RV32-NEXT: flt.s a1, fa5, fs2
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: beqz a1, .LBB193_16
-; RV32-NEXT: # %bb.15:
-; RV32-NEXT: fcvt.w.s a0, fa0
-; RV32-NEXT: fcvt.s.w fa5, a0
-; RV32-NEXT: fsgnj.s fa0, fa5, fa0
-; RV32-NEXT: .LBB193_16:
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
-; RV32-NEXT: fld fs1, 32(sp) # 8-byte Folded Reload
-; RV32-NEXT: fld fs2, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 64
-; RV32-NEXT: ret
-;
-; RV64-LABEL: rint_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -96
-; RV64-NEXT: .cfi_def_cfa_offset 96
-; RV64-NEXT: sd ra, 88(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 80(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 72(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s2, 64(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs1, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs2, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset s2, -32
-; RV64-NEXT: .cfi_offset fs0, -40
-; RV64-NEXT: .cfi_offset fs1, -48
-; RV64-NEXT: .cfi_offset fs2, -56
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xe0, 0x00, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 96 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: lui a0, 307200
-; RV64-NEXT: fmv.w.x fs2, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs2
-; RV64-NEXT: beqz a0, .LBB193_2
-; RV64-NEXT: # %bb.1:
-; RV64-NEXT: fcvt.w.s a0, fa0
-; RV64-NEXT: fcvt.s.w fa5, a0
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB193_2:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: beqz a0, .LBB193_4
-; RV64-NEXT: # %bb.3:
-; RV64-NEXT: fcvt.w.s a0, fa5
-; RV64-NEXT: fcvt.s.w fa4, a0
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB193_4:
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: beqz a0, .LBB193_6
-; RV64-NEXT: # %bb.5:
-; RV64-NEXT: fcvt.w.s a0, fa5
-; RV64-NEXT: fcvt.s.w fa4, a0
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB193_6:
-; RV64-NEXT: fmv.x.w s2, fa0
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs1, fa0
-; RV64-NEXT: beqz a0, .LBB193_8
-; RV64-NEXT: # %bb.7:
-; RV64-NEXT: fcvt.w.s a0, fa5
-; RV64-NEXT: fcvt.s.w fa4, a0
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB193_8:
-; RV64-NEXT: fmv.x.w s1, fs0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: fabs.s fa4, fa5
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: beqz a0, .LBB193_10
-; RV64-NEXT: # %bb.9:
-; RV64-NEXT: fcvt.w.s a0, fa5
-; RV64-NEXT: fcvt.s.w fa4, a0
-; RV64-NEXT: fsgnj.s fa5, fa4, fa5
-; RV64-NEXT: .LBB193_10:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w s2, fs1
-; RV64-NEXT: fmv.s fa0, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fa5, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa4, fa0
-; RV64-NEXT: flt.s a0, fa4, fs2
-; RV64-NEXT: fmv.x.w s1, fa5
-; RV64-NEXT: beqz a0, .LBB193_12
-; RV64-NEXT: # %bb.11:
-; RV64-NEXT: fcvt.w.s a0, fa0
-; RV64-NEXT: fcvt.s.w fa5, a0
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB193_12:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: fmv.x.w s2, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v9, v9, 6
-; RV64-NEXT: vmv.x.s a0, v9
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a0, fa5, fs2
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a0, .LBB193_14
-; RV64-NEXT: # %bb.13:
-; RV64-NEXT: fcvt.w.s a0, fa0
-; RV64-NEXT: fcvt.s.w fa5, a0
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB193_14:
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, s2
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a1, v8
-; RV64-NEXT: slli a1, a1, 16
-; RV64-NEXT: fmv.w.x fa0, a1
-; RV64-NEXT: fabs.s fa5, fa0
-; RV64-NEXT: flt.s a1, fa5, fs2
-; RV64-NEXT: addi a2, sp, 32
-; RV64-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: beqz a1, .LBB193_16
-; RV64-NEXT: # %bb.15:
-; RV64-NEXT: fcvt.w.s a0, fa0
-; RV64-NEXT: fcvt.s.w fa5, a0
-; RV64-NEXT: fsgnj.s fa0, fa5, fa0
-; RV64-NEXT: .LBB193_16:
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 88(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 80(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 72(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s2, 64(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs1, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs2, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 96
-; RV64-NEXT: ret
+; CHECK-LABEL: rint_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = call <8 x bfloat> @llvm.rint.v8bf16(<8 x bfloat> %a)
store <8 x bfloat> %b, ptr %x
@@ -33143,357 +4780,26 @@ define void @rint_v2f64(ptr %x) {
}
define void @nearbyint_v8bf16(ptr %x) {
-; RV32-LABEL: nearbyint_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: fsd fs0, 24(sp) # 8-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset fs0, -24
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 1
-; RV32-NEXT: add a1, a2, a1
-; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v8, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call nearbyintf
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.s fs0, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call nearbyintf
-; RV32-NEXT: fmv.x.w s1, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call nearbyintf
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call nearbyintf
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call nearbyintf
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.s fs0, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call nearbyintf
-; RV32-NEXT: fmv.x.w s1, fs0
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call nearbyintf
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa0, a0
-; RV32-NEXT: call nearbyintf
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: fld fs0, 24(sp) # 8-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: nearbyint_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -48
-; RV64-NEXT: .cfi_def_cfa_offset 48
-; RV64-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
-; RV64-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: .cfi_offset fs0, -32
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a2, a1, 1
-; RV64-NEXT: add a1, a2, a1
-; RV64-NEXT: sub sp, sp, a1
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x03, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 3 * vlenb
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v8, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: call nearbyintf
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 16(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: call nearbyintf
-; RV64-NEXT: fmv.x.w s1, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: call nearbyintf
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: call nearbyintf
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: call nearbyintf
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.s fs0, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: call nearbyintf
-; RV64-NEXT: fmv.x.w s1, fs0
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: call nearbyintf
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa0, a0
-; RV64-NEXT: call nearbyintf
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
-; RV64-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 48
-; RV64-NEXT: ret
+; CHECK-LABEL: nearbyint_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfabs.v v8, v10
+; CHECK-NEXT: lui a1, 307200
+; CHECK-NEXT: fmv.w.x fa5, a1
+; CHECK-NEXT: vmflt.vf v0, v8, fa5
+; CHECK-NEXT: frflags a1
+; CHECK-NEXT: vfcvt.x.f.v v8, v10, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: fsflags a1
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfsgnj.vv v10, v8, v10, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = call <8 x bfloat> @llvm.nearbyint.v8bf16(<8 x bfloat> %a)
store <8 x bfloat> %b, ptr %x
@@ -33591,747 +4897,26 @@ define void @nearbyint_v2f64(ptr %x) {
}
define void @fmuladd_v8bf16(ptr %x, ptr %y, ptr %z) {
-; RV32-LABEL: fmuladd_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a4, a3, 2
-; RV32-NEXT: add a3, a4, a3
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 5 * vlenb
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v10, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vle16.v v8, (a2)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v10, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 2
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmuladd_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a3, vlenb
-; RV64-NEXT: slli a4, a3, 2
-; RV64-NEXT: add a3, a4, a3
-; RV64-NEXT: sub sp, sp, a3
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 5 * vlenb
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v10, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v8, (a2)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v10, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 2
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fmuladd_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a2)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v11, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v11
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = load <8 x bfloat>, ptr %y
%c = load <8 x bfloat>, ptr %z
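Here the CHECK lines show llvm.fmuladd being expanded unfused: the product is computed in f32 (vfmul.vv), narrowed to bf16 and re-widened (vfncvtbf16.f.f.w / vfwcvtbf16.f.f.v), then added to the widened third operand (vfadd.vv) and narrowed again before the store, which is legal because fmuladd leaves the choice of fusing to the target. A scalar sketch of the value computed, not part of the patch, using the illustrative helpers from the rint sketch:

#include <stdint.h>

float bf16_to_f32(uint16_t b);   /* as defined in the rint sketch above */
uint16_t f32_to_bf16(float f);

/* Scalar sketch of the unfused fmuladd expansion: the product is rounded to
   bf16 before the add, and the final sum is rounded to bf16 and stored back
   to x, matching the vse16.v to (a0). */
void fmuladd_v8bf16_model(uint16_t x[8], const uint16_t y[8],
                          const uint16_t z[8]) {
  for (int i = 0; i < 8; i++) {
    uint16_t prod = f32_to_bf16(bf16_to_f32(x[i]) * bf16_to_f32(y[i]));
    x[i] = f32_to_bf16(bf16_to_f32(prod) + bf16_to_f32(z[i]));
  }
}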
@@ -34341,749 +4926,27 @@ define void @fmuladd_v8bf16(ptr %x, ptr %y, ptr %z) {
}
define void @fmuladd_v6bf16(ptr %x, ptr %y, ptr %z) {
-; RV32-LABEL: fmuladd_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a4, a3, 2
-; RV32-NEXT: add a3, a4, a3
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 5 * vlenb
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v10, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vle16.v v8, (a2)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v10, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fadd.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 2
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmuladd_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a3, vlenb
-; RV64-NEXT: slli a4, a3, 2
-; RV64-NEXT: add a3, a4, a3
-; RV64-NEXT: sub sp, sp, a3
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 5 * vlenb
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v10, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v8, (a2)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v10, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fadd.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 2
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fmuladd_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a2)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v11, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v11
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfadd.vv v8, v8, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = load <6 x bfloat>, ptr %y
%c = load <6 x bfloat>, ptr %z
@@ -35208,747 +5071,26 @@ define void @fmuladd_v2f64(ptr %x, ptr %y, ptr %z) {
}
define void @fmsub_fmuladd_v8bf16(ptr %x, ptr %y, ptr %z) {
-; RV32-LABEL: fmsub_fmuladd_v8bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a4, a3, 2
-; RV32-NEXT: add a3, a4, a3
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 5 * vlenb
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v10, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vle16.v v8, (a2)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v10, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 2
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmsub_fmuladd_v8bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a3, vlenb
-; RV64-NEXT: slli a4, a3, 2
-; RV64-NEXT: add a3, a4, a3
-; RV64-NEXT: sub sp, sp, a3
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 5 * vlenb
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v10, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v8, (a2)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v10, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 2
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fmsub_fmuladd_v8bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a2)
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v11, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v11
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <8 x bfloat>, ptr %x
%b = load <8 x bfloat>, ptr %y
%c = load <8 x bfloat>, ptr %z
@@ -35959,749 +5101,27 @@ define void @fmsub_fmuladd_v8bf16(ptr %x, ptr %y, ptr %z) {
}
define void @fmsub_fmuladd_v6bf16(ptr %x, ptr %y, ptr %z) {
-; RV32-LABEL: fmsub_fmuladd_v6bf16:
-; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -48
-; RV32-NEXT: .cfi_def_cfa_offset 48
-; RV32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a4, a3, 2
-; RV32-NEXT: add a3, a4, a3
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 5 * vlenb
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV32-NEXT: vle16.v v10, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 1
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: mv s0, a0
-; RV32-NEXT: vle16.v v9, (a0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vle16.v v8, (a2)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vslidedown.vi v8, v10, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: vslidedown.vi v8, v9, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 1
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 2
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 3
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 5
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w s1, fa0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 4
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.x v8, a0
-; RV32-NEXT: vslide1down.vx v8, v8, s1
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 6
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: addi a0, sp, 32
-; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 1
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fmul.s fa0, fa4, fa5
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa5, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 7
-; RV32-NEXT: vmv.x.s a0, v8
-; RV32-NEXT: slli a0, a0, 16
-; RV32-NEXT: fmv.w.x fa4, a0
-; RV32-NEXT: fsub.s fa0, fa5, fa4
-; RV32-NEXT: call __truncsfbf2
-; RV32-NEXT: fmv.x.w a0, fa0
-; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT: vmv.v.i v0, 15
-; RV32-NEXT: addi a1, sp, 32
-; RV32-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslide1down.vx v8, v8, a0
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 32
-; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV32-NEXT: vse16.v v8, (s0)
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a1, a0, 2
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 48
-; RV32-NEXT: ret
-;
-; RV64-LABEL: fmsub_fmuladd_v6bf16:
-; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -64
-; RV64-NEXT: .cfi_def_cfa_offset 64
-; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
-; RV64-NEXT: sd s1, 40(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
-; RV64-NEXT: .cfi_offset s0, -16
-; RV64-NEXT: .cfi_offset s1, -24
-; RV64-NEXT: csrr a3, vlenb
-; RV64-NEXT: slli a4, a3, 2
-; RV64-NEXT: add a3, a4, a3
-; RV64-NEXT: sub sp, sp, a3
-; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x05, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 5 * vlenb
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; RV64-NEXT: vle16.v v10, (a1)
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vs1r.v v10, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: mv s0, a0
-; RV64-NEXT: vle16.v v9, (a0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vle16.v v8, (a2)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vslidedown.vi v8, v10, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: vslidedown.vi v8, v9, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 1
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: lh a0, 32(a0) # 8-byte Folded Reload
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 2
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 3
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 5
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w s1, fa0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 4
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.x v8, a0
-; RV64-NEXT: vslide1down.vx v8, v8, s1
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 6
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: addi a0, sp, 32
-; RV64-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 1
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fmul.s fa0, fa4, fa5
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa5, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 2
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 7
-; RV64-NEXT: vmv.x.s a0, v8
-; RV64-NEXT: slli a0, a0, 16
-; RV64-NEXT: fmv.w.x fa4, a0
-; RV64-NEXT: fsub.s fa0, fa5, fa4
-; RV64-NEXT: call __truncsfbf2
-; RV64-NEXT: fmv.x.w a0, fa0
-; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; RV64-NEXT: vmv.v.i v0, 15
-; RV64-NEXT: addi a1, sp, 32
-; RV64-NEXT: vl1r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vslide1down.vx v8, v8, a0
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 32
-; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
-; RV64-NEXT: vsetivli zero, 6, e16, m1, ta, mu
-; RV64-NEXT: vslidedown.vi v8, v9, 4, v0.t
-; RV64-NEXT: vse16.v v8, (s0)
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 2
-; RV64-NEXT: add a0, a1, a0
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
-; RV64-NEXT: ld s1, 40(sp) # 8-byte Folded Reload
-; RV64-NEXT: addi sp, sp, 64
-; RV64-NEXT: ret
+; CHECK-LABEL: fmsub_fmuladd_v6bf16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v9, (a0)
+; CHECK-NEXT: vle16.v v10, (a2)
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v14, v9
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfmul.vv v8, v14, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v11, v8
+; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v11
+; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT: vfsub.vv v8, v8, v12
+; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
+; CHECK-NEXT: vfncvtbf16.f.f.w v10, v8
+; CHECK-NEXT: vse16.v v10, (a0)
+; CHECK-NEXT: ret
%a = load <6 x bfloat>, ptr %x
%b = load <6 x bfloat>, ptr %y
%c = load <6 x bfloat>, ptr %z
From 2d3eb8bdea147ab0ffa91b81e6771a4214c1e55c Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 15 Oct 2024 17:29:45 +0100
Subject: [PATCH 3/3] clang-format
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index ae61b03a4aa3b6..3c4149b45575ce 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1380,7 +1380,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
{ISD::VP_MERGE, ISD::VP_SELECT, ISD::VSELECT, ISD::SELECT}, VT,
Custom);
// TODO: Promote to fp32.
- MVT F32VecVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
+ MVT F32VecVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
// Don't promote f16 vector operations to f32 if f32 vector type is
// not legal.
// TODO: could split the f16 vector into two vectors and do promotion.
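For readers unfamiliar with the promotion machinery these comments refer to, the rough shape is: compute the matching f32 vector type, and only if that type is legal, mark the narrow-float ops as promoted to it so legalization emits the widen/compute/narrow sequence visible in the CHECK lines above. A minimal illustrative sketch, not part of the patch itself; PromotableOps is a hypothetical placeholder for whichever ISD opcodes are being promoted:

    // Sketch only: promote fixed-length bf16 arithmetic to f32 when the
    // widened f32 vector type is itself legal.
    MVT F32VecVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
    if (isTypeLegal(F32VecVT))
      for (unsigned Opc : PromotableOps) // e.g. ISD::FADD, ISD::FSUB, ISD::FMUL
        setOperationPromotedToType(Opc, VT, F32VecVT);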