[llvm] f3afd0d - [RISCV] Add tests for fixed vector conversions between fp to/from i1

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Fri May 28 04:42:22 PDT 2021


Author: Fraser Cormack
Date: 2021-05-28T12:31:47+01:00
New Revision: f3afd0d193d2869933559f6d1a876b6c5707d1a9

URL: https://github.com/llvm/llvm-project/commit/f3afd0d193d2869933559f6d1a876b6c5707d1a9
DIFF: https://github.com/llvm/llvm-project/commit/f3afd0d193d2869933559f6d1a876b6c5707d1a9.diff

LOG: [RISCV] Add tests for fixed vector conversions between fp to/from i1

Unlike the corresponding scalable-vector versions, these fixed-length
conversions don't crash, but the code generation is scalarized. An
imminent patch will add support for the scalable-vector conversions and
improve the codegen for these fixed-length ones.
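
For reference, the new tests exercise IR of the following shape (this one
is taken verbatim from the diff below); at present it lowers to a
per-element extract, scalar fcvt and re-insert sequence rather than a
single vector conversion:

  define <2 x i1> @fp2si_v2f32_v2i1(<2 x float> %x) {
    %z = fptosi <2 x float> %x to <2 x i1>
    ret <2 x i1> %z
  }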

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
index 547315feaa22b..1198dabefe7b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,RV32,RV32-LMULMAX8
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,RV64,RV64-LMULMAX8
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,RV32,RV32-LMULMAX1
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,RV64,RV64-LMULMAX1
 
 define void @fp2si_v2f32_v2i32(<2 x float>* %x, <2 x i32>* %y) {
 ; CHECK-LABEL: fp2si_v2f32_v2i32:
@@ -32,6 +32,82 @@ define void @fp2ui_v2f32_v2i32(<2 x float>* %x, <2 x i32>* %y) {
   ret void
 }
 
+define <2 x i1> @fp2si_v2f32_v2i1(<2 x float> %x) {
+; RV32-LABEL: fp2si_v2f32_v2i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e32,mf2,ta,mu
+; RV32-NEXT:    vslidedown.vi v25, v8, 1
+; RV32-NEXT:    vfmv.f.s ft0, v25
+; RV32-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.v.x v25, a0
+; RV32-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.s.x v25, a0
+; RV32-NEXT:    vand.vi v25, v25, 1
+; RV32-NEXT:    vmsne.vi v0, v25, 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: fp2si_v2f32_v2i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e32,mf2,ta,mu
+; RV64-NEXT:    vslidedown.vi v25, v8, 1
+; RV64-NEXT:    vfmv.f.s ft0, v25
+; RV64-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.v.x v25, a0
+; RV64-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.s.x v25, a0
+; RV64-NEXT:    vand.vi v25, v25, 1
+; RV64-NEXT:    vmsne.vi v0, v25, 0
+; RV64-NEXT:    ret
+  %z = fptosi <2 x float> %x to <2 x i1>
+  ret <2 x i1> %z
+}
+
+define <2 x i1> @fp2ui_v2f32_v2i1(<2 x float> %x) {
+; RV32-LABEL: fp2ui_v2f32_v2i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e32,mf2,ta,mu
+; RV32-NEXT:    vslidedown.vi v25, v8, 1
+; RV32-NEXT:    vfmv.f.s ft0, v25
+; RV32-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.v.x v25, a0
+; RV32-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.s.x v25, a0
+; RV32-NEXT:    vand.vi v25, v25, 1
+; RV32-NEXT:    vmsne.vi v0, v25, 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: fp2ui_v2f32_v2i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e32,mf2,ta,mu
+; RV64-NEXT:    vslidedown.vi v25, v8, 1
+; RV64-NEXT:    vfmv.f.s ft0, v25
+; RV64-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.v.x v25, a0
+; RV64-NEXT:    vsetvli zero, zero, e32,mf2,ta,mu
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.s.x v25, a0
+; RV64-NEXT:    vand.vi v25, v25, 1
+; RV64-NEXT:    vmsne.vi v0, v25, 0
+; RV64-NEXT:    ret
+  %z = fptoui <2 x float> %x to <2 x i1>
+  ret <2 x i1> %z
+}
+
 define void @fp2si_v8f32_v8i32(<8 x float>* %x, <8 x i32>* %y) {
 ; LMULMAX8-LABEL: fp2si_v8f32_v8i32:
 ; LMULMAX8:       # %bb.0:
@@ -86,6 +162,370 @@ define void @fp2ui_v8f32_v8i32(<8 x float>* %x, <8 x i32>* %y) {
   ret void
 }
 
+define <8 x i1> @fp2si_v8f32_v8i1(<8 x float> %x) {
+; RV32-LMULMAX8-LABEL: fp2si_v8f32_v8i1:
+; RV32-LMULMAX8:       # %bb.0:
+; RV32-LMULMAX8-NEXT:    addi sp, sp, -16
+; RV32-LMULMAX8-NEXT:    .cfi_def_cfa_offset 16
+; RV32-LMULMAX8-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v8
+; RV32-LMULMAX8-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 8(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 1, e32,m2,ta,mu
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 7
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV32-LMULMAX8-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 15(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 6
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV32-LMULMAX8-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 14(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 5
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV32-LMULMAX8-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 13(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 4
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV32-LMULMAX8-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 12(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 3
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV32-LMULMAX8-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 11(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 2
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV32-LMULMAX8-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 10(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 1
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV32-LMULMAX8-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 9(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX8-NEXT:    addi a0, sp, 8
+; RV32-LMULMAX8-NEXT:    vle8.v v25, (a0)
+; RV32-LMULMAX8-NEXT:    vand.vi v25, v25, 1
+; RV32-LMULMAX8-NEXT:    vmsne.vi v0, v25, 0
+; RV32-LMULMAX8-NEXT:    addi sp, sp, 16
+; RV32-LMULMAX8-NEXT:    ret
+;
+; RV64-LMULMAX8-LABEL: fp2si_v8f32_v8i1:
+; RV64-LMULMAX8:       # %bb.0:
+; RV64-LMULMAX8-NEXT:    addi sp, sp, -16
+; RV64-LMULMAX8-NEXT:    .cfi_def_cfa_offset 16
+; RV64-LMULMAX8-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v8
+; RV64-LMULMAX8-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 8(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 1, e32,m2,ta,mu
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 7
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV64-LMULMAX8-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 15(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 6
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV64-LMULMAX8-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 14(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 5
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV64-LMULMAX8-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 13(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 4
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV64-LMULMAX8-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 12(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 3
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV64-LMULMAX8-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 11(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 2
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV64-LMULMAX8-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 10(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 1
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV64-LMULMAX8-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 9(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX8-NEXT:    addi a0, sp, 8
+; RV64-LMULMAX8-NEXT:    vle8.v v25, (a0)
+; RV64-LMULMAX8-NEXT:    vand.vi v25, v25, 1
+; RV64-LMULMAX8-NEXT:    vmsne.vi v0, v25, 0
+; RV64-LMULMAX8-NEXT:    addi sp, sp, 16
+; RV64-LMULMAX8-NEXT:    ret
+;
+; RV32-LMULMAX1-LABEL: fp2si_v8f32_v8i1:
+; RV32-LMULMAX1:       # %bb.0:
+; RV32-LMULMAX1-NEXT:    addi sp, sp, -16
+; RV32-LMULMAX1-NEXT:    .cfi_def_cfa_offset 16
+; RV32-LMULMAX1-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v9
+; RV32-LMULMAX1-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 12(sp)
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v8
+; RV32-LMULMAX1-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 8(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e32,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 3
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 15(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 2
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 14(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 1
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 13(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 3
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 11(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 2
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 10(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 1
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.w.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 9(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX1-NEXT:    addi a0, sp, 8
+; RV32-LMULMAX1-NEXT:    vle8.v v25, (a0)
+; RV32-LMULMAX1-NEXT:    vand.vi v25, v25, 1
+; RV32-LMULMAX1-NEXT:    vmsne.vi v0, v25, 0
+; RV32-LMULMAX1-NEXT:    addi sp, sp, 16
+; RV32-LMULMAX1-NEXT:    ret
+;
+; RV64-LMULMAX1-LABEL: fp2si_v8f32_v8i1:
+; RV64-LMULMAX1:       # %bb.0:
+; RV64-LMULMAX1-NEXT:    addi sp, sp, -16
+; RV64-LMULMAX1-NEXT:    .cfi_def_cfa_offset 16
+; RV64-LMULMAX1-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v9
+; RV64-LMULMAX1-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 12(sp)
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v8
+; RV64-LMULMAX1-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 8(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e32,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 3
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 15(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 2
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 14(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 1
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 13(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 3
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 11(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 2
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 10(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 1
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.l.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 9(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX1-NEXT:    addi a0, sp, 8
+; RV64-LMULMAX1-NEXT:    vle8.v v25, (a0)
+; RV64-LMULMAX1-NEXT:    vand.vi v25, v25, 1
+; RV64-LMULMAX1-NEXT:    vmsne.vi v0, v25, 0
+; RV64-LMULMAX1-NEXT:    addi sp, sp, 16
+; RV64-LMULMAX1-NEXT:    ret
+  %z = fptosi <8 x float> %x to <8 x i1>
+  ret <8 x i1> %z
+}
+
+define <8 x i1> @fp2ui_v8f32_v8i1(<8 x float> %x) {
+; RV32-LMULMAX8-LABEL: fp2ui_v8f32_v8i1:
+; RV32-LMULMAX8:       # %bb.0:
+; RV32-LMULMAX8-NEXT:    addi sp, sp, -16
+; RV32-LMULMAX8-NEXT:    .cfi_def_cfa_offset 16
+; RV32-LMULMAX8-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v8
+; RV32-LMULMAX8-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 8(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 1, e32,m2,ta,mu
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 7
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV32-LMULMAX8-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 15(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 6
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV32-LMULMAX8-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 14(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 5
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV32-LMULMAX8-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 13(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 4
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV32-LMULMAX8-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 12(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 3
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV32-LMULMAX8-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 11(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 2
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV32-LMULMAX8-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 10(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 1
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV32-LMULMAX8-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 9(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX8-NEXT:    addi a0, sp, 8
+; RV32-LMULMAX8-NEXT:    vle8.v v25, (a0)
+; RV32-LMULMAX8-NEXT:    vand.vi v25, v25, 1
+; RV32-LMULMAX8-NEXT:    vmsne.vi v0, v25, 0
+; RV32-LMULMAX8-NEXT:    addi sp, sp, 16
+; RV32-LMULMAX8-NEXT:    ret
+;
+; RV64-LMULMAX8-LABEL: fp2ui_v8f32_v8i1:
+; RV64-LMULMAX8:       # %bb.0:
+; RV64-LMULMAX8-NEXT:    addi sp, sp, -16
+; RV64-LMULMAX8-NEXT:    .cfi_def_cfa_offset 16
+; RV64-LMULMAX8-NEXT:    vsetvli zero, zero, e32,m2,ta,mu
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v8
+; RV64-LMULMAX8-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 8(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 1, e32,m2,ta,mu
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 7
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV64-LMULMAX8-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 15(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 6
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV64-LMULMAX8-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 14(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 5
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV64-LMULMAX8-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 13(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 4
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV64-LMULMAX8-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 12(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 3
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV64-LMULMAX8-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 11(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 2
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV64-LMULMAX8-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 10(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v8, 1
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v26
+; RV64-LMULMAX8-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 9(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX8-NEXT:    addi a0, sp, 8
+; RV64-LMULMAX8-NEXT:    vle8.v v25, (a0)
+; RV64-LMULMAX8-NEXT:    vand.vi v25, v25, 1
+; RV64-LMULMAX8-NEXT:    vmsne.vi v0, v25, 0
+; RV64-LMULMAX8-NEXT:    addi sp, sp, 16
+; RV64-LMULMAX8-NEXT:    ret
+;
+; RV32-LMULMAX1-LABEL: fp2ui_v8f32_v8i1:
+; RV32-LMULMAX1:       # %bb.0:
+; RV32-LMULMAX1-NEXT:    addi sp, sp, -16
+; RV32-LMULMAX1-NEXT:    .cfi_def_cfa_offset 16
+; RV32-LMULMAX1-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v9
+; RV32-LMULMAX1-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 12(sp)
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v8
+; RV32-LMULMAX1-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 8(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e32,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 3
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 15(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 2
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 14(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 1
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 13(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 3
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 11(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 2
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 10(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 1
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.wu.s a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 9(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX1-NEXT:    addi a0, sp, 8
+; RV32-LMULMAX1-NEXT:    vle8.v v25, (a0)
+; RV32-LMULMAX1-NEXT:    vand.vi v25, v25, 1
+; RV32-LMULMAX1-NEXT:    vmsne.vi v0, v25, 0
+; RV32-LMULMAX1-NEXT:    addi sp, sp, 16
+; RV32-LMULMAX1-NEXT:    ret
+;
+; RV64-LMULMAX1-LABEL: fp2ui_v8f32_v8i1:
+; RV64-LMULMAX1:       # %bb.0:
+; RV64-LMULMAX1-NEXT:    addi sp, sp, -16
+; RV64-LMULMAX1-NEXT:    .cfi_def_cfa_offset 16
+; RV64-LMULMAX1-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v9
+; RV64-LMULMAX1-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 12(sp)
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v8
+; RV64-LMULMAX1-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 8(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e32,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 3
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 15(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 2
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 14(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 1
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 13(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 3
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 11(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 2
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 10(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 1
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.lu.s a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 9(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX1-NEXT:    addi a0, sp, 8
+; RV64-LMULMAX1-NEXT:    vle8.v v25, (a0)
+; RV64-LMULMAX1-NEXT:    vand.vi v25, v25, 1
+; RV64-LMULMAX1-NEXT:    vmsne.vi v0, v25, 0
+; RV64-LMULMAX1-NEXT:    addi sp, sp, 16
+; RV64-LMULMAX1-NEXT:    ret
+  %z = fptoui <8 x float> %x to <8 x i1>
+  ret <8 x i1> %z
+}
+
 define void @fp2si_v2f32_v2i64(<2 x float>* %x, <2 x i64>* %y) {
 ; CHECK-LABEL: fp2si_v2f32_v2i64:
 ; CHECK:       # %bb.0:
@@ -232,6 +672,82 @@ define void @fp2ui_v2f16_v2i64(<2 x half>* %x, <2 x i64>* %y) {
   ret void
 }
 
+define <2 x i1> @fp2si_v2f16_v2i1(<2 x half> %x) {
+; RV32-LABEL: fp2si_v2f16_v2i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e16,mf4,ta,mu
+; RV32-NEXT:    vslidedown.vi v25, v8, 1
+; RV32-NEXT:    vfmv.f.s ft0, v25
+; RV32-NEXT:    fcvt.w.h a0, ft0, rtz
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.v.x v25, a0
+; RV32-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fcvt.w.h a0, ft0, rtz
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.s.x v25, a0
+; RV32-NEXT:    vand.vi v25, v25, 1
+; RV32-NEXT:    vmsne.vi v0, v25, 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: fp2si_v2f16_v2i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e16,mf4,ta,mu
+; RV64-NEXT:    vslidedown.vi v25, v8, 1
+; RV64-NEXT:    vfmv.f.s ft0, v25
+; RV64-NEXT:    fcvt.l.h a0, ft0, rtz
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.v.x v25, a0
+; RV64-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fcvt.l.h a0, ft0, rtz
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.s.x v25, a0
+; RV64-NEXT:    vand.vi v25, v25, 1
+; RV64-NEXT:    vmsne.vi v0, v25, 0
+; RV64-NEXT:    ret
+  %z = fptosi <2 x half> %x to <2 x i1>
+  ret <2 x i1> %z
+}
+
+define <2 x i1> @fp2ui_v2f16_v2i1(<2 x half> %x) {
+; RV32-LABEL: fp2ui_v2f16_v2i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e16,mf4,ta,mu
+; RV32-NEXT:    vslidedown.vi v25, v8, 1
+; RV32-NEXT:    vfmv.f.s ft0, v25
+; RV32-NEXT:    fcvt.wu.h a0, ft0, rtz
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.v.x v25, a0
+; RV32-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fcvt.wu.h a0, ft0, rtz
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.s.x v25, a0
+; RV32-NEXT:    vand.vi v25, v25, 1
+; RV32-NEXT:    vmsne.vi v0, v25, 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: fp2ui_v2f16_v2i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e16,mf4,ta,mu
+; RV64-NEXT:    vslidedown.vi v25, v8, 1
+; RV64-NEXT:    vfmv.f.s ft0, v25
+; RV64-NEXT:    fcvt.lu.h a0, ft0, rtz
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.v.x v25, a0
+; RV64-NEXT:    vsetvli zero, zero, e16,mf4,ta,mu
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fcvt.lu.h a0, ft0, rtz
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.s.x v25, a0
+; RV64-NEXT:    vand.vi v25, v25, 1
+; RV64-NEXT:    vmsne.vi v0, v25, 0
+; RV64-NEXT:    ret
+  %z = fptoui <2 x half> %x to <2 x i1>
+  ret <2 x i1> %z
+}
+
 define void @fp2si_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) {
 ; CHECK-LABEL: fp2si_v2f64_v2i8:
 ; CHECK:       # %bb.0:
@@ -270,6 +786,82 @@ define void @fp2ui_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) {
   ret void
 }
 
+define <2 x i1> @fp2si_v2f64_v2i1(<2 x double> %x) {
+; RV32-LABEL: fp2si_v2f64_v2i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64,m1,ta,mu
+; RV32-NEXT:    vslidedown.vi v25, v8, 1
+; RV32-NEXT:    vfmv.f.s ft0, v25
+; RV32-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.v.x v25, a0
+; RV32-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.s.x v25, a0
+; RV32-NEXT:    vand.vi v25, v25, 1
+; RV32-NEXT:    vmsne.vi v0, v25, 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: fp2si_v2f64_v2i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64,m1,ta,mu
+; RV64-NEXT:    vslidedown.vi v25, v8, 1
+; RV64-NEXT:    vfmv.f.s ft0, v25
+; RV64-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.v.x v25, a0
+; RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.s.x v25, a0
+; RV64-NEXT:    vand.vi v25, v25, 1
+; RV64-NEXT:    vmsne.vi v0, v25, 0
+; RV64-NEXT:    ret
+  %z = fptosi <2 x double> %x to <2 x i1>
+  ret <2 x i1> %z
+}
+
+define <2 x i1> @fp2ui_v2f64_v2i1(<2 x double> %x) {
+; RV32-LABEL: fp2ui_v2f64_v2i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64,m1,ta,mu
+; RV32-NEXT:    vslidedown.vi v25, v8, 1
+; RV32-NEXT:    vfmv.f.s ft0, v25
+; RV32-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.v.x v25, a0
+; RV32-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; RV32-NEXT:    vfmv.f.s ft0, v8
+; RV32-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.s.x v25, a0
+; RV32-NEXT:    vand.vi v25, v25, 1
+; RV32-NEXT:    vmsne.vi v0, v25, 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: fp2ui_v2f64_v2i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64,m1,ta,mu
+; RV64-NEXT:    vslidedown.vi v25, v8, 1
+; RV64-NEXT:    vfmv.f.s ft0, v25
+; RV64-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.v.x v25, a0
+; RV64-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; RV64-NEXT:    vfmv.f.s ft0, v8
+; RV64-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.s.x v25, a0
+; RV64-NEXT:    vand.vi v25, v25, 1
+; RV64-NEXT:    vmsne.vi v0, v25, 0
+; RV64-NEXT:    ret
+  %z = fptoui <2 x double> %x to <2 x i1>
+  ret <2 x i1> %z
+}
+
 define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
 ; LMULMAX8-LABEL: fp2si_v8f64_v8i8:
 ; LMULMAX8:       # %bb.0:
@@ -417,3 +1009,359 @@ define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
   store <8 x i8> %d, <8 x i8>* %y
   ret void
 }
+
+define <8 x i1> @fp2si_v8f64_v8i1(<8 x double> %x) {
+; RV32-LMULMAX8-LABEL: fp2si_v8f64_v8i1:
+; RV32-LMULMAX8:       # %bb.0:
+; RV32-LMULMAX8-NEXT:    addi sp, sp, -16
+; RV32-LMULMAX8-NEXT:    .cfi_def_cfa_offset 16
+; RV32-LMULMAX8-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v8
+; RV32-LMULMAX8-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 8(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 1, e64,m4,ta,mu
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 7
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV32-LMULMAX8-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 15(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 6
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV32-LMULMAX8-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 14(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 5
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV32-LMULMAX8-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 13(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 4
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV32-LMULMAX8-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 12(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 3
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV32-LMULMAX8-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 11(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 2
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV32-LMULMAX8-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 10(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 1
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV32-LMULMAX8-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 9(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX8-NEXT:    addi a0, sp, 8
+; RV32-LMULMAX8-NEXT:    vle8.v v25, (a0)
+; RV32-LMULMAX8-NEXT:    vand.vi v25, v25, 1
+; RV32-LMULMAX8-NEXT:    vmsne.vi v0, v25, 0
+; RV32-LMULMAX8-NEXT:    addi sp, sp, 16
+; RV32-LMULMAX8-NEXT:    ret
+;
+; RV64-LMULMAX8-LABEL: fp2si_v8f64_v8i1:
+; RV64-LMULMAX8:       # %bb.0:
+; RV64-LMULMAX8-NEXT:    addi sp, sp, -16
+; RV64-LMULMAX8-NEXT:    .cfi_def_cfa_offset 16
+; RV64-LMULMAX8-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v8
+; RV64-LMULMAX8-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 8(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 1, e64,m4,ta,mu
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 7
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV64-LMULMAX8-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 15(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 6
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV64-LMULMAX8-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 14(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 5
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV64-LMULMAX8-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 13(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 4
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV64-LMULMAX8-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 12(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 3
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV64-LMULMAX8-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 11(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 2
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV64-LMULMAX8-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 10(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 1
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV64-LMULMAX8-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 9(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX8-NEXT:    addi a0, sp, 8
+; RV64-LMULMAX8-NEXT:    vle8.v v25, (a0)
+; RV64-LMULMAX8-NEXT:    vand.vi v25, v25, 1
+; RV64-LMULMAX8-NEXT:    vmsne.vi v0, v25, 0
+; RV64-LMULMAX8-NEXT:    addi sp, sp, 16
+; RV64-LMULMAX8-NEXT:    ret
+;
+; RV32-LMULMAX1-LABEL: fp2si_v8f64_v8i1:
+; RV32-LMULMAX1:       # %bb.0:
+; RV32-LMULMAX1-NEXT:    addi sp, sp, -16
+; RV32-LMULMAX1-NEXT:    .cfi_def_cfa_offset 16
+; RV32-LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v11
+; RV32-LMULMAX1-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 14(sp)
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v10
+; RV32-LMULMAX1-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 12(sp)
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v9
+; RV32-LMULMAX1-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 10(sp)
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v8
+; RV32-LMULMAX1-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 8(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e64,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v11, 1
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 15(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v10, 1
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 13(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 1
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 11(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 1
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.w.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 9(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX1-NEXT:    addi a0, sp, 8
+; RV32-LMULMAX1-NEXT:    vle8.v v25, (a0)
+; RV32-LMULMAX1-NEXT:    vand.vi v25, v25, 1
+; RV32-LMULMAX1-NEXT:    vmsne.vi v0, v25, 0
+; RV32-LMULMAX1-NEXT:    addi sp, sp, 16
+; RV32-LMULMAX1-NEXT:    ret
+;
+; RV64-LMULMAX1-LABEL: fp2si_v8f64_v8i1:
+; RV64-LMULMAX1:       # %bb.0:
+; RV64-LMULMAX1-NEXT:    addi sp, sp, -16
+; RV64-LMULMAX1-NEXT:    .cfi_def_cfa_offset 16
+; RV64-LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v11
+; RV64-LMULMAX1-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 14(sp)
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v10
+; RV64-LMULMAX1-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 12(sp)
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v9
+; RV64-LMULMAX1-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 10(sp)
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v8
+; RV64-LMULMAX1-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 8(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e64,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v11, 1
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 15(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v10, 1
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 13(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 1
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 11(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 1
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.l.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 9(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX1-NEXT:    addi a0, sp, 8
+; RV64-LMULMAX1-NEXT:    vle8.v v25, (a0)
+; RV64-LMULMAX1-NEXT:    vand.vi v25, v25, 1
+; RV64-LMULMAX1-NEXT:    vmsne.vi v0, v25, 0
+; RV64-LMULMAX1-NEXT:    addi sp, sp, 16
+; RV64-LMULMAX1-NEXT:    ret
+  %z = fptosi <8 x double> %x to <8 x i1>
+  ret <8 x i1> %z
+}
+
+define <8 x i1> @fp2ui_v8f64_v8i1(<8 x double> %x) {
+; RV32-LMULMAX8-LABEL: fp2ui_v8f64_v8i1:
+; RV32-LMULMAX8:       # %bb.0:
+; RV32-LMULMAX8-NEXT:    addi sp, sp, -16
+; RV32-LMULMAX8-NEXT:    .cfi_def_cfa_offset 16
+; RV32-LMULMAX8-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v8
+; RV32-LMULMAX8-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 8(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 1, e64,m4,ta,mu
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 7
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV32-LMULMAX8-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 15(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 6
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV32-LMULMAX8-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 14(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 5
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV32-LMULMAX8-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 13(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 4
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV32-LMULMAX8-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 12(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 3
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV32-LMULMAX8-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 11(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 2
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV32-LMULMAX8-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 10(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 1
+; RV32-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV32-LMULMAX8-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX8-NEXT:    sb a0, 9(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX8-NEXT:    addi a0, sp, 8
+; RV32-LMULMAX8-NEXT:    vle8.v v25, (a0)
+; RV32-LMULMAX8-NEXT:    vand.vi v25, v25, 1
+; RV32-LMULMAX8-NEXT:    vmsne.vi v0, v25, 0
+; RV32-LMULMAX8-NEXT:    addi sp, sp, 16
+; RV32-LMULMAX8-NEXT:    ret
+;
+; RV64-LMULMAX8-LABEL: fp2ui_v8f64_v8i1:
+; RV64-LMULMAX8:       # %bb.0:
+; RV64-LMULMAX8-NEXT:    addi sp, sp, -16
+; RV64-LMULMAX8-NEXT:    .cfi_def_cfa_offset 16
+; RV64-LMULMAX8-NEXT:    vsetvli zero, zero, e64,m4,ta,mu
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v8
+; RV64-LMULMAX8-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 8(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 1, e64,m4,ta,mu
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 7
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV64-LMULMAX8-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 15(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 6
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV64-LMULMAX8-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 14(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 5
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV64-LMULMAX8-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 13(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 4
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV64-LMULMAX8-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 12(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 3
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV64-LMULMAX8-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 11(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 2
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV64-LMULMAX8-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 10(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v28, v8, 1
+; RV64-LMULMAX8-NEXT:    vfmv.f.s ft0, v28
+; RV64-LMULMAX8-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX8-NEXT:    sb a0, 9(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX8-NEXT:    addi a0, sp, 8
+; RV64-LMULMAX8-NEXT:    vle8.v v25, (a0)
+; RV64-LMULMAX8-NEXT:    vand.vi v25, v25, 1
+; RV64-LMULMAX8-NEXT:    vmsne.vi v0, v25, 0
+; RV64-LMULMAX8-NEXT:    addi sp, sp, 16
+; RV64-LMULMAX8-NEXT:    ret
+;
+; RV32-LMULMAX1-LABEL: fp2ui_v8f64_v8i1:
+; RV32-LMULMAX1:       # %bb.0:
+; RV32-LMULMAX1-NEXT:    addi sp, sp, -16
+; RV32-LMULMAX1-NEXT:    .cfi_def_cfa_offset 16
+; RV32-LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v11
+; RV32-LMULMAX1-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 14(sp)
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v10
+; RV32-LMULMAX1-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 12(sp)
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v9
+; RV32-LMULMAX1-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 10(sp)
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v8
+; RV32-LMULMAX1-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 8(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e64,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v11, 1
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 15(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v10, 1
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 13(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 1
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 11(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 1
+; RV32-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV32-LMULMAX1-NEXT:    fcvt.wu.d a0, ft0, rtz
+; RV32-LMULMAX1-NEXT:    sb a0, 9(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX1-NEXT:    addi a0, sp, 8
+; RV32-LMULMAX1-NEXT:    vle8.v v25, (a0)
+; RV32-LMULMAX1-NEXT:    vand.vi v25, v25, 1
+; RV32-LMULMAX1-NEXT:    vmsne.vi v0, v25, 0
+; RV32-LMULMAX1-NEXT:    addi sp, sp, 16
+; RV32-LMULMAX1-NEXT:    ret
+;
+; RV64-LMULMAX1-LABEL: fp2ui_v8f64_v8i1:
+; RV64-LMULMAX1:       # %bb.0:
+; RV64-LMULMAX1-NEXT:    addi sp, sp, -16
+; RV64-LMULMAX1-NEXT:    .cfi_def_cfa_offset 16
+; RV64-LMULMAX1-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v11
+; RV64-LMULMAX1-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 14(sp)
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v10
+; RV64-LMULMAX1-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 12(sp)
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v9
+; RV64-LMULMAX1-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 10(sp)
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v8
+; RV64-LMULMAX1-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 8(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e64,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v11, 1
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 15(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v10, 1
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 13(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v9, 1
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 11(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v8, 1
+; RV64-LMULMAX1-NEXT:    vfmv.f.s ft0, v25
+; RV64-LMULMAX1-NEXT:    fcvt.lu.d a0, ft0, rtz
+; RV64-LMULMAX1-NEXT:    sb a0, 9(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX1-NEXT:    addi a0, sp, 8
+; RV64-LMULMAX1-NEXT:    vle8.v v25, (a0)
+; RV64-LMULMAX1-NEXT:    vand.vi v25, v25, 1
+; RV64-LMULMAX1-NEXT:    vmsne.vi v0, v25, 0
+; RV64-LMULMAX1-NEXT:    addi sp, sp, 16
+; RV64-LMULMAX1-NEXT:    ret
+  %z = fptoui <8 x double> %x to <8 x i1>
+  ret <8 x i1> %z
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
index 38d6dbc614ec8..eabdb3d7f5c46 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,RV32,RV32-LMULMAX8
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,RV64,RV64-LMULMAX8
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,RV32,RV32-LMULMAX1
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,RV64,RV64-LMULMAX1
 
 define void @si2fp_v2i32_v2f32(<2 x i32>* %x, <2 x float>* %y) {
 ; CHECK-LABEL: si2fp_v2i32_v2f32:
@@ -32,6 +32,90 @@ define void @ui2fp_v2i32_v2f32(<2 x i32>* %x, <2 x float>* %y) {
   ret void
 }
 
+define <2 x float> @si2fp_v2i1_v2f32(<2 x i1> %x) {
+; RV32-LABEL: si2fp_v2i1_v2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.v.i v25, 0
+; RV32-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    fcvt.s.w ft0, a0
+; RV32-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV32-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    fcvt.s.w ft1, a0
+; RV32-NEXT:    vsetivli zero, 2, e32,mf2,ta,mu
+; RV32-NEXT:    vfmv.v.f v8, ft1
+; RV32-NEXT:    vfmv.s.f v8, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: si2fp_v2i1_v2f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.v.i v25, 0
+; RV64-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    fcvt.s.l ft0, a0
+; RV64-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV64-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    fcvt.s.l ft1, a0
+; RV64-NEXT:    vsetivli zero, 2, e32,mf2,ta,mu
+; RV64-NEXT:    vfmv.v.f v8, ft1
+; RV64-NEXT:    vfmv.s.f v8, ft0
+; RV64-NEXT:    ret
+  %z = sitofp <2 x i1> %x to <2 x float>
+  ret <2 x float> %z
+}
+
+define <2 x float> @ui2fp_v2i1_v2f32(<2 x i1> %x) {
+; RV32-LABEL: ui2fp_v2i1_v2f32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.v.i v25, 0
+; RV32-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    fcvt.s.wu ft0, a0
+; RV32-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV32-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    fcvt.s.wu ft1, a0
+; RV32-NEXT:    vsetivli zero, 2, e32,mf2,ta,mu
+; RV32-NEXT:    vfmv.v.f v8, ft1
+; RV32-NEXT:    vfmv.s.f v8, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: ui2fp_v2i1_v2f32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.v.i v25, 0
+; RV64-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    fcvt.s.lu ft0, a0
+; RV64-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV64-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    fcvt.s.lu ft1, a0
+; RV64-NEXT:    vsetivli zero, 2, e32,mf2,ta,mu
+; RV64-NEXT:    vfmv.v.f v8, ft1
+; RV64-NEXT:    vfmv.s.f v8, ft0
+; RV64-NEXT:    ret
+  %z = uitofp <2 x i1> %x to <2 x float>
+  ret <2 x float> %z
+}
+
 define void @si2fp_v8i32_v8f32(<8 x i32>* %x, <8 x float>* %y) {
 ; LMULMAX8-LABEL: si2fp_v8i32_v8f32:
 ; LMULMAX8:       # %bb.0:
@@ -86,6 +170,542 @@ define void @ui2fp_v8i32_v8f32(<8 x i32>* %x, <8 x float>* %y) {
   ret void
 }
 
+define <8 x float> @si2fp_v8i1_v8f32(<8 x i1> %x) {
+; RV32-LMULMAX8-LABEL: si2fp_v8i1_v8f32:
+; RV32-LMULMAX8:       # %bb.0:
+; RV32-LMULMAX8-NEXT:    addi sp, sp, -64
+; RV32-LMULMAX8-NEXT:    .cfi_def_cfa_offset 64
+; RV32-LMULMAX8-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-LMULMAX8-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-LMULMAX8-NEXT:    .cfi_offset ra, -4
+; RV32-LMULMAX8-NEXT:    .cfi_offset s0, -8
+; RV32-LMULMAX8-NEXT:    addi s0, sp, 64
+; RV32-LMULMAX8-NEXT:    .cfi_def_cfa s0, 0
+; RV32-LMULMAX8-NEXT:    andi sp, sp, -32
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX8-NEXT:    vmv.v.i v25, 0
+; RV32-LMULMAX8-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 0(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 1, e8,mf2,ta,mu
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 7
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 28(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 6
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 24(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 5
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 20(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 4
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 16(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 3
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 12(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 2
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 8(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 4(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 8, e32,m2,ta,mu
+; RV32-LMULMAX8-NEXT:    vle32.v v8, (sp)
+; RV32-LMULMAX8-NEXT:    addi sp, s0, -64
+; RV32-LMULMAX8-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-LMULMAX8-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-LMULMAX8-NEXT:    addi sp, sp, 64
+; RV32-LMULMAX8-NEXT:    ret
+;
+; RV64-LMULMAX8-LABEL: si2fp_v8i1_v8f32:
+; RV64-LMULMAX8:       # %bb.0:
+; RV64-LMULMAX8-NEXT:    addi sp, sp, -64
+; RV64-LMULMAX8-NEXT:    .cfi_def_cfa_offset 64
+; RV64-LMULMAX8-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-LMULMAX8-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-LMULMAX8-NEXT:    .cfi_offset ra, -8
+; RV64-LMULMAX8-NEXT:    .cfi_offset s0, -16
+; RV64-LMULMAX8-NEXT:    addi s0, sp, 64
+; RV64-LMULMAX8-NEXT:    .cfi_def_cfa s0, 0
+; RV64-LMULMAX8-NEXT:    andi sp, sp, -32
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX8-NEXT:    vmv.v.i v25, 0
+; RV64-LMULMAX8-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 0(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 1, e8,mf2,ta,mu
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 7
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 28(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 6
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 24(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 5
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 20(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 4
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 16(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 3
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 12(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 2
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 8(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 4(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 8, e32,m2,ta,mu
+; RV64-LMULMAX8-NEXT:    vle32.v v8, (sp)
+; RV64-LMULMAX8-NEXT:    addi sp, s0, -64
+; RV64-LMULMAX8-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-LMULMAX8-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-LMULMAX8-NEXT:    addi sp, sp, 64
+; RV64-LMULMAX8-NEXT:    ret
+;
+; RV32-LMULMAX1-LABEL: si2fp_v8i1_v8f32:
+; RV32-LMULMAX1:       # %bb.0:
+; RV32-LMULMAX1-NEXT:    addi sp, sp, -32
+; RV32-LMULMAX1-NEXT:    .cfi_def_cfa_offset 32
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vmv.v.i v25, 0
+; RV32-LMULMAX1-NEXT:    vmerge.vim v26, v25, 1, v0
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 16(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v27, v26, 3
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 28(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v27, v26, 2
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 24(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 1
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 20(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX1-NEXT:    vmv.v.i v26, 0
+; RV32-LMULMAX1-NEXT:    vmerge.vim v26, v26, 1, v0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf2,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 4
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vmsne.vi v0, v26, 0
+; RV32-LMULMAX1-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 0(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v26, v25, 3
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 12(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v26, v25, 2
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 8(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.s.w ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 4(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e32,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    addi a0, sp, 16
+; RV32-LMULMAX1-NEXT:    vle32.v v8, (a0)
+; RV32-LMULMAX1-NEXT:    vle32.v v9, (sp)
+; RV32-LMULMAX1-NEXT:    addi sp, sp, 32
+; RV32-LMULMAX1-NEXT:    ret
+;
+; RV64-LMULMAX1-LABEL: si2fp_v8i1_v8f32:
+; RV64-LMULMAX1:       # %bb.0:
+; RV64-LMULMAX1-NEXT:    addi sp, sp, -32
+; RV64-LMULMAX1-NEXT:    .cfi_def_cfa_offset 32
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vmv.v.i v25, 0
+; RV64-LMULMAX1-NEXT:    vmerge.vim v26, v25, 1, v0
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 16(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v27, v26, 3
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 28(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v27, v26, 2
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 24(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 1
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 20(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX1-NEXT:    vmv.v.i v26, 0
+; RV64-LMULMAX1-NEXT:    vmerge.vim v26, v26, 1, v0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf2,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 4
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vmsne.vi v0, v26, 0
+; RV64-LMULMAX1-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 0(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v26, v25, 3
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 12(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v26, v25, 2
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 8(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.s.l ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 4(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e32,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    addi a0, sp, 16
+; RV64-LMULMAX1-NEXT:    vle32.v v8, (a0)
+; RV64-LMULMAX1-NEXT:    vle32.v v9, (sp)
+; RV64-LMULMAX1-NEXT:    addi sp, sp, 32
+; RV64-LMULMAX1-NEXT:    ret
+  %z = sitofp <8 x i1> %x to <8 x float>
+  ret <8 x float> %z
+}
+
+define <8 x float> @ui2fp_v8i1_v8f32(<8 x i1> %x) {
+; RV32-LMULMAX8-LABEL: ui2fp_v8i1_v8f32:
+; RV32-LMULMAX8:       # %bb.0:
+; RV32-LMULMAX8-NEXT:    addi sp, sp, -64
+; RV32-LMULMAX8-NEXT:    .cfi_def_cfa_offset 64
+; RV32-LMULMAX8-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-LMULMAX8-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-LMULMAX8-NEXT:    .cfi_offset ra, -4
+; RV32-LMULMAX8-NEXT:    .cfi_offset s0, -8
+; RV32-LMULMAX8-NEXT:    addi s0, sp, 64
+; RV32-LMULMAX8-NEXT:    .cfi_def_cfa s0, 0
+; RV32-LMULMAX8-NEXT:    andi sp, sp, -32
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX8-NEXT:    vmv.v.i v25, 0
+; RV32-LMULMAX8-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 0(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 1, e8,mf2,ta,mu
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 7
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 28(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 6
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 24(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 5
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 20(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 4
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 16(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 3
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 12(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 2
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 8(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsw ft0, 4(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 8, e32,m2,ta,mu
+; RV32-LMULMAX8-NEXT:    vle32.v v8, (sp)
+; RV32-LMULMAX8-NEXT:    addi sp, s0, -64
+; RV32-LMULMAX8-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-LMULMAX8-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-LMULMAX8-NEXT:    addi sp, sp, 64
+; RV32-LMULMAX8-NEXT:    ret
+;
+; RV64-LMULMAX8-LABEL: ui2fp_v8i1_v8f32:
+; RV64-LMULMAX8:       # %bb.0:
+; RV64-LMULMAX8-NEXT:    addi sp, sp, -64
+; RV64-LMULMAX8-NEXT:    .cfi_def_cfa_offset 64
+; RV64-LMULMAX8-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-LMULMAX8-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-LMULMAX8-NEXT:    .cfi_offset ra, -8
+; RV64-LMULMAX8-NEXT:    .cfi_offset s0, -16
+; RV64-LMULMAX8-NEXT:    addi s0, sp, 64
+; RV64-LMULMAX8-NEXT:    .cfi_def_cfa s0, 0
+; RV64-LMULMAX8-NEXT:    andi sp, sp, -32
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX8-NEXT:    vmv.v.i v25, 0
+; RV64-LMULMAX8-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 0(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 1, e8,mf2,ta,mu
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 7
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 28(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 6
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 24(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 5
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 20(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 4
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 16(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 3
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 12(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 2
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 8(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsw ft0, 4(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 8, e32,m2,ta,mu
+; RV64-LMULMAX8-NEXT:    vle32.v v8, (sp)
+; RV64-LMULMAX8-NEXT:    addi sp, s0, -64
+; RV64-LMULMAX8-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-LMULMAX8-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-LMULMAX8-NEXT:    addi sp, sp, 64
+; RV64-LMULMAX8-NEXT:    ret
+;
+; RV32-LMULMAX1-LABEL: ui2fp_v8i1_v8f32:
+; RV32-LMULMAX1:       # %bb.0:
+; RV32-LMULMAX1-NEXT:    addi sp, sp, -32
+; RV32-LMULMAX1-NEXT:    .cfi_def_cfa_offset 32
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vmv.v.i v25, 0
+; RV32-LMULMAX1-NEXT:    vmerge.vim v26, v25, 1, v0
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 16(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v27, v26, 3
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 28(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v27, v26, 2
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 24(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 1
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 20(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX1-NEXT:    vmv.v.i v26, 0
+; RV32-LMULMAX1-NEXT:    vmerge.vim v26, v26, 1, v0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf2,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 4
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vmsne.vi v0, v26, 0
+; RV32-LMULMAX1-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 0(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v26, v25, 3
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 12(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v26, v25, 2
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 8(sp)
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.s.wu ft0, a0
+; RV32-LMULMAX1-NEXT:    fsw ft0, 4(sp)
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e32,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    addi a0, sp, 16
+; RV32-LMULMAX1-NEXT:    vle32.v v8, (a0)
+; RV32-LMULMAX1-NEXT:    vle32.v v9, (sp)
+; RV32-LMULMAX1-NEXT:    addi sp, sp, 32
+; RV32-LMULMAX1-NEXT:    ret
+;
+; RV64-LMULMAX1-LABEL: ui2fp_v8i1_v8f32:
+; RV64-LMULMAX1:       # %bb.0:
+; RV64-LMULMAX1-NEXT:    addi sp, sp, -32
+; RV64-LMULMAX1-NEXT:    .cfi_def_cfa_offset 32
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vmv.v.i v25, 0
+; RV64-LMULMAX1-NEXT:    vmerge.vim v26, v25, 1, v0
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 16(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v27, v26, 3
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 28(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v27, v26, 2
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 24(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 1
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 20(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX1-NEXT:    vmv.v.i v26, 0
+; RV64-LMULMAX1-NEXT:    vmerge.vim v26, v26, 1, v0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf2,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 4
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vmsne.vi v0, v26, 0
+; RV64-LMULMAX1-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 0(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v26, v25, 3
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 12(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v26, v25, 2
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 8(sp)
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.s.lu ft0, a0
+; RV64-LMULMAX1-NEXT:    fsw ft0, 4(sp)
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e32,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    addi a0, sp, 16
+; RV64-LMULMAX1-NEXT:    vle32.v v8, (a0)
+; RV64-LMULMAX1-NEXT:    vle32.v v9, (sp)
+; RV64-LMULMAX1-NEXT:    addi sp, sp, 32
+; RV64-LMULMAX1-NEXT:    ret
+  %z = uitofp <8 x i1> %x to <8 x float>
+  ret <8 x float> %z
+}
+
 define void @si2fp_v2i16_v2f64(<2 x i16>* %x, <2 x double>* %y) {
 ; CHECK-LABEL: si2fp_v2i16_v2f64:
 ; CHECK:       # %bb.0:
@@ -208,6 +828,602 @@ define void @ui2fp_v8i16_v8f64(<8 x i16>* %x, <8 x double>* %y) {
   ret void
 }
 
+define <8 x double> @si2fp_v8i1_v8f64(<8 x i1> %x) {
+; RV32-LMULMAX8-LABEL: si2fp_v8i1_v8f64:
+; RV32-LMULMAX8:       # %bb.0:
+; RV32-LMULMAX8-NEXT:    addi sp, sp, -128
+; RV32-LMULMAX8-NEXT:    .cfi_def_cfa_offset 128
+; RV32-LMULMAX8-NEXT:    sw ra, 124(sp) # 4-byte Folded Spill
+; RV32-LMULMAX8-NEXT:    sw s0, 120(sp) # 4-byte Folded Spill
+; RV32-LMULMAX8-NEXT:    .cfi_offset ra, -4
+; RV32-LMULMAX8-NEXT:    .cfi_offset s0, -8
+; RV32-LMULMAX8-NEXT:    addi s0, sp, 128
+; RV32-LMULMAX8-NEXT:    .cfi_def_cfa s0, 0
+; RV32-LMULMAX8-NEXT:    andi sp, sp, -64
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX8-NEXT:    vmv.v.i v25, 0
+; RV32-LMULMAX8-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.d.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 0(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 1, e8,mf2,ta,mu
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 7
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.d.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 56(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 6
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.d.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 48(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 5
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.d.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 40(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 4
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.d.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 32(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 3
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.d.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 24(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 2
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.d.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 16(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    neg a0, a0
+; RV32-LMULMAX8-NEXT:    fcvt.d.w ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 8(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 8, e64,m4,ta,mu
+; RV32-LMULMAX8-NEXT:    vle64.v v8, (sp)
+; RV32-LMULMAX8-NEXT:    addi sp, s0, -128
+; RV32-LMULMAX8-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
+; RV32-LMULMAX8-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
+; RV32-LMULMAX8-NEXT:    addi sp, sp, 128
+; RV32-LMULMAX8-NEXT:    ret
+;
+; RV64-LMULMAX8-LABEL: si2fp_v8i1_v8f64:
+; RV64-LMULMAX8:       # %bb.0:
+; RV64-LMULMAX8-NEXT:    addi sp, sp, -128
+; RV64-LMULMAX8-NEXT:    .cfi_def_cfa_offset 128
+; RV64-LMULMAX8-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
+; RV64-LMULMAX8-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
+; RV64-LMULMAX8-NEXT:    .cfi_offset ra, -8
+; RV64-LMULMAX8-NEXT:    .cfi_offset s0, -16
+; RV64-LMULMAX8-NEXT:    addi s0, sp, 128
+; RV64-LMULMAX8-NEXT:    .cfi_def_cfa s0, 0
+; RV64-LMULMAX8-NEXT:    andi sp, sp, -64
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX8-NEXT:    vmv.v.i v25, 0
+; RV64-LMULMAX8-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.d.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 0(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 1, e8,mf2,ta,mu
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 7
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.d.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 56(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 6
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.d.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 48(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 5
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.d.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 40(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 4
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.d.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 32(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 3
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.d.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 24(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 2
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.d.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 16(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    neg a0, a0
+; RV64-LMULMAX8-NEXT:    fcvt.d.l ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 8(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 8, e64,m4,ta,mu
+; RV64-LMULMAX8-NEXT:    vle64.v v8, (sp)
+; RV64-LMULMAX8-NEXT:    addi sp, s0, -128
+; RV64-LMULMAX8-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-LMULMAX8-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
+; RV64-LMULMAX8-NEXT:    addi sp, sp, 128
+; RV64-LMULMAX8-NEXT:    ret
+;
+; RV32-LMULMAX1-LABEL: si2fp_v8i1_v8f64:
+; RV32-LMULMAX1:       # %bb.0:
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vmv.v.i v25, 0
+; RV32-LMULMAX1-NEXT:    vmerge.vim v26, v25, 1, v0
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.d.w ft0, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 1
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.d.w ft1, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vfmv.v.f v8, ft1
+; RV32-LMULMAX1-NEXT:    vfmv.s.f v8, ft0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vmv.v.i v26, 0
+; RV32-LMULMAX1-NEXT:    vmerge.vim v27, v26, 1, v0
+; RV32-LMULMAX1-NEXT:    vmv1r.v v28, v0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 2
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vmsne.vi v0, v27, 0
+; RV32-LMULMAX1-NEXT:    vmerge.vim v27, v25, 1, v0
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.d.w ft0, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 1
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.d.w ft1, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vfmv.v.f v9, ft1
+; RV32-LMULMAX1-NEXT:    vfmv.s.f v9, ft0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX1-NEXT:    vmv.v.i v27, 0
+; RV32-LMULMAX1-NEXT:    vmv1r.v v0, v28
+; RV32-LMULMAX1-NEXT:    vmerge.vim v27, v27, 1, v0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf2,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 4
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vmsne.vi v0, v27, 0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vmerge.vim v27, v25, 1, v0
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.d.w ft0, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 1
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.d.w ft1, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vfmv.v.f v10, ft1
+; RV32-LMULMAX1-NEXT:    vfmv.s.f v10, ft0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vmerge.vim v26, v26, 1, v0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 2
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vmsne.vi v0, v26, 0
+; RV32-LMULMAX1-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.d.w ft0, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    neg a0, a0
+; RV32-LMULMAX1-NEXT:    fcvt.d.w ft1, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vfmv.v.f v11, ft1
+; RV32-LMULMAX1-NEXT:    vfmv.s.f v11, ft0
+; RV32-LMULMAX1-NEXT:    ret
+;
+; RV64-LMULMAX1-LABEL: si2fp_v8i1_v8f64:
+; RV64-LMULMAX1:       # %bb.0:
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vmv.v.i v25, 0
+; RV64-LMULMAX1-NEXT:    vmerge.vim v26, v25, 1, v0
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.d.l ft0, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 1
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.d.l ft1, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vfmv.v.f v8, ft1
+; RV64-LMULMAX1-NEXT:    vfmv.s.f v8, ft0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vmv.v.i v26, 0
+; RV64-LMULMAX1-NEXT:    vmerge.vim v27, v26, 1, v0
+; RV64-LMULMAX1-NEXT:    vmv1r.v v28, v0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 2
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vmsne.vi v0, v27, 0
+; RV64-LMULMAX1-NEXT:    vmerge.vim v27, v25, 1, v0
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.d.l ft0, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 1
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.d.l ft1, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vfmv.v.f v9, ft1
+; RV64-LMULMAX1-NEXT:    vfmv.s.f v9, ft0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX1-NEXT:    vmv.v.i v27, 0
+; RV64-LMULMAX1-NEXT:    vmv1r.v v0, v28
+; RV64-LMULMAX1-NEXT:    vmerge.vim v27, v27, 1, v0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf2,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 4
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vmsne.vi v0, v27, 0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vmerge.vim v27, v25, 1, v0
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.d.l ft0, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 1
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.d.l ft1, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vfmv.v.f v10, ft1
+; RV64-LMULMAX1-NEXT:    vfmv.s.f v10, ft0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vmerge.vim v26, v26, 1, v0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 2
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vmsne.vi v0, v26, 0
+; RV64-LMULMAX1-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.d.l ft0, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    neg a0, a0
+; RV64-LMULMAX1-NEXT:    fcvt.d.l ft1, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vfmv.v.f v11, ft1
+; RV64-LMULMAX1-NEXT:    vfmv.s.f v11, ft0
+; RV64-LMULMAX1-NEXT:    ret
+  %z = sitofp <8 x i1> %x to <8 x double>
+  ret <8 x double> %z
+}
+
+define <8 x double> @ui2fp_v8i1_v8f64(<8 x i1> %x) {
+; RV32-LMULMAX8-LABEL: ui2fp_v8i1_v8f64:
+; RV32-LMULMAX8:       # %bb.0:
+; RV32-LMULMAX8-NEXT:    addi sp, sp, -128
+; RV32-LMULMAX8-NEXT:    .cfi_def_cfa_offset 128
+; RV32-LMULMAX8-NEXT:    sw ra, 124(sp) # 4-byte Folded Spill
+; RV32-LMULMAX8-NEXT:    sw s0, 120(sp) # 4-byte Folded Spill
+; RV32-LMULMAX8-NEXT:    .cfi_offset ra, -4
+; RV32-LMULMAX8-NEXT:    .cfi_offset s0, -8
+; RV32-LMULMAX8-NEXT:    addi s0, sp, 128
+; RV32-LMULMAX8-NEXT:    .cfi_def_cfa s0, 0
+; RV32-LMULMAX8-NEXT:    andi sp, sp, -64
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX8-NEXT:    vmv.v.i v25, 0
+; RV32-LMULMAX8-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.d.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 0(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 1, e8,mf2,ta,mu
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 7
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.d.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 56(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 6
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.d.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 48(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 5
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.d.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 40(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 4
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.d.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 32(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 3
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.d.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 24(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 2
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.d.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 16(sp)
+; RV32-LMULMAX8-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX8-NEXT:    fcvt.d.wu ft0, a0
+; RV32-LMULMAX8-NEXT:    fsd ft0, 8(sp)
+; RV32-LMULMAX8-NEXT:    vsetivli zero, 8, e64,m4,ta,mu
+; RV32-LMULMAX8-NEXT:    vle64.v v8, (sp)
+; RV32-LMULMAX8-NEXT:    addi sp, s0, -128
+; RV32-LMULMAX8-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
+; RV32-LMULMAX8-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
+; RV32-LMULMAX8-NEXT:    addi sp, sp, 128
+; RV32-LMULMAX8-NEXT:    ret
+;
+; RV64-LMULMAX8-LABEL: ui2fp_v8i1_v8f64:
+; RV64-LMULMAX8:       # %bb.0:
+; RV64-LMULMAX8-NEXT:    addi sp, sp, -128
+; RV64-LMULMAX8-NEXT:    .cfi_def_cfa_offset 128
+; RV64-LMULMAX8-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
+; RV64-LMULMAX8-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
+; RV64-LMULMAX8-NEXT:    .cfi_offset ra, -8
+; RV64-LMULMAX8-NEXT:    .cfi_offset s0, -16
+; RV64-LMULMAX8-NEXT:    addi s0, sp, 128
+; RV64-LMULMAX8-NEXT:    .cfi_def_cfa s0, 0
+; RV64-LMULMAX8-NEXT:    andi sp, sp, -64
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX8-NEXT:    vmv.v.i v25, 0
+; RV64-LMULMAX8-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.d.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 0(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 1, e8,mf2,ta,mu
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 7
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.d.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 56(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 6
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.d.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 48(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 5
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.d.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 40(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 4
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.d.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 32(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 3
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.d.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 24(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v26, v25, 2
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.d.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 16(sp)
+; RV64-LMULMAX8-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-LMULMAX8-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX8-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX8-NEXT:    fcvt.d.lu ft0, a0
+; RV64-LMULMAX8-NEXT:    fsd ft0, 8(sp)
+; RV64-LMULMAX8-NEXT:    vsetivli zero, 8, e64,m4,ta,mu
+; RV64-LMULMAX8-NEXT:    vle64.v v8, (sp)
+; RV64-LMULMAX8-NEXT:    addi sp, s0, -128
+; RV64-LMULMAX8-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
+; RV64-LMULMAX8-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
+; RV64-LMULMAX8-NEXT:    addi sp, sp, 128
+; RV64-LMULMAX8-NEXT:    ret
+;
+; RV32-LMULMAX1-LABEL: ui2fp_v8i1_v8f64:
+; RV32-LMULMAX1:       # %bb.0:
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vmv.v.i v25, 0
+; RV32-LMULMAX1-NEXT:    vmerge.vim v26, v25, 1, v0
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.d.wu ft0, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 1
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.d.wu ft1, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vfmv.v.f v8, ft1
+; RV32-LMULMAX1-NEXT:    vfmv.s.f v8, ft0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vmv.v.i v26, 0
+; RV32-LMULMAX1-NEXT:    vmerge.vim v27, v26, 1, v0
+; RV32-LMULMAX1-NEXT:    vmv1r.v v28, v0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 2
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vmsne.vi v0, v27, 0
+; RV32-LMULMAX1-NEXT:    vmerge.vim v27, v25, 1, v0
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.d.wu ft0, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 1
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.d.wu ft1, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vfmv.v.f v9, ft1
+; RV32-LMULMAX1-NEXT:    vfmv.s.f v9, ft0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-LMULMAX1-NEXT:    vmv.v.i v27, 0
+; RV32-LMULMAX1-NEXT:    vmv1r.v v0, v28
+; RV32-LMULMAX1-NEXT:    vmerge.vim v27, v27, 1, v0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf2,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 4
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vmsne.vi v0, v27, 0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vmerge.vim v27, v25, 1, v0
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.d.wu ft0, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 1
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.d.wu ft1, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vfmv.v.f v10, ft1
+; RV32-LMULMAX1-NEXT:    vfmv.s.f v10, ft0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vmerge.vim v26, v26, 1, v0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf4,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 2
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vmsne.vi v0, v26, 0
+; RV32-LMULMAX1-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.d.wu ft0, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV32-LMULMAX1-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV32-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV32-LMULMAX1-NEXT:    fcvt.d.wu ft1, a0
+; RV32-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV32-LMULMAX1-NEXT:    vfmv.v.f v11, ft1
+; RV32-LMULMAX1-NEXT:    vfmv.s.f v11, ft0
+; RV32-LMULMAX1-NEXT:    ret
+;
+; RV64-LMULMAX1-LABEL: ui2fp_v8i1_v8f64:
+; RV64-LMULMAX1:       # %bb.0:
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vmv.v.i v25, 0
+; RV64-LMULMAX1-NEXT:    vmerge.vim v26, v25, 1, v0
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.d.lu ft0, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 1
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v26
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.d.lu ft1, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vfmv.v.f v8, ft1
+; RV64-LMULMAX1-NEXT:    vfmv.s.f v8, ft0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vmv.v.i v26, 0
+; RV64-LMULMAX1-NEXT:    vmerge.vim v27, v26, 1, v0
+; RV64-LMULMAX1-NEXT:    vmv1r.v v28, v0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 2
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vmsne.vi v0, v27, 0
+; RV64-LMULMAX1-NEXT:    vmerge.vim v27, v25, 1, v0
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.d.lu ft0, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 1
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.d.lu ft1, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vfmv.v.f v9, ft1
+; RV64-LMULMAX1-NEXT:    vfmv.s.f v9, ft0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-LMULMAX1-NEXT:    vmv.v.i v27, 0
+; RV64-LMULMAX1-NEXT:    vmv1r.v v0, v28
+; RV64-LMULMAX1-NEXT:    vmerge.vim v27, v27, 1, v0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf2,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 4
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vmsne.vi v0, v27, 0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vmerge.vim v27, v25, 1, v0
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.d.lu ft0, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v27, v27, 1
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v27
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.d.lu ft1, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vfmv.v.f v10, ft1
+; RV64-LMULMAX1-NEXT:    vfmv.s.f v10, ft0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 4, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vmerge.vim v26, v26, 1, v0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf4,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v26, v26, 2
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vmsne.vi v0, v26, 0
+; RV64-LMULMAX1-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.d.lu ft0, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV64-LMULMAX1-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-LMULMAX1-NEXT:    vmv.x.s a0, v25
+; RV64-LMULMAX1-NEXT:    andi a0, a0, 1
+; RV64-LMULMAX1-NEXT:    fcvt.d.lu ft1, a0
+; RV64-LMULMAX1-NEXT:    vsetivli zero, 2, e64,m1,ta,mu
+; RV64-LMULMAX1-NEXT:    vfmv.v.f v11, ft1
+; RV64-LMULMAX1-NEXT:    vfmv.s.f v11, ft0
+; RV64-LMULMAX1-NEXT:    ret
+  %z = uitofp <8 x i1> %x to <8 x double>
+  ret <8 x double> %z
+}
+
 define void @si2fp_v2i64_v2f16(<2 x i64>* %x, <2 x half>* %y) {
 ; CHECK-LABEL: si2fp_v2i64_v2f16:
 ; CHECK:       # %bb.0:
@@ -242,6 +1458,90 @@ define void @ui2fp_v2i64_v2f16(<2 x i64>* %x, <2 x half>* %y) {
   ret void
 }
 
+define <2 x half> @si2fp_v2i1_v2f16(<2 x i1> %x) {
+; RV32-LABEL: si2fp_v2i1_v2f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.v.i v25, 0
+; RV32-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    fcvt.h.w ft0, a0
+; RV32-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV32-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    fcvt.h.w ft1, a0
+; RV32-NEXT:    vsetivli zero, 2, e16,mf4,ta,mu
+; RV32-NEXT:    vfmv.v.f v8, ft1
+; RV32-NEXT:    vfmv.s.f v8, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: si2fp_v2i1_v2f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.v.i v25, 0
+; RV64-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    fcvt.h.l ft0, a0
+; RV64-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV64-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    fcvt.h.l ft1, a0
+; RV64-NEXT:    vsetivli zero, 2, e16,mf4,ta,mu
+; RV64-NEXT:    vfmv.v.f v8, ft1
+; RV64-NEXT:    vfmv.s.f v8, ft0
+; RV64-NEXT:    ret
+  %z = sitofp <2 x i1> %x to <2 x half>
+  ret <2 x half> %z
+}
+
+define <2 x half> @ui2fp_v2i1_v2f16(<2 x i1> %x) {
+; RV32-LABEL: ui2fp_v2i1_v2f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV32-NEXT:    vmv.v.i v25, 0
+; RV32-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    fcvt.h.wu ft0, a0
+; RV32-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV32-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    fcvt.h.wu ft1, a0
+; RV32-NEXT:    vsetivli zero, 2, e16,mf4,ta,mu
+; RV32-NEXT:    vfmv.v.f v8, ft1
+; RV32-NEXT:    vfmv.s.f v8, ft0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: ui2fp_v2i1_v2f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e8,mf8,ta,mu
+; RV64-NEXT:    vmv.v.i v25, 0
+; RV64-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    fcvt.h.lu ft0, a0
+; RV64-NEXT:    vsetivli zero, 1, e8,mf8,ta,mu
+; RV64-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    fcvt.h.lu ft1, a0
+; RV64-NEXT:    vsetivli zero, 2, e16,mf4,ta,mu
+; RV64-NEXT:    vfmv.v.f v8, ft1
+; RV64-NEXT:    vfmv.s.f v8, ft0
+; RV64-NEXT:    ret
+  %z = uitofp <2 x i1> %x to <2 x half>
+  ret <2 x half> %z
+}
+
 define void @si2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
 ; LMULMAX8-LABEL: si2fp_v8i64_v8f16:
 ; LMULMAX8:       # %bb.0:
@@ -377,3 +1677,235 @@ define void @ui2fp_v8i64_v8f16(<8 x i64>* %x, <8 x half>* %y) {
   store <8 x half> %d, <8 x half>* %y
   ret void
 }
+
+define <8 x half> @si2fp_v8i1_v8f16(<8 x i1> %x) {
+; RV32-LABEL: si2fp_v8i1_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-NEXT:    vmv.v.i v25, 0
+; RV32-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    fcvt.h.w ft0, a0
+; RV32-NEXT:    fsh ft0, 0(sp)
+; RV32-NEXT:    vsetivli zero, 1, e8,mf2,ta,mu
+; RV32-NEXT:    vslidedown.vi v26, v25, 7
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    fcvt.h.w ft0, a0
+; RV32-NEXT:    fsh ft0, 14(sp)
+; RV32-NEXT:    vslidedown.vi v26, v25, 6
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    fcvt.h.w ft0, a0
+; RV32-NEXT:    fsh ft0, 12(sp)
+; RV32-NEXT:    vslidedown.vi v26, v25, 5
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    fcvt.h.w ft0, a0
+; RV32-NEXT:    fsh ft0, 10(sp)
+; RV32-NEXT:    vslidedown.vi v26, v25, 4
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    fcvt.h.w ft0, a0
+; RV32-NEXT:    fsh ft0, 8(sp)
+; RV32-NEXT:    vslidedown.vi v26, v25, 3
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    fcvt.h.w ft0, a0
+; RV32-NEXT:    fsh ft0, 6(sp)
+; RV32-NEXT:    vslidedown.vi v26, v25, 2
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    fcvt.h.w ft0, a0
+; RV32-NEXT:    fsh ft0, 4(sp)
+; RV32-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    fcvt.h.w ft0, a0
+; RV32-NEXT:    fsh ft0, 2(sp)
+; RV32-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
+; RV32-NEXT:    vle16.v v8, (sp)
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: si2fp_v8i1_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-NEXT:    vmv.v.i v25, 0
+; RV64-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    fcvt.h.l ft0, a0
+; RV64-NEXT:    fsh ft0, 0(sp)
+; RV64-NEXT:    vsetivli zero, 1, e8,mf2,ta,mu
+; RV64-NEXT:    vslidedown.vi v26, v25, 7
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    fcvt.h.l ft0, a0
+; RV64-NEXT:    fsh ft0, 14(sp)
+; RV64-NEXT:    vslidedown.vi v26, v25, 6
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    fcvt.h.l ft0, a0
+; RV64-NEXT:    fsh ft0, 12(sp)
+; RV64-NEXT:    vslidedown.vi v26, v25, 5
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    fcvt.h.l ft0, a0
+; RV64-NEXT:    fsh ft0, 10(sp)
+; RV64-NEXT:    vslidedown.vi v26, v25, 4
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    fcvt.h.l ft0, a0
+; RV64-NEXT:    fsh ft0, 8(sp)
+; RV64-NEXT:    vslidedown.vi v26, v25, 3
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    fcvt.h.l ft0, a0
+; RV64-NEXT:    fsh ft0, 6(sp)
+; RV64-NEXT:    vslidedown.vi v26, v25, 2
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    fcvt.h.l ft0, a0
+; RV64-NEXT:    fsh ft0, 4(sp)
+; RV64-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    neg a0, a0
+; RV64-NEXT:    fcvt.h.l ft0, a0
+; RV64-NEXT:    fsh ft0, 2(sp)
+; RV64-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
+; RV64-NEXT:    vle16.v v8, (sp)
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %z = sitofp <8 x i1> %x to <8 x half>
+  ret <8 x half> %z
+}
+
+define <8 x half> @ui2fp_v8i1_v8f16(<8 x i1> %x) {
+; RV32-LABEL: ui2fp_v8i1_v8f16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV32-NEXT:    vmv.v.i v25, 0
+; RV32-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    fcvt.h.wu ft0, a0
+; RV32-NEXT:    fsh ft0, 0(sp)
+; RV32-NEXT:    vsetivli zero, 1, e8,mf2,ta,mu
+; RV32-NEXT:    vslidedown.vi v26, v25, 7
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    fcvt.h.wu ft0, a0
+; RV32-NEXT:    fsh ft0, 14(sp)
+; RV32-NEXT:    vslidedown.vi v26, v25, 6
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    fcvt.h.wu ft0, a0
+; RV32-NEXT:    fsh ft0, 12(sp)
+; RV32-NEXT:    vslidedown.vi v26, v25, 5
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    fcvt.h.wu ft0, a0
+; RV32-NEXT:    fsh ft0, 10(sp)
+; RV32-NEXT:    vslidedown.vi v26, v25, 4
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    fcvt.h.wu ft0, a0
+; RV32-NEXT:    fsh ft0, 8(sp)
+; RV32-NEXT:    vslidedown.vi v26, v25, 3
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    fcvt.h.wu ft0, a0
+; RV32-NEXT:    fsh ft0, 6(sp)
+; RV32-NEXT:    vslidedown.vi v26, v25, 2
+; RV32-NEXT:    vmv.x.s a0, v26
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    fcvt.h.wu ft0, a0
+; RV32-NEXT:    fsh ft0, 4(sp)
+; RV32-NEXT:    vslidedown.vi v25, v25, 1
+; RV32-NEXT:    vmv.x.s a0, v25
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    fcvt.h.wu ft0, a0
+; RV32-NEXT:    fsh ft0, 2(sp)
+; RV32-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
+; RV32-NEXT:    vle16.v v8, (sp)
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: ui2fp_v8i1_v8f16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    vsetivli zero, 8, e8,mf2,ta,mu
+; RV64-NEXT:    vmv.v.i v25, 0
+; RV64-NEXT:    vmerge.vim v25, v25, 1, v0
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    fcvt.h.lu ft0, a0
+; RV64-NEXT:    fsh ft0, 0(sp)
+; RV64-NEXT:    vsetivli zero, 1, e8,mf2,ta,mu
+; RV64-NEXT:    vslidedown.vi v26, v25, 7
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    fcvt.h.lu ft0, a0
+; RV64-NEXT:    fsh ft0, 14(sp)
+; RV64-NEXT:    vslidedown.vi v26, v25, 6
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    fcvt.h.lu ft0, a0
+; RV64-NEXT:    fsh ft0, 12(sp)
+; RV64-NEXT:    vslidedown.vi v26, v25, 5
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    fcvt.h.lu ft0, a0
+; RV64-NEXT:    fsh ft0, 10(sp)
+; RV64-NEXT:    vslidedown.vi v26, v25, 4
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    fcvt.h.lu ft0, a0
+; RV64-NEXT:    fsh ft0, 8(sp)
+; RV64-NEXT:    vslidedown.vi v26, v25, 3
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    fcvt.h.lu ft0, a0
+; RV64-NEXT:    fsh ft0, 6(sp)
+; RV64-NEXT:    vslidedown.vi v26, v25, 2
+; RV64-NEXT:    vmv.x.s a0, v26
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    fcvt.h.lu ft0, a0
+; RV64-NEXT:    fsh ft0, 4(sp)
+; RV64-NEXT:    vslidedown.vi v25, v25, 1
+; RV64-NEXT:    vmv.x.s a0, v25
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    fcvt.h.lu ft0, a0
+; RV64-NEXT:    fsh ft0, 2(sp)
+; RV64-NEXT:    vsetivli zero, 8, e16,m1,ta,mu
+; RV64-NEXT:    vle16.v v8, (sp)
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %z = uitofp <8 x i1> %x to <8 x half>
+  ret <8 x half> %z
+}
