[llvm] 9f087ba - [RISCV] Improve 4x and 8x (s/u)int_to_fp.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 6 08:43:24 PST 2023


Author: Craig Topper
Date: 2023-01-06T08:39:14-08:00
New Revision: 9f087ba05b34eba6bd8a96659cc69283458f7b00

URL: https://github.com/llvm/llvm-project/commit/9f087ba05b34eba6bd8a96659cc69283458f7b00
DIFF: https://github.com/llvm/llvm-project/commit/9f087ba05b34eba6bd8a96659cc69283458f7b00.diff

LOG: [RISCV] Improve 4x and 8x (s/u)int_to_fp.

Previously we emitted a 4x or 8x vzext followed by a vfcvt.
We can instead use a 2x or 4x vzext followed by a vfwcvt.

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
    llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 22eebe165259..db1bd64ec095 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3876,7 +3876,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     if (EltSize > (2 * SrcEltSize)) {
       if (IsInt2FP) {
         // Do a regular integer sign/zero extension then convert to float.
-        MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize),
+        MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize / 2),
                                       VT.getVectorElementCount());
         unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
                                  ? ISD::ZERO_EXTEND

diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
index 671036096c50..27250b427495 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
@@ -31,11 +31,10 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ;
 ; CHECK-D-LABEL: ctlz_nxv1i8:
 ; CHECK-D:       # %bb.0:
-; CHECK-D-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v9, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v9, v9
-; CHECK-D-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v9, v9, 23
+; CHECK-D-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v9, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v10, v9
+; CHECK-D-NEXT:    vnsrl.wi v9, v10, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
 ; CHECK-D-NEXT:    vnsrl.wi v9, v9, 0
 ; CHECK-D-NEXT:    li a0, 134
@@ -75,11 +74,10 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ;
 ; CHECK-D-LABEL: ctlz_nxv2i8:
 ; CHECK-D:       # %bb.0:
-; CHECK-D-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v9, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v9, v9
-; CHECK-D-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v9, v9, 23
+; CHECK-D-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v9, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v10, v9
+; CHECK-D-NEXT:    vnsrl.wi v9, v10, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-D-NEXT:    vnsrl.wi v9, v9, 0
 ; CHECK-D-NEXT:    li a0, 134
@@ -119,10 +117,9 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ;
 ; CHECK-D-LABEL: ctlz_nxv4i8:
 ; CHECK-D:       # %bb.0:
-; CHECK-D-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v10, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v10, v10
-; CHECK-D-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-D-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v9, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v10, v9
 ; CHECK-D-NEXT:    vnsrl.wi v9, v10, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
 ; CHECK-D-NEXT:    vnsrl.wi v9, v9, 0
@@ -163,10 +160,9 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ;
 ; CHECK-D-LABEL: ctlz_nxv8i8:
 ; CHECK-D:       # %bb.0:
-; CHECK-D-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v12, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v12, v12
-; CHECK-D-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-D-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v10, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v12, v10
 ; CHECK-D-NEXT:    vnsrl.wi v10, v12, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
 ; CHECK-D-NEXT:    vnsrl.wi v9, v10, 0
@@ -207,10 +203,9 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ;
 ; CHECK-D-LABEL: ctlz_nxv16i8:
 ; CHECK-D:       # %bb.0:
-; CHECK-D-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v16, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v16, v16
-; CHECK-D-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-D-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v12, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v16, v12
 ; CHECK-D-NEXT:    vnsrl.wi v12, v16, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
 ; CHECK-D-NEXT:    vnsrl.wi v10, v12, 0
@@ -1648,10 +1643,9 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ;
 ; CHECK-D-LABEL: ctlz_zero_undef_nxv1i8:
 ; CHECK-D:       # %bb.0:
-; CHECK-D-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v9, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v8, v9
-; CHECK-D-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-D-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v9, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-D-NEXT:    vnsrl.wi v8, v8, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
 ; CHECK-D-NEXT:    vnsrl.wi v8, v8, 0
@@ -1689,10 +1683,9 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ;
 ; CHECK-D-LABEL: ctlz_zero_undef_nxv2i8:
 ; CHECK-D:       # %bb.0:
-; CHECK-D-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v9, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v8, v9
-; CHECK-D-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-D-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v9, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-D-NEXT:    vnsrl.wi v8, v8, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-D-NEXT:    vnsrl.wi v8, v8, 0
@@ -1730,13 +1723,12 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ;
 ; CHECK-D-LABEL: ctlz_zero_undef_nxv4i8:
 ; CHECK-D:       # %bb.0:
-; CHECK-D-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v10, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v8, v10
-; CHECK-D-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v10, v8, 23
+; CHECK-D-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v9, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v10, v9
+; CHECK-D-NEXT:    vnsrl.wi v8, v10, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-D-NEXT:    vnsrl.wi v8, v8, 0
 ; CHECK-D-NEXT:    li a0, 134
 ; CHECK-D-NEXT:    vrsub.vx v8, v8, a0
 ; CHECK-D-NEXT:    ret
@@ -1771,15 +1763,14 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ;
 ; CHECK-D-LABEL: ctlz_zero_undef_nxv8i8:
 ; CHECK-D:       # %bb.0:
-; CHECK-D-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v12, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v8, v12
-; CHECK-D-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v12, v8, 23
+; CHECK-D-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v10, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v12, v10
+; CHECK-D-NEXT:    vnsrl.wi v8, v12, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-D-NEXT:    vnsrl.wi v10, v8, 0
 ; CHECK-D-NEXT:    li a0, 134
-; CHECK-D-NEXT:    vrsub.vx v8, v8, a0
+; CHECK-D-NEXT:    vrsub.vx v8, v10, a0
 ; CHECK-D-NEXT:    ret
   %a = call <vscale x 8 x i8> @llvm.ctlz.nxv8i8(<vscale x 8 x i8> %va, i1 true)
   ret <vscale x 8 x i8> %a
@@ -1812,15 +1803,14 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ;
 ; CHECK-D-LABEL: ctlz_zero_undef_nxv16i8:
 ; CHECK-D:       # %bb.0:
-; CHECK-D-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v16, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v8, v16
-; CHECK-D-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v16, v8, 23
+; CHECK-D-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v12, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v16, v12
+; CHECK-D-NEXT:    vnsrl.wi v8, v16, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-D-NEXT:    vnsrl.wi v12, v8, 0
 ; CHECK-D-NEXT:    li a0, 134
-; CHECK-D-NEXT:    vrsub.vx v8, v8, a0
+; CHECK-D-NEXT:    vrsub.vx v8, v12, a0
 ; CHECK-D-NEXT:    ret
   %a = call <vscale x 16 x i8> @llvm.ctlz.nxv16i8(<vscale x 16 x i8> %va, i1 true)
   ret <vscale x 16 x i8> %a

diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
index 51dabd7b96b4..fd02061ffbc1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
@@ -31,10 +31,9 @@ define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ; CHECK-D-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-D-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-D-NEXT:    vand.vv v9, v8, v9
-; CHECK-D-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v10, v9
-; CHECK-D-NEXT:    vfcvt.f.xu.v v9, v10
 ; CHECK-D-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v10, v9
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v9, v10
 ; CHECK-D-NEXT:    vnsrl.wi v9, v9, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
 ; CHECK-D-NEXT:    vnsrl.wi v9, v9, 0
@@ -75,10 +74,9 @@ define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ; CHECK-D-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-D-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-D-NEXT:    vand.vv v9, v8, v9
-; CHECK-D-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v10, v9
-; CHECK-D-NEXT:    vfcvt.f.xu.v v9, v10
 ; CHECK-D-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v10, v9
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v9, v10
 ; CHECK-D-NEXT:    vnsrl.wi v9, v9, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-D-NEXT:    vnsrl.wi v9, v9, 0
@@ -119,11 +117,10 @@ define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ; CHECK-D-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-D-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-D-NEXT:    vand.vv v9, v8, v9
-; CHECK-D-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v10, v9
-; CHECK-D-NEXT:    vfcvt.f.xu.v v10, v10
 ; CHECK-D-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v9, v10, 23
+; CHECK-D-NEXT:    vzext.vf2 v10, v9
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v12, v10
+; CHECK-D-NEXT:    vnsrl.wi v9, v12, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
 ; CHECK-D-NEXT:    vnsrl.wi v9, v9, 0
 ; CHECK-D-NEXT:    li a0, 127
@@ -163,10 +160,9 @@ define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ; CHECK-D-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-D-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-D-NEXT:    vand.vv v9, v8, v9
-; CHECK-D-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v12, v9
-; CHECK-D-NEXT:    vfcvt.f.xu.v v12, v12
 ; CHECK-D-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v10, v9
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v12, v10
 ; CHECK-D-NEXT:    vnsrl.wi v10, v12, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
 ; CHECK-D-NEXT:    vnsrl.wi v9, v10, 0
@@ -207,10 +203,9 @@ define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ; CHECK-D-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-D-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-D-NEXT:    vand.vv v10, v8, v10
-; CHECK-D-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v16, v10
-; CHECK-D-NEXT:    vfcvt.f.xu.v v16, v16
 ; CHECK-D-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v12, v10
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v16, v12
 ; CHECK-D-NEXT:    vnsrl.wi v12, v16, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
 ; CHECK-D-NEXT:    vnsrl.wi v10, v12, 0
@@ -1450,10 +1445,9 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; CHECK-D-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-D-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-D-NEXT:    vand.vv v8, v8, v9
-; CHECK-D-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v9, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v8, v9
 ; CHECK-D-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v9, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-D-NEXT:    vnsrl.wi v8, v8, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
 ; CHECK-D-NEXT:    vnsrl.wi v8, v8, 0
@@ -1491,10 +1485,9 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; CHECK-D-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-D-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-D-NEXT:    vand.vv v8, v8, v9
-; CHECK-D-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v9, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v8, v9
 ; CHECK-D-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; CHECK-D-NEXT:    vzext.vf2 v9, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-D-NEXT:    vnsrl.wi v8, v8, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
 ; CHECK-D-NEXT:    vnsrl.wi v8, v8, 0
@@ -1532,13 +1525,12 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; CHECK-D-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-D-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-D-NEXT:    vand.vv v8, v8, v9
-; CHECK-D-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v10, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v8, v10
 ; CHECK-D-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v10, v8, 23
+; CHECK-D-NEXT:    vzext.vf2 v9, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v10, v9
+; CHECK-D-NEXT:    vnsrl.wi v8, v10, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-D-NEXT:    vnsrl.wi v8, v8, 0
 ; CHECK-D-NEXT:    li a0, 127
 ; CHECK-D-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-D-NEXT:    ret
@@ -1573,15 +1565,14 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; CHECK-D-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-D-NEXT:    vrsub.vi v9, v8, 0
 ; CHECK-D-NEXT:    vand.vv v8, v8, v9
-; CHECK-D-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v12, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v8, v12
 ; CHECK-D-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v12, v8, 23
+; CHECK-D-NEXT:    vzext.vf2 v10, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v12, v10
+; CHECK-D-NEXT:    vnsrl.wi v8, v12, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-D-NEXT:    vnsrl.wi v10, v8, 0
 ; CHECK-D-NEXT:    li a0, 127
-; CHECK-D-NEXT:    vsub.vx v8, v8, a0
+; CHECK-D-NEXT:    vsub.vx v8, v10, a0
 ; CHECK-D-NEXT:    ret
   %a = call <vscale x 8 x i8> @llvm.cttz.nxv8i8(<vscale x 8 x i8> %va, i1 true)
   ret <vscale x 8 x i8> %a
@@ -1614,15 +1605,14 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; CHECK-D-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-D-NEXT:    vrsub.vi v10, v8, 0
 ; CHECK-D-NEXT:    vand.vv v8, v8, v10
-; CHECK-D-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-D-NEXT:    vzext.vf4 v16, v8
-; CHECK-D-NEXT:    vfcvt.f.xu.v v8, v16
 ; CHECK-D-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v16, v8, 23
+; CHECK-D-NEXT:    vzext.vf2 v12, v8
+; CHECK-D-NEXT:    vfwcvt.f.xu.v v16, v12
+; CHECK-D-NEXT:    vnsrl.wi v8, v16, 23
 ; CHECK-D-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-D-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-D-NEXT:    vnsrl.wi v12, v8, 0
 ; CHECK-D-NEXT:    li a0, 127
-; CHECK-D-NEXT:    vsub.vx v8, v8, a0
+; CHECK-D-NEXT:    vsub.vx v8, v12, a0
 ; CHECK-D-NEXT:    ret
   %a = call <vscale x 16 x i8> @llvm.cttz.nxv16i8(<vscale x 16 x i8> %va, i1 true)
   ret <vscale x 16 x i8> %a

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
index 21d6293871bb..cc08aeb8cd2b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
@@ -39,11 +39,10 @@ define void @ctlz_v16i8(ptr %x, ptr %y) nounwind {
 ;
 ; LMULMAX8-LABEL: ctlz_v16i8:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; LMULMAX8-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; LMULMAX8-NEXT:    vle8.v v8, (a0)
-; LMULMAX8-NEXT:    vzext.vf4 v12, v8
-; LMULMAX8-NEXT:    vfcvt.f.xu.v v12, v12
-; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; LMULMAX8-NEXT:    vzext.vf2 v10, v8
+; LMULMAX8-NEXT:    vfwcvt.f.xu.v v12, v10
 ; LMULMAX8-NEXT:    vnsrl.wi v10, v12, 23
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
 ; LMULMAX8-NEXT:    vnsrl.wi v9, v10, 0
@@ -838,11 +837,10 @@ define void @ctlz_v32i8(ptr %x, ptr %y) nounwind {
 ; LMULMAX8-LABEL: ctlz_v32i8:
 ; LMULMAX8:       # %bb.0:
 ; LMULMAX8-NEXT:    li a1, 32
-; LMULMAX8-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; LMULMAX8-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; LMULMAX8-NEXT:    vle8.v v8, (a0)
-; LMULMAX8-NEXT:    vzext.vf4 v16, v8
-; LMULMAX8-NEXT:    vfcvt.f.xu.v v16, v16
-; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; LMULMAX8-NEXT:    vzext.vf2 v12, v8
+; LMULMAX8-NEXT:    vfwcvt.f.xu.v v16, v12
 ; LMULMAX8-NEXT:    vnsrl.wi v12, v16, 23
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
 ; LMULMAX8-NEXT:    vnsrl.wi v10, v12, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
index 27b82927e5ee..144f469bf143 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
@@ -40,10 +40,9 @@ define void @cttz_v16i8(ptr %x, ptr %y) nounwind {
 ; LMULMAX8-NEXT:    vle8.v v8, (a0)
 ; LMULMAX8-NEXT:    vrsub.vi v9, v8, 0
 ; LMULMAX8-NEXT:    vand.vv v9, v8, v9
-; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; LMULMAX8-NEXT:    vzext.vf4 v12, v9
-; LMULMAX8-NEXT:    vfcvt.f.xu.v v12, v12
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; LMULMAX8-NEXT:    vzext.vf2 v10, v9
+; LMULMAX8-NEXT:    vfwcvt.f.xu.v v12, v10
 ; LMULMAX8-NEXT:    vnsrl.wi v10, v12, 23
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
 ; LMULMAX8-NEXT:    vnsrl.wi v9, v10, 0
@@ -736,10 +735,9 @@ define void @cttz_v32i8(ptr %x, ptr %y) nounwind {
 ; LMULMAX8-NEXT:    vle8.v v8, (a0)
 ; LMULMAX8-NEXT:    vrsub.vi v10, v8, 0
 ; LMULMAX8-NEXT:    vand.vv v10, v8, v10
-; LMULMAX8-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; LMULMAX8-NEXT:    vzext.vf4 v16, v10
-; LMULMAX8-NEXT:    vfcvt.f.xu.v v16, v16
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; LMULMAX8-NEXT:    vzext.vf2 v12, v10
+; LMULMAX8-NEXT:    vfwcvt.f.xu.v v16, v12
 ; LMULMAX8-NEXT:    vnsrl.wi v12, v16, 23
 ; LMULMAX8-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
 ; LMULMAX8-NEXT:    vnsrl.wi v10, v12, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
index c01ef340cda4..462d2ece3e0a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll
@@ -35,10 +35,10 @@ define void @ui2fp_v2i32_v2f32(ptr %x, ptr %y) {
 define <2 x float> @si2fp_v2i1_v2f32(<2 x i1> %x) {
 ; CHECK-LABEL: si2fp_v2i1_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
   %z = sitofp <2 x i1> %x to <2 x float>
   ret <2 x float> %z
@@ -50,9 +50,9 @@ define <2 x float> @si2fp_v2i7_v2f32(<2 x i7> %x) {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vsext.vf4 v9, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vsext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
   %z = sitofp <2 x i7> %x to <2 x float>
   ret <2 x float> %z
@@ -64,9 +64,9 @@ define <2 x float> @ui2fp_v2i7_v2f32(<2 x i7> %x) {
 ; CHECK-NEXT:    li a0, 127
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vand.vx v8, v8, a0
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vzext.vf4 v9, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v9
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vzext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
   %z = uitofp <2 x i7> %x to <2 x float>
   ret <2 x float> %z
@@ -75,10 +75,10 @@ define <2 x float> @ui2fp_v2i7_v2f32(<2 x i7> %x) {
 define <2 x float> @ui2fp_v2i1_v2f32(<2 x i1> %x) {
 ; CHECK-LABEL: ui2fp_v2i1_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
   %z = uitofp <2 x i1> %x to <2 x float>
   ret <2 x float> %z
@@ -141,18 +141,18 @@ define void @ui2fp_v8i32_v8f32(ptr %x, ptr %y) {
 define <8 x float> @si2fp_v8i1_v8f32(<8 x i1> %x) {
 ; LMULMAX8-LABEL: si2fp_v8i1_v8f32:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; LMULMAX8-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; LMULMAX8-NEXT:    vmv.v.i v8, 0
-; LMULMAX8-NEXT:    vmerge.vim v8, v8, -1, v0
-; LMULMAX8-NEXT:    vfcvt.f.x.v v8, v8
+; LMULMAX8-NEXT:    vmerge.vim v10, v8, -1, v0
+; LMULMAX8-NEXT:    vfwcvt.f.x.v v8, v10
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: si2fp_v8i1_v8f32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; LMULMAX1-NEXT:    vmv.v.i v9, 0
-; LMULMAX1-NEXT:    vmerge.vim v8, v9, -1, v0
-; LMULMAX1-NEXT:    vfcvt.f.x.v v8, v8
+; LMULMAX1-NEXT:    vmerge.vim v10, v9, -1, v0
+; LMULMAX1-NEXT:    vfwcvt.f.x.v v8, v10
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; LMULMAX1-NEXT:    vmv.v.i v10, 0
 ; LMULMAX1-NEXT:    vmerge.vim v10, v10, 1, v0
@@ -160,9 +160,9 @@ define <8 x float> @si2fp_v8i1_v8f32(<8 x i1> %x) {
 ; LMULMAX1-NEXT:    vslidedown.vi v10, v10, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; LMULMAX1-NEXT:    vmsne.vi v0, v10, 0
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; LMULMAX1-NEXT:    vmerge.vim v9, v9, -1, v0
-; LMULMAX1-NEXT:    vfcvt.f.x.v v9, v9
+; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; LMULMAX1-NEXT:    vmerge.vim v10, v9, -1, v0
+; LMULMAX1-NEXT:    vfwcvt.f.x.v v9, v10
 ; LMULMAX1-NEXT:    ret
   %z = sitofp <8 x i1> %x to <8 x float>
   ret <8 x float> %z
@@ -171,18 +171,18 @@ define <8 x float> @si2fp_v8i1_v8f32(<8 x i1> %x) {
 define <8 x float> @ui2fp_v8i1_v8f32(<8 x i1> %x) {
 ; LMULMAX8-LABEL: ui2fp_v8i1_v8f32:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; LMULMAX8-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; LMULMAX8-NEXT:    vmv.v.i v8, 0
-; LMULMAX8-NEXT:    vmerge.vim v8, v8, 1, v0
-; LMULMAX8-NEXT:    vfcvt.f.xu.v v8, v8
+; LMULMAX8-NEXT:    vmerge.vim v10, v8, 1, v0
+; LMULMAX8-NEXT:    vfwcvt.f.xu.v v8, v10
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: ui2fp_v8i1_v8f32:
 ; LMULMAX1:       # %bb.0:
-; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; LMULMAX1-NEXT:    vmv.v.i v9, 0
-; LMULMAX1-NEXT:    vmerge.vim v8, v9, 1, v0
-; LMULMAX1-NEXT:    vfcvt.f.xu.v v8, v8
+; LMULMAX1-NEXT:    vmerge.vim v10, v9, 1, v0
+; LMULMAX1-NEXT:    vfwcvt.f.xu.v v8, v10
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; LMULMAX1-NEXT:    vmv.v.i v10, 0
 ; LMULMAX1-NEXT:    vmerge.vim v10, v10, 1, v0
@@ -190,9 +190,9 @@ define <8 x float> @ui2fp_v8i1_v8f32(<8 x i1> %x) {
 ; LMULMAX1-NEXT:    vslidedown.vi v10, v10, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; LMULMAX1-NEXT:    vmsne.vi v0, v10, 0
-; LMULMAX1-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; LMULMAX1-NEXT:    vmerge.vim v9, v9, 1, v0
-; LMULMAX1-NEXT:    vfcvt.f.xu.v v9, v9
+; LMULMAX1-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; LMULMAX1-NEXT:    vmerge.vim v10, v9, 1, v0
+; LMULMAX1-NEXT:    vfwcvt.f.xu.v v9, v10
 ; LMULMAX1-NEXT:    ret
   %z = uitofp <8 x i1> %x to <8 x float>
   ret <8 x float> %z
@@ -201,10 +201,10 @@ define <8 x float> @ui2fp_v8i1_v8f32(<8 x i1> %x) {
 define void @si2fp_v2i16_v2f64(ptr %x, ptr %y) {
 ; CHECK-LABEL: si2fp_v2i16_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsext.vf4 v9, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9
+; CHECK-NEXT:    vsext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    vse64.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x i16>, ptr %x
@@ -216,10 +216,10 @@ define void @si2fp_v2i16_v2f64(ptr %x, ptr %y) {
 define void @ui2fp_v2i16_v2f64(ptr %x, ptr %y) {
 ; CHECK-LABEL: ui2fp_v2i16_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vzext.vf4 v9, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v9
+; CHECK-NEXT:    vzext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    vse64.v v8, (a1)
 ; CHECK-NEXT:    ret
   %a = load <2 x i16>, ptr %x
@@ -231,11 +231,11 @@ define void @ui2fp_v2i16_v2f64(ptr %x, ptr %y) {
 define void @si2fp_v8i16_v8f64(ptr %x, ptr %y) {
 ; LMULMAX8-LABEL: si2fp_v8i16_v8f64:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; LMULMAX8-NEXT:    vle16.v v8, (a0)
-; LMULMAX8-NEXT:    vsext.vf4 v12, v8
-; LMULMAX8-NEXT:    vfcvt.f.x.v v8, v12
-; LMULMAX8-NEXT:    vse64.v v8, (a1)
+; LMULMAX8-NEXT:    vsext.vf2 v10, v8
+; LMULMAX8-NEXT:    vfwcvt.f.x.v v12, v10
+; LMULMAX8-NEXT:    vse64.v v12, (a1)
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: si2fp_v8i16_v8f64:
@@ -244,20 +244,20 @@ define void @si2fp_v8i16_v8f64(ptr %x, ptr %y) {
 ; LMULMAX1-NEXT:    vle16.v v8, (a0)
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
 ; LMULMAX1-NEXT:    vslidedown.vi v9, v8, 2
-; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT:    vsext.vf4 v10, v9
-; LMULMAX1-NEXT:    vfcvt.f.x.v v9, v10
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1-NEXT:    vsext.vf2 v10, v9
+; LMULMAX1-NEXT:    vfwcvt.f.x.v v9, v10
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
 ; LMULMAX1-NEXT:    vslidedown.vi v10, v8, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
 ; LMULMAX1-NEXT:    vslidedown.vi v11, v10, 2
-; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT:    vsext.vf4 v12, v11
-; LMULMAX1-NEXT:    vfcvt.f.x.v v11, v12
-; LMULMAX1-NEXT:    vsext.vf4 v12, v10
-; LMULMAX1-NEXT:    vfcvt.f.x.v v10, v12
-; LMULMAX1-NEXT:    vsext.vf4 v12, v8
-; LMULMAX1-NEXT:    vfcvt.f.x.v v8, v12
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1-NEXT:    vsext.vf2 v12, v11
+; LMULMAX1-NEXT:    vfwcvt.f.x.v v11, v12
+; LMULMAX1-NEXT:    vsext.vf2 v12, v10
+; LMULMAX1-NEXT:    vfwcvt.f.x.v v10, v12
+; LMULMAX1-NEXT:    vsext.vf2 v12, v8
+; LMULMAX1-NEXT:    vfwcvt.f.x.v v8, v12
 ; LMULMAX1-NEXT:    addi a0, a1, 32
 ; LMULMAX1-NEXT:    vse64.v v10, (a0)
 ; LMULMAX1-NEXT:    vse64.v v8, (a1)
@@ -275,11 +275,11 @@ define void @si2fp_v8i16_v8f64(ptr %x, ptr %y) {
 define void @ui2fp_v8i16_v8f64(ptr %x, ptr %y) {
 ; LMULMAX8-LABEL: ui2fp_v8i16_v8f64:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; LMULMAX8-NEXT:    vle16.v v8, (a0)
-; LMULMAX8-NEXT:    vzext.vf4 v12, v8
-; LMULMAX8-NEXT:    vfcvt.f.xu.v v8, v12
-; LMULMAX8-NEXT:    vse64.v v8, (a1)
+; LMULMAX8-NEXT:    vzext.vf2 v10, v8
+; LMULMAX8-NEXT:    vfwcvt.f.xu.v v12, v10
+; LMULMAX8-NEXT:    vse64.v v12, (a1)
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: ui2fp_v8i16_v8f64:
@@ -288,20 +288,20 @@ define void @ui2fp_v8i16_v8f64(ptr %x, ptr %y) {
 ; LMULMAX1-NEXT:    vle16.v v8, (a0)
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
 ; LMULMAX1-NEXT:    vslidedown.vi v9, v8, 2
-; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT:    vzext.vf4 v10, v9
-; LMULMAX1-NEXT:    vfcvt.f.xu.v v9, v10
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1-NEXT:    vzext.vf2 v10, v9
+; LMULMAX1-NEXT:    vfwcvt.f.xu.v v9, v10
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e16, m1, ta, ma
 ; LMULMAX1-NEXT:    vslidedown.vi v10, v8, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e16, mf2, ta, ma
 ; LMULMAX1-NEXT:    vslidedown.vi v11, v10, 2
-; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT:    vzext.vf4 v12, v11
-; LMULMAX1-NEXT:    vfcvt.f.xu.v v11, v12
-; LMULMAX1-NEXT:    vzext.vf4 v12, v10
-; LMULMAX1-NEXT:    vfcvt.f.xu.v v10, v12
-; LMULMAX1-NEXT:    vzext.vf4 v12, v8
-; LMULMAX1-NEXT:    vfcvt.f.xu.v v8, v12
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1-NEXT:    vzext.vf2 v12, v11
+; LMULMAX1-NEXT:    vfwcvt.f.xu.v v11, v12
+; LMULMAX1-NEXT:    vzext.vf2 v12, v10
+; LMULMAX1-NEXT:    vfwcvt.f.xu.v v10, v12
+; LMULMAX1-NEXT:    vzext.vf2 v12, v8
+; LMULMAX1-NEXT:    vfwcvt.f.xu.v v8, v12
 ; LMULMAX1-NEXT:    addi a0, a1, 32
 ; LMULMAX1-NEXT:    vse64.v v10, (a0)
 ; LMULMAX1-NEXT:    vse64.v v8, (a1)
@@ -319,19 +319,19 @@ define void @ui2fp_v8i16_v8f64(ptr %x, ptr %y) {
 define <8 x double> @si2fp_v8i1_v8f64(<8 x i1> %x) {
 ; LMULMAX8-LABEL: si2fp_v8i1_v8f64:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; LMULMAX8-NEXT:    vmv.v.i v8, 0
-; LMULMAX8-NEXT:    vmerge.vim v8, v8, -1, v0
-; LMULMAX8-NEXT:    vfcvt.f.x.v v8, v8
+; LMULMAX8-NEXT:    vmerge.vim v12, v8, -1, v0
+; LMULMAX8-NEXT:    vfwcvt.f.x.v v8, v12
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: si2fp_v8i1_v8f64:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vmv1r.v v10, v0
-; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; LMULMAX1-NEXT:    vmv.v.i v11, 0
-; LMULMAX1-NEXT:    vmerge.vim v8, v11, -1, v0
-; LMULMAX1-NEXT:    vfcvt.f.x.v v8, v8
+; LMULMAX1-NEXT:    vmerge.vim v9, v11, -1, v0
+; LMULMAX1-NEXT:    vfwcvt.f.x.v v8, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; LMULMAX1-NEXT:    vmv.v.i v12, 0
 ; LMULMAX1-NEXT:    vmerge.vim v9, v12, 1, v0
@@ -339,9 +339,9 @@ define <8 x double> @si2fp_v8i1_v8f64(<8 x i1> %x) {
 ; LMULMAX1-NEXT:    vslidedown.vi v9, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; LMULMAX1-NEXT:    vmsne.vi v0, v9, 0
-; LMULMAX1-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; LMULMAX1-NEXT:    vmerge.vim v9, v11, -1, v0
-; LMULMAX1-NEXT:    vfcvt.f.x.v v9, v9
+; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; LMULMAX1-NEXT:    vmerge.vim v13, v11, -1, v0
+; LMULMAX1-NEXT:    vfwcvt.f.x.v v9, v13
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; LMULMAX1-NEXT:    vmv.v.i v13, 0
 ; LMULMAX1-NEXT:    vmv1r.v v0, v10
@@ -350,18 +350,18 @@ define <8 x double> @si2fp_v8i1_v8f64(<8 x i1> %x) {
 ; LMULMAX1-NEXT:    vslidedown.vi v10, v10, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; LMULMAX1-NEXT:    vmsne.vi v0, v10, 0
-; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT:    vmerge.vim v10, v11, -1, v0
-; LMULMAX1-NEXT:    vfcvt.f.x.v v10, v10
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1-NEXT:    vmerge.vim v13, v11, -1, v0
+; LMULMAX1-NEXT:    vfwcvt.f.x.v v10, v13
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; LMULMAX1-NEXT:    vmerge.vim v12, v12, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
 ; LMULMAX1-NEXT:    vslidedown.vi v12, v12, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; LMULMAX1-NEXT:    vmsne.vi v0, v12, 0
-; LMULMAX1-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; LMULMAX1-NEXT:    vmerge.vim v11, v11, -1, v0
-; LMULMAX1-NEXT:    vfcvt.f.x.v v11, v11
+; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; LMULMAX1-NEXT:    vmerge.vim v12, v11, -1, v0
+; LMULMAX1-NEXT:    vfwcvt.f.x.v v11, v12
 ; LMULMAX1-NEXT:    ret
   %z = sitofp <8 x i1> %x to <8 x double>
   ret <8 x double> %z
@@ -370,19 +370,19 @@ define <8 x double> @si2fp_v8i1_v8f64(<8 x i1> %x) {
 define <8 x double> @ui2fp_v8i1_v8f64(<8 x i1> %x) {
 ; LMULMAX8-LABEL: ui2fp_v8i1_v8f64:
 ; LMULMAX8:       # %bb.0:
-; LMULMAX8-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; LMULMAX8-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; LMULMAX8-NEXT:    vmv.v.i v8, 0
-; LMULMAX8-NEXT:    vmerge.vim v8, v8, 1, v0
-; LMULMAX8-NEXT:    vfcvt.f.xu.v v8, v8
+; LMULMAX8-NEXT:    vmerge.vim v12, v8, 1, v0
+; LMULMAX8-NEXT:    vfwcvt.f.xu.v v8, v12
 ; LMULMAX8-NEXT:    ret
 ;
 ; LMULMAX1-LABEL: ui2fp_v8i1_v8f64:
 ; LMULMAX1:       # %bb.0:
 ; LMULMAX1-NEXT:    vmv1r.v v10, v0
-; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; LMULMAX1-NEXT:    vmv.v.i v11, 0
-; LMULMAX1-NEXT:    vmerge.vim v8, v11, 1, v0
-; LMULMAX1-NEXT:    vfcvt.f.xu.v v8, v8
+; LMULMAX1-NEXT:    vmerge.vim v9, v11, 1, v0
+; LMULMAX1-NEXT:    vfwcvt.f.xu.v v8, v9
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; LMULMAX1-NEXT:    vmv.v.i v12, 0
 ; LMULMAX1-NEXT:    vmerge.vim v9, v12, 1, v0
@@ -390,9 +390,9 @@ define <8 x double> @ui2fp_v8i1_v8f64(<8 x i1> %x) {
 ; LMULMAX1-NEXT:    vslidedown.vi v9, v9, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; LMULMAX1-NEXT:    vmsne.vi v0, v9, 0
-; LMULMAX1-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; LMULMAX1-NEXT:    vmerge.vim v9, v11, 1, v0
-; LMULMAX1-NEXT:    vfcvt.f.xu.v v9, v9
+; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; LMULMAX1-NEXT:    vmerge.vim v13, v11, 1, v0
+; LMULMAX1-NEXT:    vfwcvt.f.xu.v v9, v13
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; LMULMAX1-NEXT:    vmv.v.i v13, 0
 ; LMULMAX1-NEXT:    vmv1r.v v0, v10
@@ -401,18 +401,18 @@ define <8 x double> @ui2fp_v8i1_v8f64(<8 x i1> %x) {
 ; LMULMAX1-NEXT:    vslidedown.vi v10, v10, 4
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; LMULMAX1-NEXT:    vmsne.vi v0, v10, 0
-; LMULMAX1-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; LMULMAX1-NEXT:    vmerge.vim v10, v11, 1, v0
-; LMULMAX1-NEXT:    vfcvt.f.xu.v v10, v10
+; LMULMAX1-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; LMULMAX1-NEXT:    vmerge.vim v13, v11, 1, v0
+; LMULMAX1-NEXT:    vfwcvt.f.xu.v v10, v13
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; LMULMAX1-NEXT:    vmerge.vim v12, v12, 1, v0
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
 ; LMULMAX1-NEXT:    vslidedown.vi v12, v12, 2
 ; LMULMAX1-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; LMULMAX1-NEXT:    vmsne.vi v0, v12, 0
-; LMULMAX1-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; LMULMAX1-NEXT:    vmerge.vim v11, v11, 1, v0
-; LMULMAX1-NEXT:    vfcvt.f.xu.v v11, v11
+; LMULMAX1-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; LMULMAX1-NEXT:    vmerge.vim v12, v11, 1, v0
+; LMULMAX1-NEXT:    vfwcvt.f.xu.v v11, v12
 ; LMULMAX1-NEXT:    ret
   %z = uitofp <8 x i1> %x to <8 x double>
   ret <8 x double> %z
@@ -453,10 +453,10 @@ define void @ui2fp_v2i64_v2f16(ptr %x, ptr %y) {
 define <2 x half> @si2fp_v2i1_v2f16(<2 x i1> %x) {
 ; CHECK-LABEL: si2fp_v2i1_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
   %z = sitofp <2 x i1> %x to <2 x half>
   ret <2 x half> %z
@@ -465,10 +465,10 @@ define <2 x half> @si2fp_v2i1_v2f16(<2 x i1> %x) {
 define <2 x half> @ui2fp_v2i1_v2f16(<2 x i1> %x) {
 ; CHECK-LABEL: ui2fp_v2i1_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
   %z = uitofp <2 x i1> %x to <2 x half>
   ret <2 x half> %z
@@ -577,10 +577,10 @@ define void @ui2fp_v8i64_v8f16(ptr %x, ptr %y) {
 define <8 x half> @si2fp_v8i1_v8f16(<8 x i1> %x) {
 ; CHECK-LABEL: si2fp_v8i1_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
   %z = sitofp <8 x i1> %x to <8 x half>
   ret <8 x half> %z
@@ -589,10 +589,10 @@ define <8 x half> @si2fp_v8i1_v8f16(<8 x i1> %x) {
 define <8 x half> @ui2fp_v8i1_v8f16(<8 x i1> %x) {
 ; CHECK-LABEL: ui2fp_v8i1_v8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
   %z = uitofp <8 x i1> %x to <8 x half>
   ret <8 x half> %z

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll
index 4778acba923e..f28bb29febea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll
@@ -7,10 +7,10 @@
 define <vscale x 1 x half> @vsitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 1 x i1> %va to <vscale x 1 x half>
   ret <vscale x 1 x half> %evec
@@ -19,10 +19,10 @@ define <vscale x 1 x half> @vsitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) {
 define <vscale x 1 x half> @vuitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 1 x i1> %va to <vscale x 1 x half>
   ret <vscale x 1 x half> %evec
@@ -31,10 +31,10 @@ define <vscale x 1 x half> @vuitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) {
 define <vscale x 1 x float> @vsitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 1 x i1> %va to <vscale x 1 x float>
   ret <vscale x 1 x float> %evec
@@ -43,10 +43,10 @@ define <vscale x 1 x float> @vsitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) {
 define <vscale x 1 x float> @vuitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 1 x i1> %va to <vscale x 1 x float>
   ret <vscale x 1 x float> %evec
@@ -55,10 +55,10 @@ define <vscale x 1 x float> @vuitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) {
 define <vscale x 1 x double> @vsitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 1 x i1> %va to <vscale x 1 x double>
   ret <vscale x 1 x double> %evec
@@ -67,10 +67,10 @@ define <vscale x 1 x double> @vsitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) {
 define <vscale x 1 x double> @vuitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 1 x i1> %va to <vscale x 1 x double>
   ret <vscale x 1 x double> %evec
@@ -79,10 +79,10 @@ define <vscale x 1 x double> @vuitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) {
 define <vscale x 2 x half> @vsitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 2 x i1> %va to <vscale x 2 x half>
   ret <vscale x 2 x half> %evec
@@ -91,10 +91,10 @@ define <vscale x 2 x half> @vsitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) {
 define <vscale x 2 x half> @vuitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 2 x i1> %va to <vscale x 2 x half>
   ret <vscale x 2 x half> %evec
@@ -103,10 +103,10 @@ define <vscale x 2 x half> @vuitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) {
 define <vscale x 2 x float> @vsitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 2 x i1> %va to <vscale x 2 x float>
   ret <vscale x 2 x float> %evec
@@ -115,10 +115,10 @@ define <vscale x 2 x float> @vsitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) {
 define <vscale x 2 x float> @vuitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 2 x i1> %va to <vscale x 2 x float>
   ret <vscale x 2 x float> %evec
@@ -127,10 +127,10 @@ define <vscale x 2 x float> @vuitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) {
 define <vscale x 2 x double> @vsitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 2 x i1> %va to <vscale x 2 x double>
   ret <vscale x 2 x double> %evec
@@ -139,10 +139,10 @@ define <vscale x 2 x double> @vsitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) {
 define <vscale x 2 x double> @vuitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 2 x i1> %va to <vscale x 2 x double>
   ret <vscale x 2 x double> %evec
@@ -151,10 +151,10 @@ define <vscale x 2 x double> @vuitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) {
 define <vscale x 4 x half> @vsitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 4 x i1> %va to <vscale x 4 x half>
   ret <vscale x 4 x half> %evec
@@ -163,10 +163,10 @@ define <vscale x 4 x half> @vsitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) {
 define <vscale x 4 x half> @vuitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv4i1_nxv4f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v9, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 4 x i1> %va to <vscale x 4 x half>
   ret <vscale x 4 x half> %evec
@@ -175,10 +175,10 @@ define <vscale x 4 x half> @vuitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) {
 define <vscale x 4 x float> @vsitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 4 x i1> %va to <vscale x 4 x float>
   ret <vscale x 4 x float> %evec
@@ -187,10 +187,10 @@ define <vscale x 4 x float> @vsitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) {
 define <vscale x 4 x float> @vuitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv4i1_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 4 x i1> %va to <vscale x 4 x float>
   ret <vscale x 4 x float> %evec
@@ -199,10 +199,10 @@ define <vscale x 4 x float> @vuitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) {
 define <vscale x 4 x double> @vsitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 4 x i1> %va to <vscale x 4 x double>
   ret <vscale x 4 x double> %evec
@@ -211,10 +211,10 @@ define <vscale x 4 x double> @vsitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) {
 define <vscale x 4 x double> @vuitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv4i1_nxv4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 4 x i1> %va to <vscale x 4 x double>
   ret <vscale x 4 x double> %evec
@@ -223,10 +223,10 @@ define <vscale x 4 x double> @vuitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) {
 define <vscale x 8 x half> @vsitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv8i1_nxv8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v10, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 8 x i1> %va to <vscale x 8 x half>
   ret <vscale x 8 x half> %evec
@@ -235,10 +235,10 @@ define <vscale x 8 x half> @vsitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) {
 define <vscale x 8 x half> @vuitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v10, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 8 x i1> %va to <vscale x 8 x half>
   ret <vscale x 8 x half> %evec
@@ -247,10 +247,10 @@ define <vscale x 8 x half> @vuitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) {
 define <vscale x 8 x float> @vsitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv8i1_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 8 x i1> %va to <vscale x 8 x float>
   ret <vscale x 8 x float> %evec
@@ -259,10 +259,10 @@ define <vscale x 8 x float> @vsitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) {
 define <vscale x 8 x float> @vuitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 8 x i1> %va to <vscale x 8 x float>
   ret <vscale x 8 x float> %evec
@@ -271,10 +271,10 @@ define <vscale x 8 x float> @vuitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) {
 define <vscale x 8 x double> @vsitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv8i1_nxv8f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 8 x i1> %va to <vscale x 8 x double>
   ret <vscale x 8 x double> %evec
@@ -283,10 +283,10 @@ define <vscale x 8 x double> @vsitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) {
 define <vscale x 8 x double> @vuitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 8 x i1> %va to <vscale x 8 x double>
   ret <vscale x 8 x double> %evec
@@ -295,10 +295,10 @@ define <vscale x 8 x double> @vuitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) {
 define <vscale x 16 x half> @vsitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv16i1_nxv16f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v12, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 16 x i1> %va to <vscale x 16 x half>
   ret <vscale x 16 x half> %evec
@@ -307,10 +307,10 @@ define <vscale x 16 x half> @vsitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) {
 define <vscale x 16 x half> @vuitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv16i1_nxv16f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v12, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 16 x i1> %va to <vscale x 16 x half>
   ret <vscale x 16 x half> %evec
@@ -319,10 +319,10 @@ define <vscale x 16 x half> @vuitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) {
 define <vscale x 16 x float> @vsitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv16i1_nxv16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 16 x i1> %va to <vscale x 16 x float>
   ret <vscale x 16 x float> %evec
@@ -331,10 +331,10 @@ define <vscale x 16 x float> @vsitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) {
 define <vscale x 16 x float> @vuitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv16i1_nxv16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 16 x i1> %va to <vscale x 16 x float>
   ret <vscale x 16 x float> %evec
@@ -343,10 +343,10 @@ define <vscale x 16 x float> @vuitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) {
 define <vscale x 32 x half> @vsitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) {
 ; CHECK-LABEL: vsitofp_nxv32i1_nxv32f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, -1, v0
-; CHECK-NEXT:    vfcvt.f.x.v v8, v8
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 32 x i1> %va to <vscale x 32 x half>
   ret <vscale x 32 x half> %evec
@@ -355,10 +355,10 @@ define <vscale x 32 x half> @vsitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) {
 define <vscale x 32 x half> @vuitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) {
 ; CHECK-LABEL: vuitofp_nxv32i1_nxv32f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
-; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v8
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 32 x i1> %va to <vscale x 32 x half>
   ret <vscale x 32 x half> %evec
@@ -413,9 +413,9 @@ define <vscale x 1 x half> @vuitofp_nxv1i8_nxv1f16(<vscale x 1 x i8> %va) {
 define <vscale x 1 x float> @vsitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vsitofp_nxv1i8_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vsext.vf4 v9, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vsext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 1 x i8> %va to <vscale x 1 x float>
   ret <vscale x 1 x float> %evec
@@ -424,9 +424,9 @@ define <vscale x 1 x float> @vsitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) {
 define <vscale x 1 x float> @vuitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vuitofp_nxv1i8_nxv1f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vzext.vf4 v9, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v9
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vzext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 1 x i8> %va to <vscale x 1 x float>
   ret <vscale x 1 x float> %evec
@@ -435,9 +435,9 @@ define <vscale x 1 x float> @vuitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) {
 define <vscale x 1 x double> @vsitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vsitofp_nxv1i8_nxv1f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vsext.vf8 v9, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vsext.vf4 v9, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 1 x i8> %va to <vscale x 1 x double>
   ret <vscale x 1 x double> %evec
@@ -446,9 +446,9 @@ define <vscale x 1 x double> @vsitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) {
 define <vscale x 1 x double> @vuitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vuitofp_nxv1i8_nxv1f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vzext.vf8 v9, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v9
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vzext.vf4 v9, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 1 x i8> %va to <vscale x 1 x double>
   ret <vscale x 1 x double> %evec
@@ -479,9 +479,9 @@ define <vscale x 2 x half> @vuitofp_nxv2i8_nxv2f16(<vscale x 2 x i8> %va) {
 define <vscale x 2 x float> @vsitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vsitofp_nxv2i8_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vsext.vf4 v9, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 2 x i8> %va to <vscale x 2 x float>
   ret <vscale x 2 x float> %evec
@@ -490,9 +490,9 @@ define <vscale x 2 x float> @vsitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) {
 define <vscale x 2 x float> @vuitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vuitofp_nxv2i8_nxv2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vzext.vf4 v9, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v9
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 2 x i8> %va to <vscale x 2 x float>
   ret <vscale x 2 x float> %evec
@@ -501,9 +501,9 @@ define <vscale x 2 x float> @vuitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) {
 define <vscale x 2 x double> @vsitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vsitofp_nxv2i8_nxv2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vsext.vf8 v10, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v10
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vsext.vf4 v10, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 2 x i8> %va to <vscale x 2 x double>
   ret <vscale x 2 x double> %evec
@@ -512,9 +512,9 @@ define <vscale x 2 x double> @vsitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) {
 define <vscale x 2 x double> @vuitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vuitofp_nxv2i8_nxv2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vzext.vf8 v10, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v10
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vzext.vf4 v10, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 2 x i8> %va to <vscale x 2 x double>
   ret <vscale x 2 x double> %evec
@@ -545,9 +545,9 @@ define <vscale x 4 x half> @vuitofp_nxv4i8_nxv4f16(<vscale x 4 x i8> %va) {
 define <vscale x 4 x float> @vsitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vsitofp_nxv4i8_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vsext.vf4 v10, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v10
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 4 x i8> %va to <vscale x 4 x float>
   ret <vscale x 4 x float> %evec
@@ -556,9 +556,9 @@ define <vscale x 4 x float> @vsitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) {
 define <vscale x 4 x float> @vuitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vuitofp_nxv4i8_nxv4f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vzext.vf4 v10, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v10
+; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 4 x i8> %va to <vscale x 4 x float>
   ret <vscale x 4 x float> %evec
@@ -567,9 +567,9 @@ define <vscale x 4 x float> @vuitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) {
 define <vscale x 4 x double> @vsitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vsitofp_nxv4i8_nxv4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vsext.vf8 v12, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v12
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsext.vf4 v12, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 4 x i8> %va to <vscale x 4 x double>
   ret <vscale x 4 x double> %evec
@@ -578,9 +578,9 @@ define <vscale x 4 x double> @vsitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) {
 define <vscale x 4 x double> @vuitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vuitofp_nxv4i8_nxv4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vzext.vf8 v12, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v12
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf4 v12, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 4 x i8> %va to <vscale x 4 x double>
   ret <vscale x 4 x double> %evec
@@ -611,9 +611,9 @@ define <vscale x 8 x half> @vuitofp_nxv8i8_nxv8f16(<vscale x 8 x i8> %va) {
 define <vscale x 8 x float> @vsitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vsitofp_nxv8i8_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vsext.vf4 v12, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v12
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsext.vf2 v12, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 8 x i8> %va to <vscale x 8 x float>
   ret <vscale x 8 x float> %evec
@@ -622,9 +622,9 @@ define <vscale x 8 x float> @vsitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) {
 define <vscale x 8 x float> @vuitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vuitofp_nxv8i8_nxv8f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vzext.vf4 v12, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v12
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v12, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 8 x i8> %va to <vscale x 8 x float>
   ret <vscale x 8 x float> %evec
@@ -633,9 +633,9 @@ define <vscale x 8 x float> @vuitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) {
 define <vscale x 8 x double> @vsitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vsitofp_nxv8i8_nxv8f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vsext.vf8 v16, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v16
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsext.vf4 v16, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 8 x i8> %va to <vscale x 8 x double>
   ret <vscale x 8 x double> %evec
@@ -644,9 +644,9 @@ define <vscale x 8 x double> @vsitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) {
 define <vscale x 8 x double> @vuitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vuitofp_nxv8i8_nxv8f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vzext.vf8 v16, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v16
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v16, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 8 x i8> %va to <vscale x 8 x double>
   ret <vscale x 8 x double> %evec
@@ -677,9 +677,9 @@ define <vscale x 16 x half> @vuitofp_nxv16i8_nxv16f16(<vscale x 16 x i8> %va) {
 define <vscale x 16 x float> @vsitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vsitofp_nxv16i8_nxv16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vsext.vf4 v16, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v16
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vsext.vf2 v16, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 16 x i8> %va to <vscale x 16 x float>
   ret <vscale x 16 x float> %evec
@@ -688,9 +688,9 @@ define <vscale x 16 x float> @vsitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) {
 define <vscale x 16 x float> @vuitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vuitofp_nxv16i8_nxv16f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vzext.vf4 v16, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v16
+; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; CHECK-NEXT:    vzext.vf2 v16, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 16 x i8> %va to <vscale x 16 x float>
   ret <vscale x 16 x float> %evec
@@ -763,9 +763,9 @@ define <vscale x 1 x float> @vuitofp_nxv1i16_nxv1f32(<vscale x 1 x i16> %va) {
 define <vscale x 1 x double> @vsitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vsitofp_nxv1i16_nxv1f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vsext.vf4 v9, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v9
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vsext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 1 x i16> %va to <vscale x 1 x double>
   ret <vscale x 1 x double> %evec
@@ -774,9 +774,9 @@ define <vscale x 1 x double> @vsitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) {
 define <vscale x 1 x double> @vuitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vuitofp_nxv1i16_nxv1f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT:    vzext.vf4 v9, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v9
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v9, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v9
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 1 x i16> %va to <vscale x 1 x double>
   ret <vscale x 1 x double> %evec
@@ -827,9 +827,9 @@ define <vscale x 2 x float> @vuitofp_nxv2i16_nxv2f32(<vscale x 2 x i16> %va) {
 define <vscale x 2 x double> @vsitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vsitofp_nxv2i16_nxv2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vsext.vf4 v10, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v10
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vsext.vf2 v10, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v10
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 2 x i16> %va to <vscale x 2 x double>
   ret <vscale x 2 x double> %evec
@@ -838,9 +838,9 @@ define <vscale x 2 x double> @vsitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) {
 define <vscale x 2 x double> @vuitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vuitofp_nxv2i16_nxv2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vzext.vf4 v10, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v10
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v10
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 2 x i16> %va to <vscale x 2 x double>
   ret <vscale x 2 x double> %evec
@@ -891,9 +891,9 @@ define <vscale x 4 x float> @vuitofp_nxv4i16_nxv4f32(<vscale x 4 x i16> %va) {
 define <vscale x 4 x double> @vsitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vsitofp_nxv4i16_nxv4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vsext.vf4 v12, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v12
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsext.vf2 v12, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v12
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 4 x i16> %va to <vscale x 4 x double>
   ret <vscale x 4 x double> %evec
@@ -902,9 +902,9 @@ define <vscale x 4 x double> @vsitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) {
 define <vscale x 4 x double> @vuitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vuitofp_nxv4i16_nxv4f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT:    vzext.vf4 v12, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v12
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v12, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v12
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 4 x i16> %va to <vscale x 4 x double>
   ret <vscale x 4 x double> %evec
@@ -955,9 +955,9 @@ define <vscale x 8 x float> @vuitofp_nxv8i16_nxv8f32(<vscale x 8 x i16> %va) {
 define <vscale x 8 x double> @vsitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vsitofp_nxv8i16_nxv8f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vsext.vf4 v16, v8
-; CHECK-NEXT:    vfcvt.f.x.v v8, v16
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsext.vf2 v16, v8
+; CHECK-NEXT:    vfwcvt.f.x.v v8, v16
 ; CHECK-NEXT:    ret
   %evec = sitofp <vscale x 8 x i16> %va to <vscale x 8 x double>
   ret <vscale x 8 x double> %evec
@@ -966,9 +966,9 @@ define <vscale x 8 x double> @vsitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) {
 define <vscale x 8 x double> @vuitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vuitofp_nxv8i16_nxv8f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vzext.vf4 v16, v8
-; CHECK-NEXT:    vfcvt.f.xu.v v8, v16
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vzext.vf2 v16, v8
+; CHECK-NEXT:    vfwcvt.f.xu.v v8, v16
 ; CHECK-NEXT:    ret
   %evec = uitofp <vscale x 8 x i16> %va to <vscale x 8 x double>
   ret <vscale x 8 x double> %evec


        


More information about the llvm-commits mailing list