[llvm] [RISCV] Fold vector shift of sext/zext to widening multiply (PR #121563)
Piotr Fusik via llvm-commits
llvm-commits at lists.llvm.org
Thu Jan 9 06:01:30 PST 2025
https://github.com/pfusik updated https://github.com/llvm/llvm-project/pull/121563
From 15412e74bf32e8588dab70427ff178720cbf6c26 Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Fri, 3 Jan 2025 12:42:10 +0100
Subject: [PATCH 1/5] [RISCV] Fold vector shift of sext/zext to widening
multiply
(shl (sext X), C) -> (vwmulsu X, 1u << C)
(shl (zext X), C) -> (vwmulu X, 1u << C)
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 76 ++-
llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll | 202 ++++----
.../test/CodeGen/RISCV/rvv/mscatter-sdnode.ll | 214 ++++-----
.../test/CodeGen/RISCV/rvv/vpgather-sdnode.ll | 423 ++++++++---------
.../CodeGen/RISCV/rvv/vpscatter-sdnode.ll | 431 ++++++++----------
llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll | 21 +-
llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll | 34 +-
7 files changed, 734 insertions(+), 667 deletions(-)
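
Before the patch body, a quick sketch of the shape of code this combine targets. As the new combineSHL below shows, the fold only fires when the shift amount is a constant splat of at least 2 (a shift by 1 is left to the existing vwadd/vwaddu folds), strictly smaller than the narrow element width, and the sext/zext exactly doubles the element size. A minimal, hypothetical IR example — not one of the test files touched below; the function name and the exact codegen in the comments are illustrative only:

; With this combine, the zext + shl pair is expected to select a single
; widening multiply instead of vzext.vf2 + vsll.vi, roughly:
;   li        a0, 4
;   vsetvli   a1, zero, e8, m1, ta, ma
;   vwmulu.vx v10, v8, a0
define <vscale x 8 x i16> @shl2_zext_nxv8i8(<vscale x 8 x i8> %x) {
  %ext = zext <vscale x 8 x i8> %x to <vscale x 8 x i16>
  %shl = shl <vscale x 8 x i16> %ext, splat (i16 2)
  ret <vscale x 8 x i16> %shl
}

The same pattern with sext instead of zext would select vwmulsu.vx, matching the many gather/scatter index computations updated in the tests below.
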
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 04dd23d9cdaa20..955a15393ca8a1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17341,6 +17341,78 @@ static SDValue combineScalarCTPOPToVCPOP(SDNode *N, SelectionDAG &DAG,
return DAG.getZExtOrTrunc(Pop, DL, VT);
}
+static SDValue combineSHL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
+ const RISCVSubtarget &Subtarget) {
+ if (DCI.isBeforeLegalize())
+ return SDValue();
+
+ // (shl (zext x), y) -> (vwsll x, y)
+ if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
+ return V;
+
+ // (shl (sext x), C) -> (vwmulsu x, 1u << C)
+ // (shl (zext x), C) -> (vwmulu x, 1u << C)
+
+ SDValue LHS = N->getOperand(0);
+ if (!LHS.hasOneUse())
+ return SDValue();
+ unsigned Opcode;
+ switch (LHS.getOpcode()) {
+ case ISD::SIGN_EXTEND:
+ Opcode = RISCVISD::VWMULSU_VL;
+ break;
+ case ISD::ZERO_EXTEND:
+ Opcode = RISCVISD::VWMULU_VL;
+ break;
+ default:
+ return SDValue();
+ }
+
+ SDValue RHS = N->getOperand(1);
+ APInt ShAmt;
+ if (!ISD::isConstantSplatVector(RHS.getNode(), ShAmt))
+ return SDValue();
+
+ // Better foldings:
+ // (shl (sext x), 1) -> (vwadd x, x)
+ // (shl (zext x), 1) -> (vwaddu x, x)
+ uint64_t ShAmtInt = ShAmt.getZExtValue();
+ if (ShAmtInt <= 1)
+ return SDValue();
+
+ SDValue NarrowOp = LHS.getOperand(0);
+ EVT NarrowVT = NarrowOp.getValueType();
+ uint64_t NarrowBits = NarrowVT.getScalarSizeInBits();
+ if (ShAmtInt >= NarrowBits)
+ return SDValue();
+ EVT VT = N->getValueType(0);
+ if (NarrowBits * 2 != VT.getScalarSizeInBits())
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc DL(N);
+ SDValue Passthru, Mask, VL;
+ switch (N->getOpcode()) {
+ case ISD::SHL:
+ if (!VT.isScalableVector())
+ return SDValue();
+ Passthru = DAG.getUNDEF(VT);
+ std::tie(Mask, VL) =
+ getDefaultScalableVLOps(VT.getSimpleVT(), DL, DAG, Subtarget);
+ break;
+ case RISCVISD::SHL_VL:
+ Passthru = N->getOperand(2);
+ Mask = N->getOperand(3);
+ VL = N->getOperand(4);
+ break;
+ default:
+ llvm_unreachable("Expected SHL");
+ }
+ return DAG.getNode(Opcode, DL, VT, NarrowOp,
+ DAG.getConstant(1ULL << ShAmtInt, SDLoc(RHS), NarrowVT),
+ Passthru, Mask, VL);
+}
+
SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
@@ -17970,7 +18042,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
break;
}
case RISCVISD::SHL_VL:
- if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
+ if (SDValue V = combineSHL(N, DCI, Subtarget))
return V;
[[fallthrough]];
case RISCVISD::SRA_VL:
@@ -17995,7 +18067,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::SRL:
case ISD::SHL: {
if (N->getOpcode() == ISD::SHL) {
- if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
+ if (SDValue V = combineSHL(N, DCI, Subtarget))
return V;
}
SDValue ShAmt = N->getOperand(1);
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index 9ee2324f615dd8..0fad09f27007c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -775,11 +775,11 @@ define <vscale x 8 x i32> @mgather_baseidx_sext_nxv8i8_nxv8i32(ptr %base, <vscal
define <vscale x 8 x i32> @mgather_baseidx_zext_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a1
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT: vluxei16.v v12, (a0), v8, v0.t
+; CHECK-NEXT: vluxei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
@@ -791,10 +791,11 @@ define <vscale x 8 x i32> @mgather_baseidx_zext_nxv8i8_nxv8i32(ptr %base, <vscal
define <vscale x 8 x i32> @mgather_baseidx_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; RV32-NEXT: vsext.vf2 v16, v8
-; RV32-NEXT: vsll.vi v8, v16, 2
-; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
@@ -815,10 +816,11 @@ define <vscale x 8 x i32> @mgather_baseidx_nxv8i16_nxv8i32(ptr %base, <vscale x
define <vscale x 8 x i32> @mgather_baseidx_sext_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; RV32-NEXT: vsext.vf2 v16, v8
-; RV32-NEXT: vsll.vi v8, v16, 2
-; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
@@ -840,10 +842,11 @@ define <vscale x 8 x i32> @mgather_baseidx_sext_nxv8i16_nxv8i32(ptr %base, <vsca
define <vscale x 8 x i32> @mgather_baseidx_zext_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vsll.vi v8, v16, 2
-; CHECK-NEXT: vluxei32.v v12, (a0), v8, v0.t
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v16, v8, a1
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; CHECK-NEXT: vluxei32.v v12, (a0), v16, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
@@ -863,10 +866,9 @@ define <vscale x 8 x i32> @mgather_baseidx_nxv8i32(ptr %base, <vscale x 8 x i32>
;
; RV64-LABEL: mgather_baseidx_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; RV64-NEXT: li a1, 4
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV64-NEXT: vwmulsu.vx v16, v8, a1
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
@@ -1034,11 +1036,11 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i8_nxv8i64(ptr %base, <vscal
define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vluxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: vluxei16.v v16, (a0), v10, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
@@ -1050,11 +1052,11 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i8_nxv8i64(ptr %base, <vscal
define <vscale x 8 x i64> @mgather_baseidx_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
+; RV32-NEXT: vluxei32.v v16, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
@@ -1074,11 +1076,11 @@ define <vscale x 8 x i64> @mgather_baseidx_nxv8i16_nxv8i64(ptr %base, <vscale x
define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
+; RV32-NEXT: vluxei32.v v16, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
@@ -1099,11 +1101,11 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i16_nxv8i64(ptr %base, <vsca
define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf2 v12, v8
-; CHECK-NEXT: vsll.vi v8, v12, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v12, v8, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vluxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: vluxei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
@@ -1124,10 +1126,11 @@ define <vscale x 8 x i64> @mgather_baseidx_nxv8i32_nxv8i64(ptr %base, <vscale x
;
; RV64-LABEL: mgather_baseidx_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT: vsext.vf2 v24, v8
-; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v8, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i32> %idxs
@@ -1147,10 +1150,11 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i32_nxv8i64(ptr %base, <vsca
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT: vsext.vf2 v24, v8
-; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v8, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
@@ -1171,10 +1175,11 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i32_nxv8i64(ptr %base, <vsca
;
; RV64-LABEL: mgather_baseidx_zext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT: vzext.vf2 v24, v8
-; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v8, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
@@ -1845,11 +1850,11 @@ define <vscale x 8 x float> @mgather_baseidx_sext_nxv8i8_nxv8f32(ptr %base, <vsc
define <vscale x 8 x float> @mgather_baseidx_zext_nxv8i8_nxv8f32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a1
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT: vluxei16.v v12, (a0), v8, v0.t
+; CHECK-NEXT: vluxei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
@@ -1861,10 +1866,11 @@ define <vscale x 8 x float> @mgather_baseidx_zext_nxv8i8_nxv8f32(ptr %base, <vsc
define <vscale x 8 x float> @mgather_baseidx_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; RV32-NEXT: vsext.vf2 v16, v8
-; RV32-NEXT: vsll.vi v8, v16, 2
-; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
@@ -1885,10 +1891,11 @@ define <vscale x 8 x float> @mgather_baseidx_nxv8i16_nxv8f32(ptr %base, <vscale
define <vscale x 8 x float> @mgather_baseidx_sext_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; RV32-NEXT: vsext.vf2 v16, v8
-; RV32-NEXT: vsll.vi v8, v16, 2
-; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
@@ -1910,10 +1917,11 @@ define <vscale x 8 x float> @mgather_baseidx_sext_nxv8i16_nxv8f32(ptr %base, <vs
define <vscale x 8 x float> @mgather_baseidx_zext_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vsll.vi v8, v16, 2
-; CHECK-NEXT: vluxei32.v v12, (a0), v8, v0.t
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v16, v8, a1
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; CHECK-NEXT: vluxei32.v v12, (a0), v16, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
@@ -1933,10 +1941,9 @@ define <vscale x 8 x float> @mgather_baseidx_nxv8f32(ptr %base, <vscale x 8 x i3
;
; RV64-LABEL: mgather_baseidx_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; RV64-NEXT: li a1, 4
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV64-NEXT: vwmulsu.vx v16, v8, a1
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
@@ -2104,11 +2111,11 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i8_nxv8f64(ptr %base, <vs
define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i8_nxv8f64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vluxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: vluxei16.v v16, (a0), v10, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
@@ -2120,11 +2127,11 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i8_nxv8f64(ptr %base, <vs
define <vscale x 8 x double> @mgather_baseidx_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
+; RV32-NEXT: vluxei32.v v16, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
@@ -2144,11 +2151,11 @@ define <vscale x 8 x double> @mgather_baseidx_nxv8i16_nxv8f64(ptr %base, <vscale
define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
+; RV32-NEXT: vluxei32.v v16, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
@@ -2169,11 +2176,11 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i16_nxv8f64(ptr %base, <v
define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf2 v12, v8
-; CHECK-NEXT: vsll.vi v8, v12, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v12, v8, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vluxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: vluxei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
@@ -2194,10 +2201,11 @@ define <vscale x 8 x double> @mgather_baseidx_nxv8i32_nxv8f64(ptr %base, <vscale
;
; RV64-LABEL: mgather_baseidx_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT: vsext.vf2 v24, v8
-; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v8, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i32> %idxs
@@ -2217,10 +2225,11 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i32_nxv8f64(ptr %base, <v
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT: vsext.vf2 v24, v8
-; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v8, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
@@ -2241,10 +2250,11 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i32_nxv8f64(ptr %base, <v
;
; RV64-LABEL: mgather_baseidx_zext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT: vzext.vf2 v24, v8
-; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v8, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
index 77a1f508d22184..3cf7cc9cb51526 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -581,11 +581,11 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, ptr %
define void @mscatter_baseidx_zext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v14, v12
-; CHECK-NEXT: vsll.vi v12, v14, 2
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v14, v12, a1
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: vsoxei16.v v8, (a0), v14, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
@@ -596,10 +596,11 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, ptr %
define void @mscatter_baseidx_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8i32:
@@ -618,10 +619,11 @@ define void @mscatter_baseidx_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %base
define void @mscatter_baseidx_sext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i32:
@@ -641,10 +643,11 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr
define void @mscatter_baseidx_zext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf2 v16, v12
-; CHECK-NEXT: vsll.vi v12, v16, 2
-; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v16, v12, a1
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
@@ -662,10 +665,9 @@ define void @mscatter_baseidx_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscal
;
; RV64-LABEL: mscatter_baseidx_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v12
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: li a1, 4
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v12, a1
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %idxs
@@ -817,11 +819,11 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, ptr %
define void @mscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v18, v16
-; CHECK-NEXT: vsll.vi v16, v18, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v18, v16, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -832,11 +834,11 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, ptr %
define void @mscatter_baseidx_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8i64:
@@ -854,11 +856,11 @@ define void @mscatter_baseidx_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %base
define void @mscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i64:
@@ -877,11 +879,11 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr
define void @mscatter_baseidx_zext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf2 v20, v16
-; CHECK-NEXT: vsll.vi v16, v20, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v20, v16, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -900,10 +902,11 @@ define void @mscatter_baseidx_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, ptr %base
;
; RV64-LABEL: mscatter_baseidx_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i32> %idxs
call void @llvm.masked.scatter.nxv8i64.nxv8p0(<vscale x 8 x i64> %val, <vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m)
@@ -921,10 +924,11 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, ptr
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -943,10 +947,11 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, ptr
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v16, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1475,11 +1480,11 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, ptr
define void @mscatter_baseidx_zext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v14, v12
-; CHECK-NEXT: vsll.vi v12, v14, 2
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v14, v12, a1
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: vsoxei16.v v8, (a0), v14, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
@@ -1490,10 +1495,11 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, ptr
define void @mscatter_baseidx_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8f32:
@@ -1512,10 +1518,11 @@ define void @mscatter_baseidx_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %ba
define void @mscatter_baseidx_sext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f32:
@@ -1535,10 +1542,11 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, pt
define void @mscatter_baseidx_zext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf2 v16, v12
-; CHECK-NEXT: vsll.vi v12, v16, 2
-; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v16, v12, a1
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
@@ -1556,10 +1564,9 @@ define void @mscatter_baseidx_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vsc
;
; RV64-LABEL: mscatter_baseidx_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v12
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: li a1, 4
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v12, a1
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %idxs
@@ -1711,11 +1718,11 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, pt
define void @mscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v18, v16
-; CHECK-NEXT: vsll.vi v16, v18, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v18, v16, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1726,11 +1733,11 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, pt
define void @mscatter_baseidx_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8f64:
@@ -1748,11 +1755,11 @@ define void @mscatter_baseidx_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %b
define void @mscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f64:
@@ -1771,11 +1778,11 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, p
define void @mscatter_baseidx_zext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf2 v20, v16
-; CHECK-NEXT: vsll.vi v16, v20, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v20, v16, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1794,10 +1801,11 @@ define void @mscatter_baseidx_nxv8i32_nxv8f64(<vscale x 8 x double> %val, ptr %b
;
; RV64-LABEL: mscatter_baseidx_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i32> %idxs
call void @llvm.masked.scatter.nxv8f64.nxv8p0(<vscale x 8 x double> %val, <vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m)
@@ -1815,10 +1823,11 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, p
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1837,10 +1846,11 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, p
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v16, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -2015,15 +2025,15 @@ define void @mscatter_baseidx_nxv16i16_nxv16f64(<vscale x 8 x double> %val0, <vs
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: srli a1, a1, 3
; RV32-NEXT: vslidedown.vx v7, v0, a1
-; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; RV32-NEXT: vsext.vf2 v24, v8
-; RV32-NEXT: vsll.vi v8, v24, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; RV32-NEXT: vwmulsu.vx v24, v8, a1
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v16, (a0), v8, v0.t
+; RV32-NEXT: vsoxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsoxei32.v v16, (a0), v12, v0.t
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
index abe7bdad8125ae..1007d1ce649cc4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -799,18 +799,18 @@ define <vscale x 8 x i32> @vpgather_baseidx_sext_nxv8i8_nxv8i32(ptr %base, <vsca
define <vscale x 8 x i32> @vpgather_baseidx_zext_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v10, v8
-; RV32-NEXT: vsll.vi v12, v10, 2
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v12, v8, a2
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV32-NEXT: vluxei16.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v10, v8
-; RV64-NEXT: vsll.vi v12, v10, 2
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v12, v8, a2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV64-NEXT: vluxei16.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
@@ -823,10 +823,11 @@ define <vscale x 8 x i32> @vpgather_baseidx_zext_nxv8i8_nxv8i32(ptr %base, <vsca
define <vscale x 8 x i32> @vpgather_baseidx_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8i32:
@@ -845,10 +846,11 @@ define <vscale x 8 x i32> @vpgather_baseidx_nxv8i16_nxv8i32(ptr %base, <vscale x
define <vscale x 8 x i32> @vpgather_baseidx_sext_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i32:
@@ -868,18 +870,20 @@ define <vscale x 8 x i32> @vpgather_baseidx_sext_nxv8i16_nxv8i32(ptr %base, <vsc
define <vscale x 8 x i32> @vpgather_baseidx_zext_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v12, v8, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v12, v8
-; RV64-NEXT: vsll.vi v8, v12, 2
-; RV64-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v12, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
@@ -897,10 +901,9 @@ define <vscale x 8 x i32> @vpgather_baseidx_nxv8i32(ptr %base, <vscale x 8 x i32
;
; RV64-LABEL: vpgather_baseidx_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %idxs
@@ -1049,18 +1052,18 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i8_nxv8i64(ptr %base, <vsca
define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v10, v8
-; RV32-NEXT: vsll.vi v16, v10, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei16.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v10, v8
-; RV64-NEXT: vsll.vi v16, v10, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vluxei16.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -1073,9 +1076,9 @@ define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i8_nxv8i64(ptr %base, <vsca
define <vscale x 8 x i64> @vpgather_baseidx_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1095,9 +1098,9 @@ define <vscale x 8 x i64> @vpgather_baseidx_nxv8i16_nxv8i64(ptr %base, <vscale x
define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1118,18 +1121,18 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i16_nxv8i64(ptr %base, <vsc
define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v12, v8
-; RV64-NEXT: vsll.vi v16, v12, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -1150,10 +1153,11 @@ define <vscale x 8 x i64> @vpgather_baseidx_nxv8i32_nxv8i64(ptr %base, <vscale x
;
; RV64-LABEL: vpgather_baseidx_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i32> %idxs
%v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
@@ -1171,10 +1175,11 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i32_nxv8i64(ptr %base, <vsc
;
; RV64-LABEL: vpgather_baseidx_sext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1193,10 +1198,11 @@ define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i32_nxv8i64(ptr %base, <vsc
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1742,18 +1748,18 @@ define <vscale x 8 x float> @vpgather_baseidx_sext_nxv8i8_nxv8f32(ptr %base, <vs
define <vscale x 8 x float> @vpgather_baseidx_zext_nxv8i8_nxv8f32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v10, v8
-; RV32-NEXT: vsll.vi v12, v10, 2
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v12, v8, a2
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV32-NEXT: vluxei16.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v10, v8
-; RV64-NEXT: vsll.vi v12, v10, 2
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v12, v8, a2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV64-NEXT: vluxei16.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
@@ -1766,10 +1772,11 @@ define <vscale x 8 x float> @vpgather_baseidx_zext_nxv8i8_nxv8f32(ptr %base, <vs
define <vscale x 8 x float> @vpgather_baseidx_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8f32:
@@ -1788,10 +1795,11 @@ define <vscale x 8 x float> @vpgather_baseidx_nxv8i16_nxv8f32(ptr %base, <vscale
define <vscale x 8 x float> @vpgather_baseidx_sext_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f32:
@@ -1811,18 +1819,20 @@ define <vscale x 8 x float> @vpgather_baseidx_sext_nxv8i16_nxv8f32(ptr %base, <v
define <vscale x 8 x float> @vpgather_baseidx_zext_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v12, v8, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v12, v8
-; RV64-NEXT: vsll.vi v8, v12, 2
-; RV64-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v12, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
@@ -1840,10 +1850,9 @@ define <vscale x 8 x float> @vpgather_baseidx_nxv8f32(ptr %base, <vscale x 8 x i
;
; RV64-LABEL: vpgather_baseidx_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %idxs
@@ -1992,18 +2001,18 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i8_nxv6f64(ptr %base, <v
define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i8_nxv6f64(ptr %base, <vscale x 6 x i8> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv6i8_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v10, v8
-; RV32-NEXT: vsll.vi v16, v10, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei16.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv6i8_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v10, v8
-; RV64-NEXT: vsll.vi v16, v10, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vluxei16.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2016,9 +2025,9 @@ define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i8_nxv6f64(ptr %base, <v
define <vscale x 6 x double> @vpgather_baseidx_nxv6i16_nxv6f64(ptr %base, <vscale x 6 x i16> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv6i16_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2038,9 +2047,9 @@ define <vscale x 6 x double> @vpgather_baseidx_nxv6i16_nxv6f64(ptr %base, <vscal
define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i16_nxv6f64(ptr %base, <vscale x 6 x i16> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv6i16_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2061,18 +2070,18 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i16_nxv6f64(ptr %base, <
define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i16_nxv6f64(ptr %base, <vscale x 6 x i16> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv6i16_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv6i16_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v12, v8
-; RV64-NEXT: vsll.vi v16, v12, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2093,10 +2102,11 @@ define <vscale x 6 x double> @vpgather_baseidx_nxv6i32_nxv6f64(ptr %base, <vscal
;
; RV64-LABEL: vpgather_baseidx_nxv6i32_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i32> %idxs
%v = call <vscale x 6 x double> @llvm.vp.gather.nxv6f64.nxv6p0(<vscale x 6 x ptr> %ptrs, <vscale x 6 x i1> %m, i32 %evl)
@@ -2114,10 +2124,11 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i32_nxv6f64(ptr %base, <
;
; RV64-LABEL: vpgather_baseidx_sext_nxv6i32_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 6 x i32> %idxs to <vscale x 6 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i64> %eidxs
@@ -2136,10 +2147,11 @@ define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i32_nxv6f64(ptr %base, <
;
; RV64-LABEL: vpgather_baseidx_zext_nxv6i32_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 6 x i32> %idxs to <vscale x 6 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i64> %eidxs
@@ -2235,18 +2247,18 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i8_nxv8f64(ptr %base, <v
define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i8_nxv8f64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v10, v8
-; RV32-NEXT: vsll.vi v16, v10, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei16.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v10, v8
-; RV64-NEXT: vsll.vi v16, v10, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vluxei16.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2259,9 +2271,9 @@ define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i8_nxv8f64(ptr %base, <v
define <vscale x 8 x double> @vpgather_baseidx_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2281,9 +2293,9 @@ define <vscale x 8 x double> @vpgather_baseidx_nxv8i16_nxv8f64(ptr %base, <vscal
define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2304,18 +2316,18 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i16_nxv8f64(ptr %base, <
define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v12, v8
-; RV64-NEXT: vsll.vi v16, v12, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2336,10 +2348,11 @@ define <vscale x 8 x double> @vpgather_baseidx_nxv8i32_nxv8f64(ptr %base, <vscal
;
; RV64-LABEL: vpgather_baseidx_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i32> %idxs
%v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
@@ -2357,10 +2370,11 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i32_nxv8f64(ptr %base, <
;
; RV64-LABEL: vpgather_baseidx_sext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -2379,10 +2393,11 @@ define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i32_nxv8f64(ptr %base, <
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -2465,27 +2480,26 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv1r.v v12, v0
-; RV32-NEXT: vsext.vf2 v16, v8
+; RV32-NEXT: li a3, 8
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: vsll.vi v24, v16, 3
-; RV32-NEXT: sub a3, a1, a2
-; RV32-NEXT: srli a4, a2, 3
-; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a4
-; RV32-NEXT: sltu a4, a1, a3
-; RV32-NEXT: addi a4, a4, -1
-; RV32-NEXT: and a3, a4, a3
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
+; RV32-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV32-NEXT: vwmulsu.vx v24, v8, a3
+; RV32-NEXT: mv a3, a1
; RV32-NEXT: bltu a1, a2, .LBB112_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: mv a1, a2
+; RV32-NEXT: mv a3, a2
; RV32-NEXT: .LBB112_2:
-; RV32-NEXT: vmv1r.v v0, v12
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
+; RV32-NEXT: sub a3, a1, a2
+; RV32-NEXT: srli a2, a2, 3
+; RV32-NEXT: sltu a1, a1, a3
+; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a2
+; RV32-NEXT: and a1, a1, a3
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_nxv16i16_nxv16f64:
@@ -2523,27 +2537,26 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv1r.v v12, v0
-; RV32-NEXT: vsext.vf2 v16, v8
+; RV32-NEXT: li a3, 8
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: vsll.vi v24, v16, 3
-; RV32-NEXT: sub a3, a1, a2
-; RV32-NEXT: srli a4, a2, 3
-; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a4
-; RV32-NEXT: sltu a4, a1, a3
-; RV32-NEXT: addi a4, a4, -1
-; RV32-NEXT: and a3, a4, a3
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
+; RV32-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV32-NEXT: vwmulsu.vx v24, v8, a3
+; RV32-NEXT: mv a3, a1
; RV32-NEXT: bltu a1, a2, .LBB113_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: mv a1, a2
+; RV32-NEXT: mv a3, a2
; RV32-NEXT: .LBB113_2:
-; RV32-NEXT: vmv1r.v v0, v12
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
+; RV32-NEXT: sub a3, a1, a2
+; RV32-NEXT: srli a2, a2, 3
+; RV32-NEXT: sltu a1, a1, a3
+; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a2
+; RV32-NEXT: and a1, a1, a3
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64:
@@ -2582,52 +2595,50 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv1r.v v12, v0
-; RV32-NEXT: vzext.vf2 v16, v8
+; RV32-NEXT: li a3, 8
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: vsll.vi v24, v16, 3
-; RV32-NEXT: sub a3, a1, a2
-; RV32-NEXT: srli a4, a2, 3
-; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a4
-; RV32-NEXT: sltu a4, a1, a3
-; RV32-NEXT: addi a4, a4, -1
-; RV32-NEXT: and a3, a4, a3
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
+; RV32-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV32-NEXT: vwmulu.vx v24, v8, a3
+; RV32-NEXT: mv a3, a1
; RV32-NEXT: bltu a1, a2, .LBB114_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: mv a1, a2
+; RV32-NEXT: mv a3, a2
; RV32-NEXT: .LBB114_2:
-; RV32-NEXT: vmv1r.v v0, v12
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
+; RV32-NEXT: sub a3, a1, a2
+; RV32-NEXT: srli a2, a2, 3
+; RV32-NEXT: sltu a1, a1, a3
+; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a2
+; RV32-NEXT: and a1, a1, a3
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv16i16_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e32, m8, ta, ma
-; RV64-NEXT: vmv1r.v v12, v0
-; RV64-NEXT: vzext.vf2 v16, v8
+; RV64-NEXT: li a3, 8
; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: vsll.vi v24, v16, 3
-; RV64-NEXT: sub a3, a1, a2
-; RV64-NEXT: srli a4, a2, 3
-; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vx v0, v0, a4
-; RV64-NEXT: sltu a4, a1, a3
-; RV64-NEXT: addi a4, a4, -1
-; RV64-NEXT: and a3, a4, a3
-; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV64-NEXT: vluxei32.v v16, (a0), v28, v0.t
+; RV64-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v8, a3
+; RV64-NEXT: mv a3, a1
; RV64-NEXT: bltu a1, a2, .LBB114_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: mv a1, a2
+; RV64-NEXT: mv a3, a2
; RV64-NEXT: .LBB114_2:
-; RV64-NEXT: vmv1r.v v0, v12
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v8, (a0), v24, v0.t
+; RV64-NEXT: sub a3, a1, a2
+; RV64-NEXT: srli a2, a2, 3
+; RV64-NEXT: sltu a1, a1, a3
+; RV64-NEXT: addi a1, a1, -1
+; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vx v0, v0, a2
+; RV64-NEXT: and a1, a1, a3
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 16 x i16> %idxs to <vscale x 16 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 16 x i64> %eidxs
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
index 647e3965b7ec27..2cf6248c175981 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
@@ -659,20 +659,20 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, ptr
define void @vpscatter_baseidx_zext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v14, v12
-; RV32-NEXT: vsll.vi v12, v14, 2
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v14, v12, a2
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV32-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; RV32-NEXT: vsoxei16.v v8, (a0), v14, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v14, v12
-; RV64-NEXT: vsll.vi v12, v14, 2
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v14, v12, a2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV64-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; RV64-NEXT: vsoxei16.v v8, (a0), v14, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
@@ -683,10 +683,11 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, ptr
define void @vpscatter_baseidx_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8i32:
@@ -705,10 +706,11 @@ define void @vpscatter_baseidx_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %bas
define void @vpscatter_baseidx_sext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i32:
@@ -728,18 +730,20 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr
define void @vpscatter_baseidx_zext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v12, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v16, v12
-; RV64-NEXT: vsll.vi v12, v16, 2
-; RV64-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v12, a2
+; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
@@ -757,10 +761,9 @@ define void @vpscatter_baseidx_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vsca
;
; RV64-LABEL: vpscatter_baseidx_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v12
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v12, a2
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %idxs
@@ -904,20 +907,20 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, ptr
define void @vpscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v18, v16
-; RV32-NEXT: vsll.vi v16, v18, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v18, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v18, v16
-; RV64-NEXT: vsll.vi v16, v18, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v18, v16, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -928,11 +931,11 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, ptr
define void @vpscatter_baseidx_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8i64:
@@ -950,11 +953,11 @@ define void @vpscatter_baseidx_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %bas
define void @vpscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i64:
@@ -973,20 +976,20 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr
define void @vpscatter_baseidx_zext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v20, v16
-; RV64-NEXT: vsll.vi v16, v20, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v20, v16, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1005,10 +1008,11 @@ define void @vpscatter_baseidx_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, ptr %bas
;
; RV64-LABEL: vpscatter_baseidx_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i32> %idxs
call void @llvm.vp.scatter.nxv8i64.nxv8p0(<vscale x 8 x i64> %val, <vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
@@ -1026,10 +1030,11 @@ define void @vpscatter_baseidx_sext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, ptr
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1048,10 +1053,11 @@ define void @vpscatter_baseidx_zext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, ptr
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1572,20 +1578,20 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, pt
define void @vpscatter_baseidx_zext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v14, v12
-; RV32-NEXT: vsll.vi v12, v14, 2
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v14, v12, a2
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV32-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; RV32-NEXT: vsoxei16.v v8, (a0), v14, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v14, v12
-; RV64-NEXT: vsll.vi v12, v14, 2
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v14, v12, a2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV64-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; RV64-NEXT: vsoxei16.v v8, (a0), v14, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
@@ -1596,10 +1602,11 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, pt
define void @vpscatter_baseidx_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8f32:
@@ -1618,10 +1625,11 @@ define void @vpscatter_baseidx_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %b
define void @vpscatter_baseidx_sext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f32:
@@ -1641,18 +1649,20 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, p
define void @vpscatter_baseidx_zext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v12, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v16, v12
-; RV64-NEXT: vsll.vi v12, v16, 2
-; RV64-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v12, a2
+; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
@@ -1670,10 +1680,9 @@ define void @vpscatter_baseidx_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vs
;
; RV64-LABEL: vpscatter_baseidx_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v12
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v12, a2
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %idxs
@@ -1817,20 +1826,20 @@ define void @vpscatter_baseidx_sext_nxv6i8_nxv6f64(<vscale x 6 x double> %val, p
define void @vpscatter_baseidx_zext_nxv6i8_nxv6f64(<vscale x 6 x double> %val, ptr %base, <vscale x 6 x i8> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv6i8_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v18, v16
-; RV32-NEXT: vsll.vi v16, v18, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v18, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv6i8_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v18, v16
-; RV64-NEXT: vsll.vi v16, v18, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v18, v16, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 6 x i8> %idxs to <vscale x 6 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i64> %eidxs
@@ -1841,11 +1850,11 @@ define void @vpscatter_baseidx_zext_nxv6i8_nxv6f64(<vscale x 6 x double> %val, p
define void @vpscatter_baseidx_nxv6i16_nxv6f64(<vscale x 6 x double> %val, ptr %base, <vscale x 6 x i16> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_nxv6i16_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_nxv6i16_nxv6f64:
@@ -1863,11 +1872,11 @@ define void @vpscatter_baseidx_nxv6i16_nxv6f64(<vscale x 6 x double> %val, ptr %
define void @vpscatter_baseidx_sext_nxv6i16_nxv6f64(<vscale x 6 x double> %val, ptr %base, <vscale x 6 x i16> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_sext_nxv6i16_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv6i16_nxv6f64:
@@ -1886,20 +1895,20 @@ define void @vpscatter_baseidx_sext_nxv6i16_nxv6f64(<vscale x 6 x double> %val,
define void @vpscatter_baseidx_zext_nxv6i16_nxv6f64(<vscale x 6 x double> %val, ptr %base, <vscale x 6 x i16> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv6i16_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv6i16_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v20, v16
-; RV64-NEXT: vsll.vi v16, v20, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v20, v16, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 6 x i16> %idxs to <vscale x 6 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i64> %eidxs
@@ -1918,10 +1927,11 @@ define void @vpscatter_baseidx_nxv6i32_nxv6f64(<vscale x 6 x double> %val, ptr %
;
; RV64-LABEL: vpscatter_baseidx_nxv6i32_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i32> %idxs
call void @llvm.vp.scatter.nxv6f64.nxv6p0(<vscale x 6 x double> %val, <vscale x 6 x ptr> %ptrs, <vscale x 6 x i1> %m, i32 %evl)
@@ -1939,10 +1949,11 @@ define void @vpscatter_baseidx_sext_nxv6i32_nxv6f64(<vscale x 6 x double> %val,
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv6i32_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 6 x i32> %idxs to <vscale x 6 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i64> %eidxs
@@ -1961,10 +1972,11 @@ define void @vpscatter_baseidx_zext_nxv6i32_nxv6f64(<vscale x 6 x double> %val,
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv6i32_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 6 x i32> %idxs to <vscale x 6 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i64> %eidxs
@@ -2059,20 +2071,20 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, p
define void @vpscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v18, v16
-; RV32-NEXT: vsll.vi v16, v18, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v18, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v18, v16
-; RV64-NEXT: vsll.vi v16, v18, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v18, v16, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -2083,11 +2095,11 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, p
define void @vpscatter_baseidx_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8f64:
@@ -2105,11 +2117,11 @@ define void @vpscatter_baseidx_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %
define void @vpscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f64:
@@ -2128,20 +2140,20 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val,
define void @vpscatter_baseidx_zext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v20, v16
-; RV64-NEXT: vsll.vi v16, v20, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v20, v16, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -2160,10 +2172,11 @@ define void @vpscatter_baseidx_nxv8i32_nxv8f64(<vscale x 8 x double> %val, ptr %
;
; RV64-LABEL: vpscatter_baseidx_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i32> %idxs
call void @llvm.vp.scatter.nxv8f64.nxv8p0(<vscale x 8 x double> %val, <vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
@@ -2181,10 +2194,11 @@ define void @vpscatter_baseidx_sext_nxv8i32_nxv8f64(<vscale x 8 x double> %val,
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -2203,10 +2217,11 @@ define void @vpscatter_baseidx_zext_nxv8i32_nxv8f64(<vscale x 8 x double> %val,
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -2307,25 +2322,16 @@ define void @vpscatter_nxv16f64(<vscale x 16 x double> %val, <vscale x 16 x ptr>
define void @vpscatter_baseidx_nxv16i16_nxv16f64(<vscale x 16 x double> %val, ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -16
-; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vl4re16.v v24, (a1)
-; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; RV32-NEXT: vsext.vf2 v0, v24
+; RV32-NEXT: vl4re16.v v4, (a1)
+; RV32-NEXT: li a3, 8
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: vsll.vi v24, v0, 3
+; RV32-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV32-NEXT: vwmulsu.vx v24, v4, a3
; RV32-NEXT: mv a3, a2
; RV32-NEXT: bltu a2, a1, .LBB109_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a3, a1
; RV32-NEXT: .LBB109_2:
-; RV32-NEXT: addi a4, sp, 16
-; RV32-NEXT: vl1r.v v0, (a4) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: sub a3, a2, a1
@@ -2337,11 +2343,6 @@ define void @vpscatter_baseidx_nxv16i16_nxv16f64(<vscale x 16 x double> %val, pt
; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: .cfi_def_cfa sp, 16
-; RV32-NEXT: addi sp, sp, 16
-; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_nxv16i16_nxv16f64:
@@ -2404,25 +2405,16 @@ define void @vpscatter_baseidx_nxv16i16_nxv16f64(<vscale x 16 x double> %val, pt
define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %val, ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_sext_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -16
-; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vl4re16.v v24, (a1)
-; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; RV32-NEXT: vsext.vf2 v0, v24
+; RV32-NEXT: vl4re16.v v4, (a1)
+; RV32-NEXT: li a3, 8
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: vsll.vi v24, v0, 3
+; RV32-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV32-NEXT: vwmulsu.vx v24, v4, a3
; RV32-NEXT: mv a3, a2
; RV32-NEXT: bltu a2, a1, .LBB110_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a3, a1
; RV32-NEXT: .LBB110_2:
-; RV32-NEXT: addi a4, sp, 16
-; RV32-NEXT: vl1r.v v0, (a4) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: sub a3, a2, a1
@@ -2434,11 +2426,6 @@ define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: .cfi_def_cfa sp, 16
-; RV32-NEXT: addi sp, sp, 16
-; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv16i16_nxv16f64:
@@ -2502,25 +2489,16 @@ define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
define void @vpscatter_baseidx_zext_nxv16i16_nxv16f64(<vscale x 16 x double> %val, ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -16
-; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vl4re16.v v24, (a1)
-; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; RV32-NEXT: vzext.vf2 v0, v24
+; RV32-NEXT: vl4re16.v v4, (a1)
+; RV32-NEXT: li a3, 8
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: vsll.vi v24, v0, 3
+; RV32-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV32-NEXT: vwmulu.vx v24, v4, a3
; RV32-NEXT: mv a3, a2
; RV32-NEXT: bltu a2, a1, .LBB111_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a3, a1
; RV32-NEXT: .LBB111_2:
-; RV32-NEXT: addi a4, sp, 16
-; RV32-NEXT: vl1r.v v0, (a4) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: sub a3, a2, a1
@@ -2532,34 +2510,20 @@ define void @vpscatter_baseidx_zext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: .cfi_def_cfa sp, 16
-; RV32-NEXT: addi sp, sp, 16
-; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv16i16_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -16
-; RV64-NEXT: .cfi_def_cfa_offset 16
-; RV64-NEXT: csrr a3, vlenb
-; RV64-NEXT: sub sp, sp, a3
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
-; RV64-NEXT: addi a3, sp, 16
-; RV64-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
-; RV64-NEXT: vl4re16.v v24, (a1)
-; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v0, v24
+; RV64-NEXT: vl4re16.v v4, (a1)
+; RV64-NEXT: li a3, 8
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: vsll.vi v24, v0, 3
+; RV64-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v4, a3
; RV64-NEXT: mv a3, a2
; RV64-NEXT: bltu a2, a1, .LBB111_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a3, a1
; RV64-NEXT: .LBB111_2:
-; RV64-NEXT: addi a4, sp, 16
-; RV64-NEXT: vl1r.v v0, (a4) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV64-NEXT: sub a3, a2, a1
@@ -2571,11 +2535,6 @@ define void @vpscatter_baseidx_zext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV64-NEXT: and a2, a2, a3
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vsoxei32.v v16, (a0), v28, v0.t
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: .cfi_def_cfa sp, 16
-; RV64-NEXT: addi sp, sp, 16
-; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%eidxs = zext <vscale x 16 x i16> %idxs to <vscale x 16 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 16 x i64> %eidxs
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
index ff807adf0e59f9..9524eaacb2eb75 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
@@ -222,13 +222,6 @@ define <vscale x 2 x i64> @vwsll_vx_i8_nxv2i64_zext(<vscale x 2 x i32> %a, i8 %b
}
define <vscale x 2 x i64> @vwsll_vi_nxv2i64(<vscale x 2 x i32> %a) {
-; CHECK-LABEL: vwsll_vi_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2
-; CHECK-NEXT: ret
-;
; CHECK-ZVBB-LABEL: vwsll_vi_nxv2i64:
; CHECK-ZVBB: # %bb.0:
; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma
@@ -432,9 +425,10 @@ define <vscale x 4 x i32> @vwsll_vx_i8_nxv4i32_zext(<vscale x 4 x i16> %a, i8 %b
define <vscale x 4 x i32> @vwsll_vi_nxv4i32(<vscale x 4 x i16> %a) {
; CHECK-LABEL: vwsll_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: li a0, 4
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a0
+; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
;
; CHECK-ZVBB-LABEL: vwsll_vi_nxv4i32:
@@ -612,9 +606,10 @@ define <vscale x 8 x i16> @vwsll_vx_i8_nxv8i16_zext(<vscale x 8 x i8> %a, i8 %b)
define <vscale x 8 x i16> @vwsll_vi_nxv8i16(<vscale x 8 x i8> %a) {
; CHECK-LABEL: vwsll_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: li a0, 4
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a0
+; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
;
; CHECK-ZVBB-LABEL: vwsll_vi_nxv8i16:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll
index c30c4763dd46d5..f1dc696510cfc7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll
@@ -238,12 +238,20 @@ define <vscale x 2 x i64> @vwsll_vx_i8_nxv2i64_zext(<vscale x 2 x i32> %a, i8 %b
}
define <vscale x 2 x i64> @vwsll_vi_nxv2i64(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vwsll_vi_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2, v0.t
-; CHECK-NEXT: ret
+; CHECK-RV32-LABEL: vwsll_vi_nxv2i64:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-RV32-NEXT: vzext.vf2 v10, v8
+; CHECK-RV32-NEXT: vsll.vi v8, v10, 2, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: vwsll_vi_nxv2i64:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: li a1, 4
+; CHECK-RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-RV64-NEXT: vwmulu.vx v10, v8, a1, v0.t
+; CHECK-RV64-NEXT: vmv2r.v v8, v10
+; CHECK-RV64-NEXT: ret
;
; CHECK-ZVBB-LABEL: vwsll_vi_nxv2i64:
; CHECK-ZVBB: # %bb.0:
@@ -464,9 +472,10 @@ define <vscale x 4 x i32> @vwsll_vx_i8_nxv4i32_zext(<vscale x 4 x i16> %a, i8 %b
define <vscale x 4 x i32> @vwsll_vi_nxv4i32(<vscale x 4 x i16> %a, <vscale x 4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vwsll_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2, v0.t
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a1, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
;
; CHECK-ZVBB-LABEL: vwsll_vi_nxv4i32:
@@ -661,9 +670,10 @@ define <vscale x 8 x i16> @vwsll_vx_i8_nxv8i16_zext(<vscale x 8 x i8> %a, i8 %b,
define <vscale x 8 x i16> @vwsll_vi_nxv8i16(<vscale x 8 x i8> %a, <vscale x 8 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vwsll_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2, v0.t
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a1, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
;
; CHECK-ZVBB-LABEL: vwsll_vi_nxv8i16:
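Across the test updates in this first patch the pattern is uniform: a vzext.vf2/vsext.vf2 followed by vsll.vi at the wide SEW is replaced by a single vwmulu.vx/vwmulsu.vx at the narrow SEW, with the shift amounts 2 and 3 reappearing as the scalar multipliers 4 and 8 (1u << C) materialized by li, matching the 4- and 8-byte element strides in the gather/scatter address computations. As a reference point, here is a minimal IR sketch of the shape being folded, distilled from the vwsll_vi_nxv4i32 test above; the function name is illustrative and not part of the patch:

define <vscale x 4 x i32> @shl_of_zext_sketch(<vscale x 4 x i16> %a) {
  ; Previously selected as vzext.vf2 + vsll.vi at e32; with this patch it
  ; selects li a0, 4 followed by vwmulu.vx at e16.
  %ext = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
  %shl = shl <vscale x 4 x i32> %ext, splat (i32 2)
  ret <vscale x 4 x i32> %shl
}
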
>From 5aa9da4f536c337a8979704aa46c8fc768cabec5 Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Wed, 8 Jan 2025 17:55:35 +0100
Subject: [PATCH 2/5] [RISCV] Fold vector shift to widening multiply on rv32
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 9 +++++++--
llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll | 8 ++++++++
llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll | 21 +++++++--------------
3 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 955a15393ca8a1..8e44ab33f66da6 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17370,13 +17370,18 @@ static SDValue combineSHL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
SDValue RHS = N->getOperand(1);
APInt ShAmt;
- if (!ISD::isConstantSplatVector(RHS.getNode(), ShAmt))
+ uint64_t ShAmtInt;
+ if (ISD::isConstantSplatVector(RHS.getNode(), ShAmt))
+ ShAmtInt = ShAmt.getZExtValue();
+ else if (RHS.getOpcode() == RISCVISD::VMV_V_X_VL &&
+ RHS.getOperand(1).getOpcode() == ISD::Constant)
+ ShAmtInt = RHS.getConstantOperandVal(1);
+ else
return SDValue();
// Better foldings:
// (shl (sext x), 1) -> (vwadd x, x)
// (shl (zext x), 1) -> (vwaddu x, x)
- uint64_t ShAmtInt = ShAmt.getZExtValue();
if (ShAmtInt <= 1)
return SDValue();
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
index 9524eaacb2eb75..fd09fe791b4fdb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
@@ -222,6 +222,14 @@ define <vscale x 2 x i64> @vwsll_vx_i8_nxv2i64_zext(<vscale x 2 x i32> %a, i8 %b
}
define <vscale x 2 x i64> @vwsll_vi_nxv2i64(<vscale x 2 x i32> %a) {
+; CHECK-LABEL: vwsll_vi_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 4
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a0
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
+;
; CHECK-ZVBB-LABEL: vwsll_vi_nxv2i64:
; CHECK-ZVBB: # %bb.0:
; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll
index f1dc696510cfc7..1358a7c69cb8a1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll
@@ -238,20 +238,13 @@ define <vscale x 2 x i64> @vwsll_vx_i8_nxv2i64_zext(<vscale x 2 x i32> %a, i8 %b
}
define <vscale x 2 x i64> @vwsll_vi_nxv2i64(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
-; CHECK-RV32-LABEL: vwsll_vi_nxv2i64:
-; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-RV32-NEXT: vzext.vf2 v10, v8
-; CHECK-RV32-NEXT: vsll.vi v8, v10, 2, v0.t
-; CHECK-RV32-NEXT: ret
-;
-; CHECK-RV64-LABEL: vwsll_vi_nxv2i64:
-; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: li a1, 4
-; CHECK-RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-RV64-NEXT: vwmulu.vx v10, v8, a1, v0.t
-; CHECK-RV64-NEXT: vmv2r.v v8, v10
-; CHECK-RV64-NEXT: ret
+; CHECK-LABEL: vwsll_vi_nxv2i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a1, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: ret
;
; CHECK-ZVBB-LABEL: vwsll_vi_nxv2i64:
; CHECK-ZVBB: # %bb.0:
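The rv32 case in this patch matches the shift amount when it is a RISCVISD::VMV_V_X_VL of a constant, presumably because a 64-bit splat amount on riscv32 is no longer a plain constant splat by the time the combine runs. A sketch that exercises it, matching the vwsll_vi_nxv2i64 test updated above (only the signature appears in the diff; the body is assumed):

; Sketch only; with e.g. llc -mtriple=riscv32 -mattr=+v this should now emit
; li a0, 4 / vwmulu.vx, the same as the riscv64 run.
define <vscale x 2 x i64> @shl_zext_by_2_sketch(<vscale x 2 x i32> %a) {
  %ext = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
  %head = insertelement <vscale x 2 x i64> poison, i64 2, i32 0
  %shamt = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
  %shl = shl <vscale x 2 x i64> %ext, %shamt
  ret <vscale x 2 x i64> %shl
}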
>From 1868507d3f968466ebf040d89b11065418b045a3 Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Wed, 8 Jan 2025 18:07:34 +0100
Subject: [PATCH 3/5] [RISCV] Use `getSimpleValueType`
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 8e44ab33f66da6..aa623fae600e48 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17386,8 +17386,7 @@ static SDValue combineSHL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
return SDValue();
SDValue NarrowOp = LHS.getOperand(0);
- EVT NarrowVT = NarrowOp.getValueType();
- uint64_t NarrowBits = NarrowVT.getScalarSizeInBits();
+ uint64_t NarrowBits = NarrowOp.getSimpleValueType().getScalarSizeInBits();
if (ShAmtInt >= NarrowBits)
return SDValue();
EVT VT = N->getValueType(0);
@@ -17413,9 +17412,10 @@ static SDValue combineSHL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
default:
llvm_unreachable("Expected SHL");
}
- return DAG.getNode(Opcode, DL, VT, NarrowOp,
- DAG.getConstant(1ULL << ShAmtInt, SDLoc(RHS), NarrowVT),
- Passthru, Mask, VL);
+ return DAG.getNode(
+ Opcode, DL, VT, NarrowOp,
+ DAG.getConstant(1ULL << ShAmtInt, SDLoc(RHS), NarrowOp.getValueType()),
+ Passthru, Mask, VL);
}
SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
>From 3a59255be01ab61daca0274958face9efba45596 Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Wed, 8 Jan 2025 18:23:56 +0100
Subject: [PATCH 4/5] [RISCV] Add a TODO comment
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index aa623fae600e48..1daddc4dc4f29e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17399,7 +17399,7 @@ static SDValue combineSHL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
switch (N->getOpcode()) {
case ISD::SHL:
if (!VT.isScalableVector())
- return SDValue();
+ return SDValue(); // TODO: handle fixed length vectors
Passthru = DAG.getUNDEF(VT);
std::tie(Mask, VL) =
getDefaultScalableVLOps(VT.getSimpleVT(), DL, DAG, Subtarget);
>From 174e59e5be1d1c85d2e361551f96e6b542dd38e9 Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Thu, 9 Jan 2025 15:00:30 +0100
Subject: [PATCH 5/5] [RISCV] Handle fixed-length vectors
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 33 +-
.../RISCV/rvv/fixed-vectors-masked-gather.ll | 432 ++++++------
.../RISCV/rvv/fixed-vectors-masked-scatter.ll | 636 +++++++++---------
.../RISCV/rvv/fixed-vectors-vpgather.ll | 149 ++--
.../RISCV/rvv/fixed-vectors-vpscatter.ll | 63 +-
.../CodeGen/RISCV/rvv/fixed-vectors-vwsll.ll | 50 +-
6 files changed, 698 insertions(+), 665 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 1daddc4dc4f29e..27b01748d49c3d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17364,6 +17364,9 @@ static SDValue combineSHL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
case ISD::ZERO_EXTEND:
Opcode = RISCVISD::VWMULU_VL;
break;
+ // TODO:
+ // case RISCVISD::VSEXT_VL:
+ // case RISCVISD::VZEXT_VL:
default:
return SDValue();
}
@@ -17386,23 +17389,30 @@ static SDValue combineSHL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
return SDValue();
SDValue NarrowOp = LHS.getOperand(0);
- uint64_t NarrowBits = NarrowOp.getSimpleValueType().getScalarSizeInBits();
+ MVT NarrowVT = NarrowOp.getSimpleValueType();
+ uint64_t NarrowBits = NarrowVT.getScalarSizeInBits();
if (ShAmtInt >= NarrowBits)
return SDValue();
- EVT VT = N->getValueType(0);
+ MVT VT = N->getSimpleValueType(0);
if (NarrowBits * 2 != VT.getScalarSizeInBits())
return SDValue();
SelectionDAG &DAG = DCI.DAG;
+ MVT NarrowContainerVT = NarrowVT;
+ MVT ContainerVT = VT;
SDLoc DL(N);
SDValue Passthru, Mask, VL;
switch (N->getOpcode()) {
case ISD::SHL:
- if (!VT.isScalableVector())
- return SDValue(); // TODO: handle fixed length vectors
+ if (VT.isFixedLengthVector()) {
+ NarrowContainerVT =
+ getContainerForFixedLengthVector(DAG, NarrowVT, Subtarget);
+ NarrowOp =
+ convertToScalableVector(NarrowContainerVT, NarrowOp, DAG, Subtarget);
+ ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
+ }
Passthru = DAG.getUNDEF(VT);
- std::tie(Mask, VL) =
- getDefaultScalableVLOps(VT.getSimpleVT(), DL, DAG, Subtarget);
+ std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
break;
case RISCVISD::SHL_VL:
Passthru = N->getOperand(2);
@@ -17412,10 +17422,13 @@ static SDValue combineSHL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
default:
llvm_unreachable("Expected SHL");
}
- return DAG.getNode(
- Opcode, DL, VT, NarrowOp,
- DAG.getConstant(1ULL << ShAmtInt, SDLoc(RHS), NarrowOp.getValueType()),
- Passthru, Mask, VL);
+ SDValue Mul =
+ DAG.getNode(Opcode, DL, ContainerVT, NarrowOp,
+ DAG.getConstant(1ULL << ShAmtInt, SDLoc(RHS), ContainerVT),
+ Passthru, Mask, VL);
+ if (VT.isFixedLengthVector())
+ return convertFromScalableVector(VT, Mul, DAG, Subtarget);
+ return Mul;
}
SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
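For the fixed-length path added here, the narrow operand is moved into its scalable container type, the VL widening multiply is built on the container, and the result is converted back, mirroring how other fixed-length combines in this file are structured. A hypothetical reduced example that should now take this path (not taken from the updated test files; name and expected output are assumptions):

; Sketch only; e.g. llc -mtriple=riscv64 -mattr=+v is expected to select
; vwmulu.vx with multiplier 4 instead of vzext.vf2 + vsll.vi.
define <8 x i32> @shl_zext_fixed_sketch(<8 x i16> %a) {
  %ext = zext <8 x i16> %a to <8 x i32>
  %shl = shl <8 x i32> %ext, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  ret <8 x i32> %shl
}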
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index c6e12c52122d27..9b34f1137ca66d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -4943,16 +4943,17 @@ define <8 x i64> @mgather_baseidx_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <8 x i
; RV32ZVE32F-LABEL: mgather_baseidx_v8i16_v8i64:
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
+; RV32ZVE32F-NEXT: vmv.v.x v10, a1
+; RV32ZVE32F-NEXT: li a1, 8
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
; RV32ZVE32F-NEXT: andi a3, t0, 1
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32ZVE32F-NEXT: vwmaccus.vx v10, a1, v8
; RV32ZVE32F-NEXT: beqz a3, .LBB51_7
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT: vmv.x.s a3, v8
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vmv.x.s a3, v10
; RV32ZVE32F-NEXT: lw a1, 0(a3)
; RV32ZVE32F-NEXT: lw a3, 4(a3)
; RV32ZVE32F-NEXT: andi a4, t0, 2
@@ -4988,40 +4989,40 @@ define <8 x i64> @mgather_baseidx_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <8 x i
; RV32ZVE32F-NEXT: beqz a4, .LBB51_2
; RV32ZVE32F-NEXT: .LBB51_8: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a5, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 1
+; RV32ZVE32F-NEXT: vmv.x.s a5, v8
; RV32ZVE32F-NEXT: lw a4, 0(a5)
; RV32ZVE32F-NEXT: lw a5, 4(a5)
; RV32ZVE32F-NEXT: andi a6, t0, 4
; RV32ZVE32F-NEXT: beqz a6, .LBB51_3
; RV32ZVE32F-NEXT: .LBB51_9: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a7, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 2
+; RV32ZVE32F-NEXT: vmv.x.s a7, v8
; RV32ZVE32F-NEXT: lw a6, 0(a7)
; RV32ZVE32F-NEXT: lw a7, 4(a7)
; RV32ZVE32F-NEXT: andi t1, t0, 8
; RV32ZVE32F-NEXT: beqz t1, .LBB51_4
; RV32ZVE32F-NEXT: .LBB51_10: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s t2, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.x.s t2, v8
; RV32ZVE32F-NEXT: lw t1, 0(t2)
; RV32ZVE32F-NEXT: lw t2, 4(t2)
; RV32ZVE32F-NEXT: andi t3, t0, 16
; RV32ZVE32F-NEXT: beqz t3, .LBB51_5
; RV32ZVE32F-NEXT: .LBB51_11: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s t4, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 4
+; RV32ZVE32F-NEXT: vmv.x.s t4, v8
; RV32ZVE32F-NEXT: lw t3, 0(t4)
; RV32ZVE32F-NEXT: lw t4, 4(t4)
; RV32ZVE32F-NEXT: andi t5, t0, 32
; RV32ZVE32F-NEXT: beqz t5, .LBB51_6
; RV32ZVE32F-NEXT: .LBB51_12: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s t6, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 5
+; RV32ZVE32F-NEXT: vmv.x.s t6, v8
; RV32ZVE32F-NEXT: lw t5, 0(t6)
; RV32ZVE32F-NEXT: lw t6, 4(t6)
; RV32ZVE32F-NEXT: .LBB51_13: # %else14
@@ -5035,8 +5036,8 @@ define <8 x i64> @mgather_baseidx_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <8 x i
; RV32ZVE32F-NEXT: beqz s0, .LBB51_16
; RV32ZVE32F-NEXT: # %bb.14: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s s1, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 6
+; RV32ZVE32F-NEXT: vmv.x.s s1, v8
; RV32ZVE32F-NEXT: lw s0, 0(s1)
; RV32ZVE32F-NEXT: lw s1, 4(s1)
; RV32ZVE32F-NEXT: andi t0, t0, -128
@@ -5052,7 +5053,7 @@ define <8 x i64> @mgather_baseidx_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <8 x i
; RV32ZVE32F-NEXT: beqz t0, .LBB51_15
; RV32ZVE32F-NEXT: .LBB51_17: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 7
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: lw t0, 0(a2)
; RV32ZVE32F-NEXT: lw a2, 4(a2)
@@ -5221,16 +5222,17 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-LABEL: mgather_baseidx_sext_v8i16_v8i64:
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
+; RV32ZVE32F-NEXT: vmv.v.x v10, a1
+; RV32ZVE32F-NEXT: li a1, 8
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
; RV32ZVE32F-NEXT: andi a3, t0, 1
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32ZVE32F-NEXT: vwmaccus.vx v10, a1, v8
; RV32ZVE32F-NEXT: beqz a3, .LBB52_7
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT: vmv.x.s a3, v8
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vmv.x.s a3, v10
; RV32ZVE32F-NEXT: lw a1, 0(a3)
; RV32ZVE32F-NEXT: lw a3, 4(a3)
; RV32ZVE32F-NEXT: andi a4, t0, 2
@@ -5266,40 +5268,40 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: beqz a4, .LBB52_2
; RV32ZVE32F-NEXT: .LBB52_8: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a5, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 1
+; RV32ZVE32F-NEXT: vmv.x.s a5, v8
; RV32ZVE32F-NEXT: lw a4, 0(a5)
; RV32ZVE32F-NEXT: lw a5, 4(a5)
; RV32ZVE32F-NEXT: andi a6, t0, 4
; RV32ZVE32F-NEXT: beqz a6, .LBB52_3
; RV32ZVE32F-NEXT: .LBB52_9: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a7, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 2
+; RV32ZVE32F-NEXT: vmv.x.s a7, v8
; RV32ZVE32F-NEXT: lw a6, 0(a7)
; RV32ZVE32F-NEXT: lw a7, 4(a7)
; RV32ZVE32F-NEXT: andi t1, t0, 8
; RV32ZVE32F-NEXT: beqz t1, .LBB52_4
; RV32ZVE32F-NEXT: .LBB52_10: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s t2, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.x.s t2, v8
; RV32ZVE32F-NEXT: lw t1, 0(t2)
; RV32ZVE32F-NEXT: lw t2, 4(t2)
; RV32ZVE32F-NEXT: andi t3, t0, 16
; RV32ZVE32F-NEXT: beqz t3, .LBB52_5
; RV32ZVE32F-NEXT: .LBB52_11: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s t4, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 4
+; RV32ZVE32F-NEXT: vmv.x.s t4, v8
; RV32ZVE32F-NEXT: lw t3, 0(t4)
; RV32ZVE32F-NEXT: lw t4, 4(t4)
; RV32ZVE32F-NEXT: andi t5, t0, 32
; RV32ZVE32F-NEXT: beqz t5, .LBB52_6
; RV32ZVE32F-NEXT: .LBB52_12: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s t6, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 5
+; RV32ZVE32F-NEXT: vmv.x.s t6, v8
; RV32ZVE32F-NEXT: lw t5, 0(t6)
; RV32ZVE32F-NEXT: lw t6, 4(t6)
; RV32ZVE32F-NEXT: .LBB52_13: # %else14
@@ -5313,8 +5315,8 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: beqz s0, .LBB52_16
; RV32ZVE32F-NEXT: # %bb.14: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s s1, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 6
+; RV32ZVE32F-NEXT: vmv.x.s s1, v8
; RV32ZVE32F-NEXT: lw s0, 0(s1)
; RV32ZVE32F-NEXT: lw s1, 4(s1)
; RV32ZVE32F-NEXT: andi t0, t0, -128
@@ -5330,7 +5332,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: beqz t0, .LBB52_15
; RV32ZVE32F-NEXT: .LBB52_17: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 7
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: lw t0, 0(a2)
; RV32ZVE32F-NEXT: lw a2, 4(a2)
@@ -5501,16 +5503,17 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-LABEL: mgather_baseidx_zext_v8i16_v8i64:
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vzext.vf2 v10, v8
+; RV32ZVE32F-NEXT: vmv.v.x v10, a1
+; RV32ZVE32F-NEXT: li a1, 8
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
; RV32ZVE32F-NEXT: andi a3, t0, 1
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32ZVE32F-NEXT: vwmaccu.vx v10, a1, v8
; RV32ZVE32F-NEXT: beqz a3, .LBB53_7
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
-; RV32ZVE32F-NEXT: vmv.x.s a3, v8
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vmv.x.s a3, v10
; RV32ZVE32F-NEXT: lw a1, 0(a3)
; RV32ZVE32F-NEXT: lw a3, 4(a3)
; RV32ZVE32F-NEXT: andi a4, t0, 2
@@ -5546,40 +5549,40 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: beqz a4, .LBB53_2
; RV32ZVE32F-NEXT: .LBB53_8: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a5, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 1
+; RV32ZVE32F-NEXT: vmv.x.s a5, v8
; RV32ZVE32F-NEXT: lw a4, 0(a5)
; RV32ZVE32F-NEXT: lw a5, 4(a5)
; RV32ZVE32F-NEXT: andi a6, t0, 4
; RV32ZVE32F-NEXT: beqz a6, .LBB53_3
; RV32ZVE32F-NEXT: .LBB53_9: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a7, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 2
+; RV32ZVE32F-NEXT: vmv.x.s a7, v8
; RV32ZVE32F-NEXT: lw a6, 0(a7)
; RV32ZVE32F-NEXT: lw a7, 4(a7)
; RV32ZVE32F-NEXT: andi t1, t0, 8
; RV32ZVE32F-NEXT: beqz t1, .LBB53_4
; RV32ZVE32F-NEXT: .LBB53_10: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s t2, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.x.s t2, v8
; RV32ZVE32F-NEXT: lw t1, 0(t2)
; RV32ZVE32F-NEXT: lw t2, 4(t2)
; RV32ZVE32F-NEXT: andi t3, t0, 16
; RV32ZVE32F-NEXT: beqz t3, .LBB53_5
; RV32ZVE32F-NEXT: .LBB53_11: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s t4, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 4
+; RV32ZVE32F-NEXT: vmv.x.s t4, v8
; RV32ZVE32F-NEXT: lw t3, 0(t4)
; RV32ZVE32F-NEXT: lw t4, 4(t4)
; RV32ZVE32F-NEXT: andi t5, t0, 32
; RV32ZVE32F-NEXT: beqz t5, .LBB53_6
; RV32ZVE32F-NEXT: .LBB53_12: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s t6, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 5
+; RV32ZVE32F-NEXT: vmv.x.s t6, v8
; RV32ZVE32F-NEXT: lw t5, 0(t6)
; RV32ZVE32F-NEXT: lw t6, 4(t6)
; RV32ZVE32F-NEXT: .LBB53_13: # %else14
@@ -5593,8 +5596,8 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: beqz s0, .LBB53_16
; RV32ZVE32F-NEXT: # %bb.14: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s s1, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 6
+; RV32ZVE32F-NEXT: vmv.x.s s1, v8
; RV32ZVE32F-NEXT: lw s0, 0(s1)
; RV32ZVE32F-NEXT: lw s1, 4(s1)
; RV32ZVE32F-NEXT: andi t0, t0, -128
@@ -5610,7 +5613,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: beqz t0, .LBB53_15
; RV32ZVE32F-NEXT: .LBB53_17: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 7
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: lw t0, 0(a2)
; RV32ZVE32F-NEXT: lw a2, 4(a2)
@@ -11947,38 +11950,38 @@ define <8 x double> @mgather_baseidx_v8i16_v8f64(ptr %base, <8 x i16> %idxs, <8
; RV32ZVE32F-LABEL: mgather_baseidx_v8i16_v8f64:
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
+; RV32ZVE32F-NEXT: vmv.v.x v10, a1
+; RV32ZVE32F-NEXT: li a2, 8
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v0
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: andi a3, a2, 1
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a3, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32ZVE32F-NEXT: vwmaccus.vx v10, a2, v8
; RV32ZVE32F-NEXT: bnez a3, .LBB100_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a2, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB100_11
+; RV32ZVE32F-NEXT: andi a2, a1, 2
+; RV32ZVE32F-NEXT: bnez a2, .LBB100_11
; RV32ZVE32F-NEXT: .LBB100_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a2, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB100_12
+; RV32ZVE32F-NEXT: andi a2, a1, 4
+; RV32ZVE32F-NEXT: bnez a2, .LBB100_12
; RV32ZVE32F-NEXT: .LBB100_3: # %else5
-; RV32ZVE32F-NEXT: andi a1, a2, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB100_13
+; RV32ZVE32F-NEXT: andi a2, a1, 8
+; RV32ZVE32F-NEXT: bnez a2, .LBB100_13
; RV32ZVE32F-NEXT: .LBB100_4: # %else8
-; RV32ZVE32F-NEXT: andi a1, a2, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB100_14
+; RV32ZVE32F-NEXT: andi a2, a1, 16
+; RV32ZVE32F-NEXT: bnez a2, .LBB100_14
; RV32ZVE32F-NEXT: .LBB100_5: # %else11
-; RV32ZVE32F-NEXT: andi a1, a2, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB100_15
+; RV32ZVE32F-NEXT: andi a2, a1, 32
+; RV32ZVE32F-NEXT: bnez a2, .LBB100_15
; RV32ZVE32F-NEXT: .LBB100_6: # %else14
-; RV32ZVE32F-NEXT: andi a1, a2, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB100_16
+; RV32ZVE32F-NEXT: andi a2, a1, 64
+; RV32ZVE32F-NEXT: bnez a2, .LBB100_16
; RV32ZVE32F-NEXT: .LBB100_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a2, -128
+; RV32ZVE32F-NEXT: andi a1, a1, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB100_9
; RV32ZVE32F-NEXT: .LBB100_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 7
; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: fld fa7, 0(a1)
; RV32ZVE32F-NEXT: .LBB100_9: # %else20
@@ -11992,51 +11995,52 @@ define <8 x double> @mgather_baseidx_v8i16_v8f64(ptr %base, <8 x i16> %idxs, <8
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB100_10: # %cond.load
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB100_2
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vmv.x.s a2, v10
+; RV32ZVE32F-NEXT: fld fa0, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 2
+; RV32ZVE32F-NEXT: beqz a2, .LBB100_2
; RV32ZVE32F-NEXT: .LBB100_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB100_3
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 1
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa1, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 4
+; RV32ZVE32F-NEXT: beqz a2, .LBB100_3
; RV32ZVE32F-NEXT: .LBB100_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB100_4
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 2
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa2, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 8
+; RV32ZVE32F-NEXT: beqz a2, .LBB100_4
; RV32ZVE32F-NEXT: .LBB100_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB100_5
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa3, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 16
+; RV32ZVE32F-NEXT: beqz a2, .LBB100_5
; RV32ZVE32F-NEXT: .LBB100_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB100_6
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 4
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa4, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 32
+; RV32ZVE32F-NEXT: beqz a2, .LBB100_6
; RV32ZVE32F-NEXT: .LBB100_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB100_7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 5
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa5, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 64
+; RV32ZVE32F-NEXT: beqz a2, .LBB100_7
; RV32ZVE32F-NEXT: .LBB100_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, -128
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 6
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa6, 0(a2)
+; RV32ZVE32F-NEXT: andi a1, a1, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB100_8
; RV32ZVE32F-NEXT: j .LBB100_9
;
@@ -12163,38 +12167,38 @@ define <8 x double> @mgather_baseidx_sext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV32ZVE32F-LABEL: mgather_baseidx_sext_v8i16_v8f64:
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
+; RV32ZVE32F-NEXT: vmv.v.x v10, a1
+; RV32ZVE32F-NEXT: li a2, 8
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v0
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: andi a3, a2, 1
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a3, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32ZVE32F-NEXT: vwmaccus.vx v10, a2, v8
; RV32ZVE32F-NEXT: bnez a3, .LBB101_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a2, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB101_11
+; RV32ZVE32F-NEXT: andi a2, a1, 2
+; RV32ZVE32F-NEXT: bnez a2, .LBB101_11
; RV32ZVE32F-NEXT: .LBB101_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a2, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB101_12
+; RV32ZVE32F-NEXT: andi a2, a1, 4
+; RV32ZVE32F-NEXT: bnez a2, .LBB101_12
; RV32ZVE32F-NEXT: .LBB101_3: # %else5
-; RV32ZVE32F-NEXT: andi a1, a2, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB101_13
+; RV32ZVE32F-NEXT: andi a2, a1, 8
+; RV32ZVE32F-NEXT: bnez a2, .LBB101_13
; RV32ZVE32F-NEXT: .LBB101_4: # %else8
-; RV32ZVE32F-NEXT: andi a1, a2, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB101_14
+; RV32ZVE32F-NEXT: andi a2, a1, 16
+; RV32ZVE32F-NEXT: bnez a2, .LBB101_14
; RV32ZVE32F-NEXT: .LBB101_5: # %else11
-; RV32ZVE32F-NEXT: andi a1, a2, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB101_15
+; RV32ZVE32F-NEXT: andi a2, a1, 32
+; RV32ZVE32F-NEXT: bnez a2, .LBB101_15
; RV32ZVE32F-NEXT: .LBB101_6: # %else14
-; RV32ZVE32F-NEXT: andi a1, a2, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB101_16
+; RV32ZVE32F-NEXT: andi a2, a1, 64
+; RV32ZVE32F-NEXT: bnez a2, .LBB101_16
; RV32ZVE32F-NEXT: .LBB101_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a2, -128
+; RV32ZVE32F-NEXT: andi a1, a1, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB101_9
; RV32ZVE32F-NEXT: .LBB101_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 7
; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: fld fa7, 0(a1)
; RV32ZVE32F-NEXT: .LBB101_9: # %else20
@@ -12208,51 +12212,52 @@ define <8 x double> @mgather_baseidx_sext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB101_10: # %cond.load
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB101_2
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vmv.x.s a2, v10
+; RV32ZVE32F-NEXT: fld fa0, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 2
+; RV32ZVE32F-NEXT: beqz a2, .LBB101_2
; RV32ZVE32F-NEXT: .LBB101_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB101_3
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 1
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa1, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 4
+; RV32ZVE32F-NEXT: beqz a2, .LBB101_3
; RV32ZVE32F-NEXT: .LBB101_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB101_4
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 2
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa2, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 8
+; RV32ZVE32F-NEXT: beqz a2, .LBB101_4
; RV32ZVE32F-NEXT: .LBB101_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB101_5
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa3, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 16
+; RV32ZVE32F-NEXT: beqz a2, .LBB101_5
; RV32ZVE32F-NEXT: .LBB101_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB101_6
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 4
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa4, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 32
+; RV32ZVE32F-NEXT: beqz a2, .LBB101_6
; RV32ZVE32F-NEXT: .LBB101_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB101_7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 5
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa5, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 64
+; RV32ZVE32F-NEXT: beqz a2, .LBB101_7
; RV32ZVE32F-NEXT: .LBB101_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, -128
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 6
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa6, 0(a2)
+; RV32ZVE32F-NEXT: andi a1, a1, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB101_8
; RV32ZVE32F-NEXT: j .LBB101_9
;
@@ -12381,38 +12386,38 @@ define <8 x double> @mgather_baseidx_zext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV32ZVE32F-LABEL: mgather_baseidx_zext_v8i16_v8f64:
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vzext.vf2 v10, v8
+; RV32ZVE32F-NEXT: vmv.v.x v10, a1
+; RV32ZVE32F-NEXT: li a2, 8
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v0
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: andi a3, a2, 1
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a3, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32ZVE32F-NEXT: vwmaccu.vx v10, a2, v8
; RV32ZVE32F-NEXT: bnez a3, .LBB102_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a2, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB102_11
+; RV32ZVE32F-NEXT: andi a2, a1, 2
+; RV32ZVE32F-NEXT: bnez a2, .LBB102_11
; RV32ZVE32F-NEXT: .LBB102_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a2, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB102_12
+; RV32ZVE32F-NEXT: andi a2, a1, 4
+; RV32ZVE32F-NEXT: bnez a2, .LBB102_12
; RV32ZVE32F-NEXT: .LBB102_3: # %else5
-; RV32ZVE32F-NEXT: andi a1, a2, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB102_13
+; RV32ZVE32F-NEXT: andi a2, a1, 8
+; RV32ZVE32F-NEXT: bnez a2, .LBB102_13
; RV32ZVE32F-NEXT: .LBB102_4: # %else8
-; RV32ZVE32F-NEXT: andi a1, a2, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB102_14
+; RV32ZVE32F-NEXT: andi a2, a1, 16
+; RV32ZVE32F-NEXT: bnez a2, .LBB102_14
; RV32ZVE32F-NEXT: .LBB102_5: # %else11
-; RV32ZVE32F-NEXT: andi a1, a2, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB102_15
+; RV32ZVE32F-NEXT: andi a2, a1, 32
+; RV32ZVE32F-NEXT: bnez a2, .LBB102_15
; RV32ZVE32F-NEXT: .LBB102_6: # %else14
-; RV32ZVE32F-NEXT: andi a1, a2, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB102_16
+; RV32ZVE32F-NEXT: andi a2, a1, 64
+; RV32ZVE32F-NEXT: bnez a2, .LBB102_16
; RV32ZVE32F-NEXT: .LBB102_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a2, -128
+; RV32ZVE32F-NEXT: andi a1, a1, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB102_9
; RV32ZVE32F-NEXT: .LBB102_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 7
; RV32ZVE32F-NEXT: vmv.x.s a1, v8
; RV32ZVE32F-NEXT: fld fa7, 0(a1)
; RV32ZVE32F-NEXT: .LBB102_9: # %else20
@@ -12426,51 +12431,52 @@ define <8 x double> @mgather_baseidx_zext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB102_10: # %cond.load
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB102_2
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vmv.x.s a2, v10
+; RV32ZVE32F-NEXT: fld fa0, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 2
+; RV32ZVE32F-NEXT: beqz a2, .LBB102_2
; RV32ZVE32F-NEXT: .LBB102_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB102_3
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 1
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa1, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 4
+; RV32ZVE32F-NEXT: beqz a2, .LBB102_3
; RV32ZVE32F-NEXT: .LBB102_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB102_4
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 2
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa2, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 8
+; RV32ZVE32F-NEXT: beqz a2, .LBB102_4
; RV32ZVE32F-NEXT: .LBB102_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB102_5
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa3, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 16
+; RV32ZVE32F-NEXT: beqz a2, .LBB102_5
; RV32ZVE32F-NEXT: .LBB102_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB102_6
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 4
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa4, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 32
+; RV32ZVE32F-NEXT: beqz a2, .LBB102_6
; RV32ZVE32F-NEXT: .LBB102_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB102_7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 5
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa5, 0(a2)
+; RV32ZVE32F-NEXT: andi a2, a1, 64
+; RV32ZVE32F-NEXT: beqz a2, .LBB102_7
; RV32ZVE32F-NEXT: .LBB102_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a2, -128
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 6
+; RV32ZVE32F-NEXT: vmv.x.s a2, v8
+; RV32ZVE32F-NEXT: fld fa6, 0(a2)
+; RV32ZVE32F-NEXT: andi a1, a1, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB102_8
; RV32ZVE32F-NEXT: j .LBB102_9
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index 7ec47269257048..be6a8fc8bf52f0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -4173,47 +4173,47 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV32ZVE32F-NEXT: lw a7, 44(a0)
; RV32ZVE32F-NEXT: lw a4, 48(a0)
; RV32ZVE32F-NEXT: lw a5, 52(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 28(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 36(a0)
-; RV32ZVE32F-NEXT: lw s0, 8(a0)
-; RV32ZVE32F-NEXT: lw s1, 12(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw t6, 20(a0)
+; RV32ZVE32F-NEXT: lw t2, 24(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t0, 32(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw s0, 12(a0)
+; RV32ZVE32F-NEXT: lw t4, 16(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
-; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.v.x v10, a1
+; RV32ZVE32F-NEXT: li s1, 8
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi s2, t0, 1
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi s2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32ZVE32F-NEXT: vwmaccus.vx v10, s1, v8
; RV32ZVE32F-NEXT: bnez s2, .LBB45_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, t0, 2
+; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB45_11
; RV32ZVE32F-NEXT: .LBB45_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, t0, 4
+; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB45_12
; RV32ZVE32F-NEXT: .LBB45_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, t0, 8
+; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB45_13
; RV32ZVE32F-NEXT: .LBB45_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, t0, 16
+; RV32ZVE32F-NEXT: andi a0, a1, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB45_14
; RV32ZVE32F-NEXT: .LBB45_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, t0, 32
+; RV32ZVE32F-NEXT: andi a0, a1, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB45_15
; RV32ZVE32F-NEXT: .LBB45_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, t0, 64
+; RV32ZVE32F-NEXT: andi a0, a1, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB45_16
; RV32ZVE32F-NEXT: .LBB45_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, t0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB45_9
; RV32ZVE32F-NEXT: .LBB45_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 7
; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV32ZVE32F-NEXT: sw a2, 0(a0)
; RV32ZVE32F-NEXT: sw a3, 4(a0)
@@ -4229,60 +4229,61 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB45_10: # %cond.store
; RV32ZVE32F-NEXT: .cfi_restore_state
-; RV32ZVE32F-NEXT: lw a1, 0(a0)
+; RV32ZVE32F-NEXT: lw s1, 0(a0)
; RV32ZVE32F-NEXT: lw a0, 4(a0)
-; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw a1, 0(s2)
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vmv.x.s s2, v10
+; RV32ZVE32F-NEXT: sw s1, 0(s2)
; RV32ZVE32F-NEXT: sw a0, 4(s2)
-; RV32ZVE32F-NEXT: andi a0, t0, 2
+; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB45_2
; RV32ZVE32F-NEXT: .LBB45_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 0(a0)
-; RV32ZVE32F-NEXT: sw s1, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 4
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 1
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw s0, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB45_3
; RV32ZVE32F-NEXT: .LBB45_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 8
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB45_4
; RV32ZVE32F-NEXT: .LBB45_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 16
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB45_5
; RV32ZVE32F-NEXT: .LBB45_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 32
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: sw t0, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB45_6
; RV32ZVE32F-NEXT: .LBB45_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV32ZVE32F-NEXT: sw a6, 0(a0)
; RV32ZVE32F-NEXT: sw a7, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 64
+; RV32ZVE32F-NEXT: andi a0, a1, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB45_7
; RV32ZVE32F-NEXT: .LBB45_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV32ZVE32F-NEXT: sw a4, 0(a0)
; RV32ZVE32F-NEXT: sw a5, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB45_8
; RV32ZVE32F-NEXT: j .LBB45_9
;
@@ -4424,47 +4425,47 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: lw a7, 44(a0)
; RV32ZVE32F-NEXT: lw a4, 48(a0)
; RV32ZVE32F-NEXT: lw a5, 52(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 28(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 36(a0)
-; RV32ZVE32F-NEXT: lw s0, 8(a0)
-; RV32ZVE32F-NEXT: lw s1, 12(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw t6, 20(a0)
+; RV32ZVE32F-NEXT: lw t2, 24(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t0, 32(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw s0, 12(a0)
+; RV32ZVE32F-NEXT: lw t4, 16(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
-; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.v.x v10, a1
+; RV32ZVE32F-NEXT: li s1, 8
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi s2, t0, 1
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi s2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32ZVE32F-NEXT: vwmaccus.vx v10, s1, v8
; RV32ZVE32F-NEXT: bnez s2, .LBB46_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, t0, 2
+; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB46_11
; RV32ZVE32F-NEXT: .LBB46_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, t0, 4
+; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB46_12
; RV32ZVE32F-NEXT: .LBB46_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, t0, 8
+; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB46_13
; RV32ZVE32F-NEXT: .LBB46_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, t0, 16
+; RV32ZVE32F-NEXT: andi a0, a1, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB46_14
; RV32ZVE32F-NEXT: .LBB46_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, t0, 32
+; RV32ZVE32F-NEXT: andi a0, a1, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB46_15
; RV32ZVE32F-NEXT: .LBB46_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, t0, 64
+; RV32ZVE32F-NEXT: andi a0, a1, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB46_16
; RV32ZVE32F-NEXT: .LBB46_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, t0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB46_9
; RV32ZVE32F-NEXT: .LBB46_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 7
; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV32ZVE32F-NEXT: sw a2, 0(a0)
; RV32ZVE32F-NEXT: sw a3, 4(a0)
@@ -4480,60 +4481,61 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB46_10: # %cond.store
; RV32ZVE32F-NEXT: .cfi_restore_state
-; RV32ZVE32F-NEXT: lw a1, 0(a0)
+; RV32ZVE32F-NEXT: lw s1, 0(a0)
; RV32ZVE32F-NEXT: lw a0, 4(a0)
-; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw a1, 0(s2)
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vmv.x.s s2, v10
+; RV32ZVE32F-NEXT: sw s1, 0(s2)
; RV32ZVE32F-NEXT: sw a0, 4(s2)
-; RV32ZVE32F-NEXT: andi a0, t0, 2
+; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB46_2
; RV32ZVE32F-NEXT: .LBB46_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 0(a0)
-; RV32ZVE32F-NEXT: sw s1, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 4
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 1
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw s0, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB46_3
; RV32ZVE32F-NEXT: .LBB46_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 8
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB46_4
; RV32ZVE32F-NEXT: .LBB46_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 16
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB46_5
; RV32ZVE32F-NEXT: .LBB46_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 32
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: sw t0, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB46_6
; RV32ZVE32F-NEXT: .LBB46_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV32ZVE32F-NEXT: sw a6, 0(a0)
; RV32ZVE32F-NEXT: sw a7, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 64
+; RV32ZVE32F-NEXT: andi a0, a1, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB46_7
; RV32ZVE32F-NEXT: .LBB46_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV32ZVE32F-NEXT: sw a4, 0(a0)
; RV32ZVE32F-NEXT: sw a5, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB46_8
; RV32ZVE32F-NEXT: j .LBB46_9
;
@@ -4677,47 +4679,47 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: lw a7, 44(a0)
; RV32ZVE32F-NEXT: lw a4, 48(a0)
; RV32ZVE32F-NEXT: lw a5, 52(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 28(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 36(a0)
-; RV32ZVE32F-NEXT: lw s0, 8(a0)
-; RV32ZVE32F-NEXT: lw s1, 12(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw t6, 20(a0)
+; RV32ZVE32F-NEXT: lw t2, 24(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t0, 32(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw s0, 12(a0)
+; RV32ZVE32F-NEXT: lw t4, 16(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vzext.vf2 v10, v8
-; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.v.x v10, a1
+; RV32ZVE32F-NEXT: li s1, 8
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi s2, t0, 1
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi s2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32ZVE32F-NEXT: vwmaccu.vx v10, s1, v8
; RV32ZVE32F-NEXT: bnez s2, .LBB47_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, t0, 2
+; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB47_11
; RV32ZVE32F-NEXT: .LBB47_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, t0, 4
+; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB47_12
; RV32ZVE32F-NEXT: .LBB47_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, t0, 8
+; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB47_13
; RV32ZVE32F-NEXT: .LBB47_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, t0, 16
+; RV32ZVE32F-NEXT: andi a0, a1, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB47_14
; RV32ZVE32F-NEXT: .LBB47_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, t0, 32
+; RV32ZVE32F-NEXT: andi a0, a1, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB47_15
; RV32ZVE32F-NEXT: .LBB47_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, t0, 64
+; RV32ZVE32F-NEXT: andi a0, a1, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB47_16
; RV32ZVE32F-NEXT: .LBB47_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, t0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB47_9
; RV32ZVE32F-NEXT: .LBB47_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 7
; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV32ZVE32F-NEXT: sw a2, 0(a0)
; RV32ZVE32F-NEXT: sw a3, 4(a0)
@@ -4733,60 +4735,61 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB47_10: # %cond.store
; RV32ZVE32F-NEXT: .cfi_restore_state
-; RV32ZVE32F-NEXT: lw a1, 0(a0)
+; RV32ZVE32F-NEXT: lw s1, 0(a0)
; RV32ZVE32F-NEXT: lw a0, 4(a0)
-; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw a1, 0(s2)
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vmv.x.s s2, v10
+; RV32ZVE32F-NEXT: sw s1, 0(s2)
; RV32ZVE32F-NEXT: sw a0, 4(s2)
-; RV32ZVE32F-NEXT: andi a0, t0, 2
+; RV32ZVE32F-NEXT: andi a0, a1, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB47_2
; RV32ZVE32F-NEXT: .LBB47_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 0(a0)
-; RV32ZVE32F-NEXT: sw s1, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 4
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 1
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw s0, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB47_3
; RV32ZVE32F-NEXT: .LBB47_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 8
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB47_4
; RV32ZVE32F-NEXT: .LBB47_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 16
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB47_5
; RV32ZVE32F-NEXT: .LBB47_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 32
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: sw t0, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB47_6
; RV32ZVE32F-NEXT: .LBB47_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV32ZVE32F-NEXT: sw a6, 0(a0)
; RV32ZVE32F-NEXT: sw a7, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, 64
+; RV32ZVE32F-NEXT: andi a0, a1, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB47_7
; RV32ZVE32F-NEXT: .LBB47_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV32ZVE32F-NEXT: sw a4, 0(a0)
; RV32ZVE32F-NEXT: sw a5, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, t0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB47_8
; RV32ZVE32F-NEXT: j .LBB47_9
;
@@ -10773,87 +10776,88 @@ define void @mscatter_baseidx_v8i16_v8f64(<8 x double> %val, ptr %base, <8 x i16
; RV32ZVE32F-LABEL: mscatter_baseidx_v8i16_v8f64:
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
+; RV32ZVE32F-NEXT: vmv.v.x v10, a0
+; RV32ZVE32F-NEXT: li a1, 8
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: vmv.x.s a0, v0
+; RV32ZVE32F-NEXT: andi a2, a0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32ZVE32F-NEXT: vwmaccus.vx v10, a1, v8
; RV32ZVE32F-NEXT: bnez a2, .LBB94_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
-; RV32ZVE32F-NEXT: bnez a0, .LBB94_10
+; RV32ZVE32F-NEXT: andi a1, a0, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_10
; RV32ZVE32F-NEXT: .LBB94_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
-; RV32ZVE32F-NEXT: bnez a0, .LBB94_11
+; RV32ZVE32F-NEXT: andi a1, a0, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_11
; RV32ZVE32F-NEXT: .LBB94_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
-; RV32ZVE32F-NEXT: bnez a0, .LBB94_12
+; RV32ZVE32F-NEXT: andi a1, a0, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_12
; RV32ZVE32F-NEXT: .LBB94_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
-; RV32ZVE32F-NEXT: bnez a0, .LBB94_13
+; RV32ZVE32F-NEXT: andi a1, a0, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_13
; RV32ZVE32F-NEXT: .LBB94_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
-; RV32ZVE32F-NEXT: bnez a0, .LBB94_14
+; RV32ZVE32F-NEXT: andi a1, a0, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_14
; RV32ZVE32F-NEXT: .LBB94_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
-; RV32ZVE32F-NEXT: bnez a0, .LBB94_15
+; RV32ZVE32F-NEXT: andi a1, a0, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_15
; RV32ZVE32F-NEXT: .LBB94_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB94_16
; RV32ZVE32F-NEXT: .LBB94_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB94_9: # %cond.store
-; RV32ZVE32F-NEXT: vmv.x.s a0, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
-; RV32ZVE32F-NEXT: beqz a0, .LBB94_2
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_2
; RV32ZVE32F-NEXT: .LBB94_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
-; RV32ZVE32F-NEXT: beqz a0, .LBB94_3
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 1
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_3
; RV32ZVE32F-NEXT: .LBB94_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
-; RV32ZVE32F-NEXT: beqz a0, .LBB94_4
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_4
; RV32ZVE32F-NEXT: .LBB94_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
-; RV32ZVE32F-NEXT: beqz a0, .LBB94_5
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_5
; RV32ZVE32F-NEXT: .LBB94_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
-; RV32ZVE32F-NEXT: beqz a0, .LBB94_6
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_6
; RV32ZVE32F-NEXT: .LBB94_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
-; RV32ZVE32F-NEXT: beqz a0, .LBB94_7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_7
; RV32ZVE32F-NEXT: .LBB94_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a0, a0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB94_8
; RV32ZVE32F-NEXT: .LBB94_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 7
; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV32ZVE32F-NEXT: fsd fa7, 0(a0)
; RV32ZVE32F-NEXT: ret
@@ -10974,87 +10978,88 @@ define void @mscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, ptr %base, <8
; RV32ZVE32F-LABEL: mscatter_baseidx_sext_v8i16_v8f64:
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
+; RV32ZVE32F-NEXT: vmv.v.x v10, a0
+; RV32ZVE32F-NEXT: li a1, 8
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: vmv.x.s a0, v0
+; RV32ZVE32F-NEXT: andi a2, a0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32ZVE32F-NEXT: vwmaccus.vx v10, a1, v8
; RV32ZVE32F-NEXT: bnez a2, .LBB95_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
-; RV32ZVE32F-NEXT: bnez a0, .LBB95_10
+; RV32ZVE32F-NEXT: andi a1, a0, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_10
; RV32ZVE32F-NEXT: .LBB95_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
-; RV32ZVE32F-NEXT: bnez a0, .LBB95_11
+; RV32ZVE32F-NEXT: andi a1, a0, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_11
; RV32ZVE32F-NEXT: .LBB95_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
-; RV32ZVE32F-NEXT: bnez a0, .LBB95_12
+; RV32ZVE32F-NEXT: andi a1, a0, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_12
; RV32ZVE32F-NEXT: .LBB95_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
-; RV32ZVE32F-NEXT: bnez a0, .LBB95_13
+; RV32ZVE32F-NEXT: andi a1, a0, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_13
; RV32ZVE32F-NEXT: .LBB95_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
-; RV32ZVE32F-NEXT: bnez a0, .LBB95_14
+; RV32ZVE32F-NEXT: andi a1, a0, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_14
; RV32ZVE32F-NEXT: .LBB95_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
-; RV32ZVE32F-NEXT: bnez a0, .LBB95_15
+; RV32ZVE32F-NEXT: andi a1, a0, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_15
; RV32ZVE32F-NEXT: .LBB95_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB95_16
; RV32ZVE32F-NEXT: .LBB95_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB95_9: # %cond.store
-; RV32ZVE32F-NEXT: vmv.x.s a0, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
-; RV32ZVE32F-NEXT: beqz a0, .LBB95_2
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_2
; RV32ZVE32F-NEXT: .LBB95_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
-; RV32ZVE32F-NEXT: beqz a0, .LBB95_3
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 1
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_3
; RV32ZVE32F-NEXT: .LBB95_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
-; RV32ZVE32F-NEXT: beqz a0, .LBB95_4
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_4
; RV32ZVE32F-NEXT: .LBB95_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
-; RV32ZVE32F-NEXT: beqz a0, .LBB95_5
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_5
; RV32ZVE32F-NEXT: .LBB95_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
-; RV32ZVE32F-NEXT: beqz a0, .LBB95_6
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_6
; RV32ZVE32F-NEXT: .LBB95_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
-; RV32ZVE32F-NEXT: beqz a0, .LBB95_7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_7
; RV32ZVE32F-NEXT: .LBB95_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a0, a0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB95_8
; RV32ZVE32F-NEXT: .LBB95_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 7
; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV32ZVE32F-NEXT: fsd fa7, 0(a0)
; RV32ZVE32F-NEXT: ret
@@ -11177,87 +11182,88 @@ define void @mscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, ptr %base, <8
; RV32ZVE32F-LABEL: mscatter_baseidx_zext_v8i16_v8f64:
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vzext.vf2 v10, v8
+; RV32ZVE32F-NEXT: vmv.v.x v10, a0
+; RV32ZVE32F-NEXT: li a1, 8
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: vmv.x.s a0, v0
+; RV32ZVE32F-NEXT: andi a2, a0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RV32ZVE32F-NEXT: vwmaccu.vx v10, a1, v8
; RV32ZVE32F-NEXT: bnez a2, .LBB96_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
-; RV32ZVE32F-NEXT: bnez a0, .LBB96_10
+; RV32ZVE32F-NEXT: andi a1, a0, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_10
; RV32ZVE32F-NEXT: .LBB96_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
-; RV32ZVE32F-NEXT: bnez a0, .LBB96_11
+; RV32ZVE32F-NEXT: andi a1, a0, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_11
; RV32ZVE32F-NEXT: .LBB96_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
-; RV32ZVE32F-NEXT: bnez a0, .LBB96_12
+; RV32ZVE32F-NEXT: andi a1, a0, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_12
; RV32ZVE32F-NEXT: .LBB96_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
-; RV32ZVE32F-NEXT: bnez a0, .LBB96_13
+; RV32ZVE32F-NEXT: andi a1, a0, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_13
; RV32ZVE32F-NEXT: .LBB96_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
-; RV32ZVE32F-NEXT: bnez a0, .LBB96_14
+; RV32ZVE32F-NEXT: andi a1, a0, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_14
; RV32ZVE32F-NEXT: .LBB96_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
-; RV32ZVE32F-NEXT: bnez a0, .LBB96_15
+; RV32ZVE32F-NEXT: andi a1, a0, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_15
; RV32ZVE32F-NEXT: .LBB96_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB96_16
; RV32ZVE32F-NEXT: .LBB96_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB96_9: # %cond.store
-; RV32ZVE32F-NEXT: vmv.x.s a0, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
-; RV32ZVE32F-NEXT: beqz a0, .LBB96_2
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_2
; RV32ZVE32F-NEXT: .LBB96_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
-; RV32ZVE32F-NEXT: beqz a0, .LBB96_3
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 1
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_3
; RV32ZVE32F-NEXT: .LBB96_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
-; RV32ZVE32F-NEXT: beqz a0, .LBB96_4
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_4
; RV32ZVE32F-NEXT: .LBB96_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
-; RV32ZVE32F-NEXT: beqz a0, .LBB96_5
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_5
; RV32ZVE32F-NEXT: .LBB96_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
-; RV32ZVE32F-NEXT: beqz a0, .LBB96_6
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_6
; RV32ZVE32F-NEXT: .LBB96_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
-; RV32ZVE32F-NEXT: beqz a0, .LBB96_7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a0, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_7
; RV32ZVE32F-NEXT: .LBB96_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a0, a0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB96_8
; RV32ZVE32F-NEXT: .LBB96_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vslidedown.vi v8, v8, 7
+; RV32ZVE32F-NEXT: vslidedown.vi v8, v10, 7
; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV32ZVE32F-NEXT: fsd fa7, 0(a0)
; RV32ZVE32F-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
index df9ff0fc39a7e8..9281bf87f98785 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -2209,25 +2209,25 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(ptr %base, <32 x i8> %i
; RV32-LABEL: vpgather_baseidx_zext_v32i8_v32f64:
; RV32: # %bb.0:
; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v12, v8
-; RV32-NEXT: li a3, 16
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a3, 8
+; RV32-NEXT: li a4, 16
+; RV32-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v8, a3
; RV32-NEXT: mv a2, a1
-; RV32-NEXT: bltu a1, a3, .LBB97_2
+; RV32-NEXT: bltu a1, a4, .LBB97_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a2, 16
; RV32-NEXT: .LBB97_2:
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei16.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsetivli zero, 16, e16, m4, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: addi a2, a1, -16
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e16, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei16.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2235,25 +2235,25 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(ptr %base, <32 x i8> %i
; RV64-LABEL: vpgather_baseidx_zext_v32i8_v32f64:
; RV64: # %bb.0:
; RV64-NEXT: li a2, 32
-; RV64-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v12, v8
-; RV64-NEXT: li a3, 16
-; RV64-NEXT: vsll.vi v16, v12, 3
+; RV64-NEXT: li a3, 8
+; RV64-NEXT: li a4, 16
+; RV64-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a3
; RV64-NEXT: mv a2, a1
-; RV64-NEXT: bltu a1, a3, .LBB97_2
+; RV64-NEXT: bltu a1, a4, .LBB97_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a2, 16
; RV64-NEXT: .LBB97_2:
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei16.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v16, 16
; RV64-NEXT: addi a2, a1, -16
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vi v0, v0, 2
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vi v0, v0, 2
; RV64-NEXT: and a1, a1, a2
-; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, ma
-; RV64-NEXT: vslidedown.vi v24, v16, 16
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei16.v v16, (a0), v24, v0.t
; RV64-NEXT: ret
@@ -2267,25 +2267,25 @@ define <32 x double> @vpgather_baseidx_v32i16_v32f64(ptr %base, <32 x i16> %idxs
; RV32-LABEL: vpgather_baseidx_v32i16_v32f64:
; RV32: # %bb.0:
; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v8
-; RV32-NEXT: li a3, 16
-; RV32-NEXT: vsll.vi v16, v16, 3
+; RV32-NEXT: li a3, 8
+; RV32-NEXT: li a4, 16
+; RV32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a3
; RV32-NEXT: mv a2, a1
-; RV32-NEXT: bltu a1, a3, .LBB98_2
+; RV32-NEXT: bltu a1, a4, .LBB98_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a2, 16
; RV32-NEXT: .LBB98_2:
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: addi a2, a1, -16
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2325,25 +2325,25 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(ptr %base, <32 x i16>
; RV32-LABEL: vpgather_baseidx_sext_v32i16_v32f64:
; RV32: # %bb.0:
; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v8
-; RV32-NEXT: li a3, 16
-; RV32-NEXT: vsll.vi v16, v16, 3
+; RV32-NEXT: li a3, 8
+; RV32-NEXT: li a4, 16
+; RV32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a3
; RV32-NEXT: mv a2, a1
-; RV32-NEXT: bltu a1, a3, .LBB99_2
+; RV32-NEXT: bltu a1, a4, .LBB99_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a2, 16
; RV32-NEXT: .LBB99_2:
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: addi a2, a1, -16
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2385,25 +2385,25 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(ptr %base, <32 x i16>
; RV32-LABEL: vpgather_baseidx_zext_v32i16_v32f64:
; RV32: # %bb.0:
; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT: vzext.vf2 v16, v8
-; RV32-NEXT: li a3, 16
-; RV32-NEXT: vsll.vi v16, v16, 3
+; RV32-NEXT: li a3, 8
+; RV32-NEXT: li a4, 16
+; RV32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v8, a3
; RV32-NEXT: mv a2, a1
-; RV32-NEXT: bltu a1, a3, .LBB100_2
+; RV32-NEXT: bltu a1, a4, .LBB100_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a2, 16
; RV32-NEXT: .LBB100_2:
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: addi a2, a1, -16
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2411,25 +2411,25 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(ptr %base, <32 x i16>
; RV64-LABEL: vpgather_baseidx_zext_v32i16_v32f64:
; RV64: # %bb.0:
; RV64-NEXT: li a2, 32
-; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v16, v8
-; RV64-NEXT: li a3, 16
-; RV64-NEXT: vsll.vi v16, v16, 3
+; RV64-NEXT: li a3, 8
+; RV64-NEXT: li a4, 16
+; RV64-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a3
; RV64-NEXT: mv a2, a1
-; RV64-NEXT: bltu a1, a3, .LBB100_2
+; RV64-NEXT: bltu a1, a4, .LBB100_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a2, 16
; RV64-NEXT: .LBB100_2:
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v16, 16
; RV64-NEXT: addi a2, a1, -16
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vi v0, v0, 2
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vi v0, v0, 2
; RV64-NEXT: and a1, a1, a2
-; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV64-NEXT: vslidedown.vi v24, v16, 16
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV64-NEXT: ret
@@ -2468,20 +2468,19 @@ define <32 x double> @vpgather_baseidx_v32i32_v32f64(ptr %base, <32 x i32> %idxs
; RV64-LABEL: vpgather_baseidx_v32i32_v32f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV64-NEXT: vslidedown.vi v16, v8, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v8
+; RV64-NEXT: vslidedown.vi v24, v8, 16
+; RV64-NEXT: li a2, 8
; RV64-NEXT: li a3, 16
-; RV64-NEXT: vsext.vf2 v8, v16
-; RV64-NEXT: vsll.vi v16, v8, 3
-; RV64-NEXT: vsll.vi v8, v24, 3
+; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v24, a2
+; RV64-NEXT: vwmulsu.vx v24, v8, a2
; RV64-NEXT: mv a2, a1
; RV64-NEXT: bltu a1, a3, .LBB101_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a2, 16
; RV64-NEXT: .LBB101_2:
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
@@ -2524,22 +2523,20 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(ptr %base, <32 x i32>
;
; RV64-LABEL: vpgather_baseidx_sext_v32i32_v32f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v8
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 16
+; RV64-NEXT: vslidedown.vi v24, v8, 16
+; RV64-NEXT: li a2, 8
; RV64-NEXT: li a3, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v16, v16, 3
-; RV64-NEXT: vsll.vi v8, v24, 3
+; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v24, a2
+; RV64-NEXT: vwmulsu.vx v24, v8, a2
; RV64-NEXT: mv a2, a1
; RV64-NEXT: bltu a1, a3, .LBB102_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a2, 16
; RV64-NEXT: .LBB102_2:
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
@@ -2583,22 +2580,20 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(ptr %base, <32 x i32>
;
; RV64-LABEL: vpgather_baseidx_zext_v32i32_v32f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v24, v8
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v8, 16
+; RV64-NEXT: vslidedown.vi v24, v8, 16
+; RV64-NEXT: li a2, 8
; RV64-NEXT: li a3, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v16, v16, 3
-; RV64-NEXT: vsll.vi v8, v24, 3
+; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v24, a2
+; RV64-NEXT: vwmulu.vx v24, v8, a2
; RV64-NEXT: mv a2, a1
; RV64-NEXT: bltu a1, a3, .LBB103_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a2, 16
; RV64-NEXT: .LBB103_2:
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
index d691dcd5c54b6f..e08c2fe8bf82ed 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -1842,36 +1842,35 @@ define void @vpscatter_baseidx_v32i32_v32f64(<32 x double> %val, ptr %base, <32
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a3, vlenb
-; RV64-NEXT: slli a4, a3, 3
-; RV64-NEXT: add a3, a4, a3
+; RV64-NEXT: slli a3, a3, 4
; RV64-NEXT: sub sp, sp, a3
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x09, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 9 * vlenb
-; RV64-NEXT: addi a3, sp, 16
-; RV64-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; RV64-NEXT: csrr a3, vlenb
+; RV64-NEXT: slli a3, a3, 3
; RV64-NEXT: add a3, sp, a3
; RV64-NEXT: addi a3, a3, 16
; RV64-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV64-NEXT: addi a3, sp, 16
+; RV64-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV64-NEXT: li a3, 32
; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: vle32.v v24, (a1)
+; RV64-NEXT: li a1, 8
; RV64-NEXT: li a3, 16
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV64-NEXT: vslidedown.vi v16, v24, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v0, v24
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsll.vi v24, v0, 3
+; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v8, v16, a1
+; RV64-NEXT: vwmulsu.vx v16, v24, a1
; RV64-NEXT: mv a1, a2
; RV64-NEXT: bltu a2, a3, .LBB84_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB84_2:
; RV64-NEXT: addi a3, sp, 16
-; RV64-NEXT: vl1r.v v0, (a3) # Unknown-size Folded Reload
+; RV64-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
+; RV64-NEXT: vsoxei64.v v24, (a0), v16, v0.t
; RV64-NEXT: addi a1, a2, -16
; RV64-NEXT: sltu a2, a2, a1
; RV64-NEXT: addi a2, a2, -1
@@ -1879,14 +1878,14 @@ define void @vpscatter_baseidx_v32i32_v32f64(<32 x double> %val, ptr %base, <32
; RV64-NEXT: vslidedown.vi v0, v0, 2
; RV64-NEXT: and a1, a2, a1
; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 3
; RV64-NEXT: add a2, sp, a2
; RV64-NEXT: addi a2, a2, 16
-; RV64-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV64-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a1, a0, 3
-; RV64-NEXT: add a0, a1, a0
+; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: .cfi_def_cfa sp, 16
; RV64-NEXT: addi sp, sp, 16
@@ -1942,24 +1941,22 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, ptr %base,
; RV64-NEXT: li a3, 32
; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: vle32.v v24, (a1)
+; RV64-NEXT: li a1, 8
; RV64-NEXT: li a3, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v24
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v24, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v8
-; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: vsll.vi v24, v16, 3
+; RV64-NEXT: vslidedown.vi v16, v24, 16
+; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v8, v16, a1
+; RV64-NEXT: vwmulsu.vx v16, v24, a1
; RV64-NEXT: mv a1, a2
; RV64-NEXT: bltu a2, a3, .LBB85_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB85_2:
; RV64-NEXT: addi a3, sp, 16
-; RV64-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV64-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsoxei64.v v16, (a0), v24, v0.t
+; RV64-NEXT: vsoxei64.v v24, (a0), v16, v0.t
; RV64-NEXT: addi a1, a2, -16
; RV64-NEXT: sltu a2, a2, a1
; RV64-NEXT: addi a2, a2, -1
@@ -2031,24 +2028,22 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, ptr %base,
; RV64-NEXT: li a3, 32
; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: vle32.v v24, (a1)
+; RV64-NEXT: li a1, 8
; RV64-NEXT: li a3, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v16, v24
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV64-NEXT: vslidedown.vi v8, v24, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v24, v8
-; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: vsll.vi v24, v16, 3
+; RV64-NEXT: vslidedown.vi v16, v24, 16
+; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v8, v16, a1
+; RV64-NEXT: vwmulu.vx v16, v24, a1
; RV64-NEXT: mv a1, a2
; RV64-NEXT: bltu a2, a3, .LBB86_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB86_2:
; RV64-NEXT: addi a3, sp, 16
-; RV64-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV64-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsoxei64.v v16, (a0), v24, v0.t
+; RV64-NEXT: vsoxei64.v v24, (a0), v16, v0.t
; RV64-NEXT: addi a1, a2, -16
; RV64-NEXT: sltu a2, a2, a1
; RV64-NEXT: addi a2, a2, -1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsll.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsll.ll
index fce22849a58afe..f9fef50112676c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsll.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsll.ll
@@ -432,19 +432,35 @@ define <8 x i32> @vwsll_vx_i8_v8i32_zext(<8 x i16> %a, i8 %b) {
}
define <8 x i32> @vwsll_vi_v8i32(<8 x i16> %a) {
-; CHECK-LABEL: vwsll_vi_v8i32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2
-; CHECK-NEXT: ret
+; RV32-LABEL: vwsll_vi_v8i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vzext.vf2 v10, v8
+; RV32-NEXT: vsll.vi v8, v10, 2
+; RV32-NEXT: ret
;
-; CHECK-ZVBB-LABEL: vwsll_vi_v8i32:
-; CHECK-ZVBB: # %bb.0:
-; CHECK-ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-ZVBB-NEXT: vwsll.vi v10, v8, 2
-; CHECK-ZVBB-NEXT: vmv2r.v v8, v10
-; CHECK-ZVBB-NEXT: ret
+; RV64-LABEL: vwsll_vi_v8i32:
+; RV64: # %bb.0:
+; RV64-NEXT: li a0, 4
+; RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v10, v8, a0
+; RV64-NEXT: vmv2r.v v8, v10
+; RV64-NEXT: ret
+;
+; CHECK-ZVBB-RV32-LABEL: vwsll_vi_v8i32:
+; CHECK-ZVBB-RV32: # %bb.0:
+; CHECK-ZVBB-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-ZVBB-RV32-NEXT: vwsll.vi v10, v8, 2
+; CHECK-ZVBB-RV32-NEXT: vmv2r.v v8, v10
+; CHECK-ZVBB-RV32-NEXT: ret
+;
+; CHECK-ZVBB-RV64-LABEL: vwsll_vi_v8i32:
+; CHECK-ZVBB-RV64: # %bb.0:
+; CHECK-ZVBB-RV64-NEXT: li a0, 4
+; CHECK-ZVBB-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-ZVBB-RV64-NEXT: vwmulu.vx v10, v8, a0
+; CHECK-ZVBB-RV64-NEXT: vmv2r.v v8, v10
+; CHECK-ZVBB-RV64-NEXT: ret
%x = zext <8 x i16> %a to <8 x i32>
%z = shl <8 x i32> %x, splat (i32 2)
ret <8 x i32> %z
@@ -654,15 +670,17 @@ define <16 x i16> @vwsll_vx_i8_v16i16_zext(<16 x i8> %a, i8 %b) {
define <16 x i16> @vwsll_vi_v16i16(<16 x i8> %a) {
; CHECK-LABEL: vwsll_vi_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: li a0, 4
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a0
+; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
;
; CHECK-ZVBB-LABEL: vwsll_vi_v16i16:
; CHECK-ZVBB: # %bb.0:
+; CHECK-ZVBB-NEXT: li a0, 4
; CHECK-ZVBB-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-ZVBB-NEXT: vwsll.vi v10, v8, 2
+; CHECK-ZVBB-NEXT: vwmulu.vx v10, v8, a0
; CHECK-ZVBB-NEXT: vmv2r.v v8, v10
; CHECK-ZVBB-NEXT: ret
%x = zext <16 x i8> %a to <16 x i16>