[llvm] [RISCV] Fold vector shift of sext/zext to widening multiply (PR #121563)
Piotr Fusik via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 3 03:43:19 PST 2025
https://github.com/pfusik created https://github.com/llvm/llvm-project/pull/121563
(shl (sext X), C) -> (vwmulsu X, 1u << C)
(shl (zext X), C) -> (vwmulu X, 1u << C)
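
For example, zero-extended indices scaled by 4 currently lower to an extend-plus-shift pair at the wide SEW; with this fold they become a single widening multiply at the narrow SEW, plus a scalar li for the multiplier. A minimal IR sketch of the shape being matched (the function name is illustrative, not taken from the patch; the assembly comments mirror the test diffs below):

  define <vscale x 8 x i32> @shl_zext(<vscale x 8 x i16> %x) {
    %ext = zext <vscale x 8 x i16> %x to <vscale x 8 x i32>
    %shl = shl <vscale x 8 x i32> %ext, splat (i32 2)
    ret <vscale x 8 x i32> %shl
  }

  ; before: vzext.vf2 v16, v8 ; vsll.vi v8, v16, 2   (SEW=e32, LMUL=m4)
  ; after:  li a1, 4 ; vwmulu.vx v16, v8, a1         (SEW=e16, LMUL=m2)

The zext form is only reached when the existing (shl (zext x), y) -> (vwsll x, y) combine does not apply (e.g. without Zvbb); the sext form has no vwsll equivalent, hence vwmulsu with an unsigned power-of-two multiplier.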
From 15412e74bf32e8588dab70427ff178720cbf6c26 Mon Sep 17 00:00:00 2001
From: Piotr Fusik <p.fusik at samsung.com>
Date: Fri, 3 Jan 2025 12:42:10 +0100
Subject: [PATCH] [RISCV] Fold vector shift of sext/zext to widening multiply
(shl (sext X), C) -> (vwmulsu X, 1u << C)
(shl (zext X), C) -> (vwmulu X, 1u << C)
---
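Note: the combine only fires when the extend has a single use and the shift amount C satisfies 1 < C < the narrow element width, and only when the result element width is exactly twice the source element width, so the multiplier 1u << C is always representable in the narrow type (e.g. i16 -> i32 with C = 2 uses the multiplier 4). C = 1 is deliberately left to the existing (vwadd x, x) / (vwaddu x, x) folds.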
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 76 ++-
llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll | 202 ++++----
.../test/CodeGen/RISCV/rvv/mscatter-sdnode.ll | 214 ++++-----
.../test/CodeGen/RISCV/rvv/vpgather-sdnode.ll | 423 ++++++++---------
.../CodeGen/RISCV/rvv/vpscatter-sdnode.ll | 431 ++++++++----------
llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll | 21 +-
llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll | 34 +-
7 files changed, 734 insertions(+), 667 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 04dd23d9cdaa20..955a15393ca8a1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -17341,6 +17341,78 @@ static SDValue combineScalarCTPOPToVCPOP(SDNode *N, SelectionDAG &DAG,
return DAG.getZExtOrTrunc(Pop, DL, VT);
}
+static SDValue combineSHL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
+ const RISCVSubtarget &Subtarget) {
+ if (DCI.isBeforeLegalize())
+ return SDValue();
+
+ // (shl (zext x), y) -> (vwsll x, y)
+ if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
+ return V;
+
+ // (shl (sext x), C) -> (vwmulsu x, 1u << C)
+ // (shl (zext x), C) -> (vwmulu x, 1u << C)
+
+ SDValue LHS = N->getOperand(0);
+ if (!LHS.hasOneUse())
+ return SDValue();
+ unsigned Opcode;
+ switch (LHS.getOpcode()) {
+ case ISD::SIGN_EXTEND:
+ Opcode = RISCVISD::VWMULSU_VL;
+ break;
+ case ISD::ZERO_EXTEND:
+ Opcode = RISCVISD::VWMULU_VL;
+ break;
+ default:
+ return SDValue();
+ }
+
+ SDValue RHS = N->getOperand(1);
+ APInt ShAmt;
+ if (!ISD::isConstantSplatVector(RHS.getNode(), ShAmt))
+ return SDValue();
+
+ // Better foldings:
+ // (shl (sext x), 1) -> (vwadd x, x)
+ // (shl (zext x), 1) -> (vwaddu x, x)
+ uint64_t ShAmtInt = ShAmt.getZExtValue();
+ if (ShAmtInt <= 1)
+ return SDValue();
+
+ SDValue NarrowOp = LHS.getOperand(0);
+ EVT NarrowVT = NarrowOp.getValueType();
+ uint64_t NarrowBits = NarrowVT.getScalarSizeInBits();
+ if (ShAmtInt >= NarrowBits)
+ return SDValue();
+ EVT VT = N->getValueType(0);
+ if (NarrowBits * 2 != VT.getScalarSizeInBits())
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc DL(N);
+ SDValue Passthru, Mask, VL;
+ switch (N->getOpcode()) {
+ case ISD::SHL:
+ if (!VT.isScalableVector())
+ return SDValue();
+ Passthru = DAG.getUNDEF(VT);
+ std::tie(Mask, VL) =
+ getDefaultScalableVLOps(VT.getSimpleVT(), DL, DAG, Subtarget);
+ break;
+ case RISCVISD::SHL_VL:
+ Passthru = N->getOperand(2);
+ Mask = N->getOperand(3);
+ VL = N->getOperand(4);
+ break;
+ default:
+ llvm_unreachable("Expected SHL");
+ }
+ return DAG.getNode(Opcode, DL, VT, NarrowOp,
+ DAG.getConstant(1ULL << ShAmtInt, SDLoc(RHS), NarrowVT),
+ Passthru, Mask, VL);
+}
+
SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
@@ -17970,7 +18042,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
break;
}
case RISCVISD::SHL_VL:
- if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
+ if (SDValue V = combineSHL(N, DCI, Subtarget))
return V;
[[fallthrough]];
case RISCVISD::SRA_VL:
@@ -17995,7 +18067,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::SRL:
case ISD::SHL: {
if (N->getOpcode() == ISD::SHL) {
- if (SDValue V = combineOp_VLToVWOp_VL(N, DCI, Subtarget))
+ if (SDValue V = combineSHL(N, DCI, Subtarget))
return V;
}
SDValue ShAmt = N->getOperand(1);
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index 9ee2324f615dd8..0fad09f27007c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -775,11 +775,11 @@ define <vscale x 8 x i32> @mgather_baseidx_sext_nxv8i8_nxv8i32(ptr %base, <vscal
define <vscale x 8 x i32> @mgather_baseidx_zext_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a1
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT: vluxei16.v v12, (a0), v8, v0.t
+; CHECK-NEXT: vluxei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
@@ -791,10 +791,11 @@ define <vscale x 8 x i32> @mgather_baseidx_zext_nxv8i8_nxv8i32(ptr %base, <vscal
define <vscale x 8 x i32> @mgather_baseidx_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; RV32-NEXT: vsext.vf2 v16, v8
-; RV32-NEXT: vsll.vi v8, v16, 2
-; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
@@ -815,10 +816,11 @@ define <vscale x 8 x i32> @mgather_baseidx_nxv8i16_nxv8i32(ptr %base, <vscale x
define <vscale x 8 x i32> @mgather_baseidx_sext_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; RV32-NEXT: vsext.vf2 v16, v8
-; RV32-NEXT: vsll.vi v8, v16, 2
-; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
@@ -840,10 +842,11 @@ define <vscale x 8 x i32> @mgather_baseidx_sext_nxv8i16_nxv8i32(ptr %base, <vsca
define <vscale x 8 x i32> @mgather_baseidx_zext_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i32> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vsll.vi v8, v16, 2
-; CHECK-NEXT: vluxei32.v v12, (a0), v8, v0.t
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v16, v8, a1
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; CHECK-NEXT: vluxei32.v v12, (a0), v16, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
@@ -863,10 +866,9 @@ define <vscale x 8 x i32> @mgather_baseidx_nxv8i32(ptr %base, <vscale x 8 x i32>
;
; RV64-LABEL: mgather_baseidx_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; RV64-NEXT: li a1, 4
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV64-NEXT: vwmulsu.vx v16, v8, a1
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
@@ -1034,11 +1036,11 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i8_nxv8i64(ptr %base, <vscal
define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vluxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: vluxei16.v v16, (a0), v10, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
@@ -1050,11 +1052,11 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i8_nxv8i64(ptr %base, <vscal
define <vscale x 8 x i64> @mgather_baseidx_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
+; RV32-NEXT: vluxei32.v v16, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
@@ -1074,11 +1076,11 @@ define <vscale x 8 x i64> @mgather_baseidx_nxv8i16_nxv8i64(ptr %base, <vscale x
define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
+; RV32-NEXT: vluxei32.v v16, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
@@ -1099,11 +1101,11 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i16_nxv8i64(ptr %base, <vsca
define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x i64> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf2 v12, v8
-; CHECK-NEXT: vsll.vi v8, v12, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v12, v8, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vluxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: vluxei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
@@ -1124,10 +1126,11 @@ define <vscale x 8 x i64> @mgather_baseidx_nxv8i32_nxv8i64(ptr %base, <vscale x
;
; RV64-LABEL: mgather_baseidx_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT: vsext.vf2 v24, v8
-; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v8, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i32> %idxs
@@ -1147,10 +1150,11 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i32_nxv8i64(ptr %base, <vsca
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT: vsext.vf2 v24, v8
-; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v8, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
@@ -1171,10 +1175,11 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i32_nxv8i64(ptr %base, <vsca
;
; RV64-LABEL: mgather_baseidx_zext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT: vzext.vf2 v24, v8
-; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v8, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
@@ -1845,11 +1850,11 @@ define <vscale x 8 x float> @mgather_baseidx_sext_nxv8i8_nxv8f32(ptr %base, <vsc
define <vscale x 8 x float> @mgather_baseidx_zext_nxv8i8_nxv8f32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a1
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT: vluxei16.v v12, (a0), v8, v0.t
+; CHECK-NEXT: vluxei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
@@ -1861,10 +1866,11 @@ define <vscale x 8 x float> @mgather_baseidx_zext_nxv8i8_nxv8f32(ptr %base, <vsc
define <vscale x 8 x float> @mgather_baseidx_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; RV32-NEXT: vsext.vf2 v16, v8
-; RV32-NEXT: vsll.vi v8, v16, 2
-; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
@@ -1885,10 +1891,11 @@ define <vscale x 8 x float> @mgather_baseidx_nxv8i16_nxv8f32(ptr %base, <vscale
define <vscale x 8 x float> @mgather_baseidx_sext_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; RV32-NEXT: vsext.vf2 v16, v8
-; RV32-NEXT: vsll.vi v8, v16, 2
-; RV32-NEXT: vluxei32.v v12, (a0), v8, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
@@ -1910,10 +1917,11 @@ define <vscale x 8 x float> @mgather_baseidx_sext_nxv8i16_nxv8f32(ptr %base, <vs
define <vscale x 8 x float> @mgather_baseidx_zext_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x float> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vzext.vf2 v16, v8
-; CHECK-NEXT: vsll.vi v8, v16, 2
-; CHECK-NEXT: vluxei32.v v12, (a0), v8, v0.t
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v16, v8, a1
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; CHECK-NEXT: vluxei32.v v12, (a0), v16, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
@@ -1933,10 +1941,9 @@ define <vscale x 8 x float> @mgather_baseidx_nxv8f32(ptr %base, <vscale x 8 x i3
;
; RV64-LABEL: mgather_baseidx_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
+; RV64-NEXT: li a1, 4
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, mu
+; RV64-NEXT: vwmulsu.vx v16, v8, a1
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
@@ -2104,11 +2111,11 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i8_nxv8f64(ptr %base, <vs
define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i8_nxv8f64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i8_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vluxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: vluxei16.v v16, (a0), v10, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
@@ -2120,11 +2127,11 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i8_nxv8f64(ptr %base, <vs
define <vscale x 8 x double> @mgather_baseidx_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
+; RV32-NEXT: vluxei32.v v16, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
@@ -2144,11 +2151,11 @@ define <vscale x 8 x double> @mgather_baseidx_nxv8i16_nxv8f64(ptr %base, <vscale
define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; RV32-LABEL: mgather_baseidx_sext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
+; RV32-NEXT: vluxei32.v v16, (a0), v12, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
@@ -2169,11 +2176,11 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i16_nxv8f64(ptr %base, <v
define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, <vscale x 8 x double> %passthru) {
; CHECK-LABEL: mgather_baseidx_zext_nxv8i16_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf2 v12, v8
-; CHECK-NEXT: vsll.vi v8, v12, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v12, v8, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vluxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: vluxei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
@@ -2194,10 +2201,11 @@ define <vscale x 8 x double> @mgather_baseidx_nxv8i32_nxv8f64(ptr %base, <vscale
;
; RV64-LABEL: mgather_baseidx_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT: vsext.vf2 v24, v8
-; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v8, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i32> %idxs
@@ -2217,10 +2225,11 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i32_nxv8f64(ptr %base, <v
;
; RV64-LABEL: mgather_baseidx_sext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT: vsext.vf2 v24, v8
-; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v8, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
@@ -2241,10 +2250,11 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i32_nxv8f64(ptr %base, <v
;
; RV64-LABEL: mgather_baseidx_zext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT: vzext.vf2 v24, v8
-; RV64-NEXT: vsll.vi v8, v24, 3
-; RV64-NEXT: vluxei64.v v16, (a0), v8, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v8, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
index 77a1f508d22184..3cf7cc9cb51526 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -581,11 +581,11 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, ptr %
define void @mscatter_baseidx_zext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v14, v12
-; CHECK-NEXT: vsll.vi v12, v14, 2
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v14, v12, a1
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: vsoxei16.v v8, (a0), v14, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
@@ -596,10 +596,11 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, ptr %
define void @mscatter_baseidx_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8i32:
@@ -618,10 +619,11 @@ define void @mscatter_baseidx_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %base
define void @mscatter_baseidx_sext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i32:
@@ -641,10 +643,11 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr
define void @mscatter_baseidx_zext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf2 v16, v12
-; CHECK-NEXT: vsll.vi v12, v16, 2
-; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v16, v12, a1
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
@@ -662,10 +665,9 @@ define void @mscatter_baseidx_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscal
;
; RV64-LABEL: mscatter_baseidx_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v12
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: li a1, 4
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v12, a1
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %idxs
@@ -817,11 +819,11 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, ptr %
define void @mscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v18, v16
-; CHECK-NEXT: vsll.vi v16, v18, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v18, v16, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -832,11 +834,11 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, ptr %
define void @mscatter_baseidx_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8i64:
@@ -854,11 +856,11 @@ define void @mscatter_baseidx_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %base
define void @mscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i64:
@@ -877,11 +879,11 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr
define void @mscatter_baseidx_zext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf2 v20, v16
-; CHECK-NEXT: vsll.vi v16, v20, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v20, v16, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -900,10 +902,11 @@ define void @mscatter_baseidx_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, ptr %base
;
; RV64-LABEL: mscatter_baseidx_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i32> %idxs
call void @llvm.masked.scatter.nxv8i64.nxv8p0(<vscale x 8 x i64> %val, <vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m)
@@ -921,10 +924,11 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, ptr
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -943,10 +947,11 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, ptr
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v16, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1475,11 +1480,11 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, ptr
define void @mscatter_baseidx_zext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v14, v12
-; CHECK-NEXT: vsll.vi v12, v14, 2
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v14, v12, a1
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: vsoxei16.v v8, (a0), v14, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
@@ -1490,10 +1495,11 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, ptr
define void @mscatter_baseidx_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8f32:
@@ -1512,10 +1518,11 @@ define void @mscatter_baseidx_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %ba
define void @mscatter_baseidx_sext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a1, 4
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a1
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f32:
@@ -1535,10 +1542,11 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, pt
define void @mscatter_baseidx_zext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf2 v16, v12
-; CHECK-NEXT: vsll.vi v12, v16, 2
-; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v16, v12, a1
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
@@ -1556,10 +1564,9 @@ define void @mscatter_baseidx_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vsc
;
; RV64-LABEL: mscatter_baseidx_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v12
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: li a1, 4
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v12, a1
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %idxs
@@ -1711,11 +1718,11 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, pt
define void @mscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v18, v16
-; CHECK-NEXT: vsll.vi v16, v18, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v18, v16, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1726,11 +1733,11 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, pt
define void @mscatter_baseidx_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8f64:
@@ -1748,11 +1755,11 @@ define void @mscatter_baseidx_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %b
define void @mscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a1
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f64:
@@ -1771,11 +1778,11 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, p
define void @mscatter_baseidx_zext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; CHECK-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vzext.vf2 v20, v16
-; CHECK-NEXT: vsll.vi v16, v20, 3
+; CHECK-NEXT: li a1, 8
+; CHECK-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; CHECK-NEXT: vwmulu.vx v20, v16, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; CHECK-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1794,10 +1801,11 @@ define void @mscatter_baseidx_nxv8i32_nxv8f64(<vscale x 8 x double> %val, ptr %b
;
; RV64-LABEL: mscatter_baseidx_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i32> %idxs
call void @llvm.masked.scatter.nxv8f64.nxv8p0(<vscale x 8 x double> %val, <vscale x 8 x ptr> %ptrs, i32 8, <vscale x 8 x i1> %m)
@@ -1815,10 +1823,11 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, p
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1837,10 +1846,11 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, p
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a1, 8
+; RV64-NEXT: vsetvli a2, zero, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v16, a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -2015,15 +2025,15 @@ define void @mscatter_baseidx_nxv16i16_nxv16f64(<vscale x 8 x double> %val0, <vs
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: srli a1, a1, 3
; RV32-NEXT: vslidedown.vx v7, v0, a1
-; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; RV32-NEXT: vsext.vf2 v24, v8
-; RV32-NEXT: vsll.vi v8, v24, 3
+; RV32-NEXT: li a1, 8
+; RV32-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; RV32-NEXT: vwmulsu.vx v24, v8, a1
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v16, (a0), v8, v0.t
+; RV32-NEXT: vsoxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vsoxei32.v v16, (a0), v12, v0.t
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsoxei32.v v8, (a0), v28, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
index abe7bdad8125ae..1007d1ce649cc4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -799,18 +799,18 @@ define <vscale x 8 x i32> @vpgather_baseidx_sext_nxv8i8_nxv8i32(ptr %base, <vsca
define <vscale x 8 x i32> @vpgather_baseidx_zext_nxv8i8_nxv8i32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v10, v8
-; RV32-NEXT: vsll.vi v12, v10, 2
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v12, v8, a2
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV32-NEXT: vluxei16.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v10, v8
-; RV64-NEXT: vsll.vi v12, v10, 2
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v12, v8, a2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV64-NEXT: vluxei16.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
@@ -823,10 +823,11 @@ define <vscale x 8 x i32> @vpgather_baseidx_zext_nxv8i8_nxv8i32(ptr %base, <vsca
define <vscale x 8 x i32> @vpgather_baseidx_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8i32:
@@ -845,10 +846,11 @@ define <vscale x 8 x i32> @vpgather_baseidx_nxv8i16_nxv8i32(ptr %base, <vscale x
define <vscale x 8 x i32> @vpgather_baseidx_sext_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i32:
@@ -868,18 +870,20 @@ define <vscale x 8 x i32> @vpgather_baseidx_sext_nxv8i16_nxv8i32(ptr %base, <vsc
define <vscale x 8 x i32> @vpgather_baseidx_zext_nxv8i16_nxv8i32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v12, v8, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v12, v8
-; RV64-NEXT: vsll.vi v8, v12, 2
-; RV64-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v12, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
@@ -897,10 +901,9 @@ define <vscale x 8 x i32> @vpgather_baseidx_nxv8i32(ptr %base, <vscale x 8 x i32
;
; RV64-LABEL: vpgather_baseidx_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %idxs
@@ -1049,18 +1052,18 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i8_nxv8i64(ptr %base, <vsca
define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i8_nxv8i64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v10, v8
-; RV32-NEXT: vsll.vi v16, v10, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei16.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v10, v8
-; RV64-NEXT: vsll.vi v16, v10, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vluxei16.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -1073,9 +1076,9 @@ define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i8_nxv8i64(ptr %base, <vsca
define <vscale x 8 x i64> @vpgather_baseidx_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1095,9 +1098,9 @@ define <vscale x 8 x i64> @vpgather_baseidx_nxv8i16_nxv8i64(ptr %base, <vscale x
define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1118,18 +1121,18 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i16_nxv8i64(ptr %base, <vsc
define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i16_nxv8i64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v12, v8
-; RV64-NEXT: vsll.vi v16, v12, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -1150,10 +1153,11 @@ define <vscale x 8 x i64> @vpgather_baseidx_nxv8i32_nxv8i64(ptr %base, <vscale x
;
; RV64-LABEL: vpgather_baseidx_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i32> %idxs
%v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
@@ -1171,10 +1175,11 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i32_nxv8i64(ptr %base, <vsc
;
; RV64-LABEL: vpgather_baseidx_sext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1193,10 +1198,11 @@ define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i32_nxv8i64(ptr %base, <vsc
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1742,18 +1748,18 @@ define <vscale x 8 x float> @vpgather_baseidx_sext_nxv8i8_nxv8f32(ptr %base, <vs
define <vscale x 8 x float> @vpgather_baseidx_zext_nxv8i8_nxv8f32(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v10, v8
-; RV32-NEXT: vsll.vi v12, v10, 2
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v12, v8, a2
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV32-NEXT: vluxei16.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v10, v8
-; RV64-NEXT: vsll.vi v12, v10, 2
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v12, v8, a2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; RV64-NEXT: vluxei16.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
@@ -1766,10 +1772,11 @@ define <vscale x 8 x float> @vpgather_baseidx_zext_nxv8i8_nxv8f32(ptr %base, <vs
define <vscale x 8 x float> @vpgather_baseidx_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_nxv8i16_nxv8f32:
@@ -1788,10 +1795,11 @@ define <vscale x 8 x float> @vpgather_baseidx_nxv8i16_nxv8f32(ptr %base, <vscale
define <vscale x 8 x float> @vpgather_baseidx_sext_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v12, v8, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f32:
@@ -1811,18 +1819,20 @@ define <vscale x 8 x float> @vpgather_baseidx_sext_nxv8i16_nxv8f32(ptr %base, <v
define <vscale x 8 x float> @vpgather_baseidx_zext_nxv8i16_nxv8f32(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v12, v8, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v12, v8
-; RV64-NEXT: vsll.vi v8, v12, 2
-; RV64-NEXT: vluxei32.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v12, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
@@ -1840,10 +1850,9 @@ define <vscale x 8 x float> @vpgather_baseidx_nxv8f32(ptr %base, <vscale x 8 x i
;
; RV64-LABEL: vpgather_baseidx_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %idxs
@@ -1992,18 +2001,18 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i8_nxv6f64(ptr %base, <v
define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i8_nxv6f64(ptr %base, <vscale x 6 x i8> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv6i8_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v10, v8
-; RV32-NEXT: vsll.vi v16, v10, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei16.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv6i8_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v10, v8
-; RV64-NEXT: vsll.vi v16, v10, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vluxei16.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2016,9 +2025,9 @@ define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i8_nxv6f64(ptr %base, <v
define <vscale x 6 x double> @vpgather_baseidx_nxv6i16_nxv6f64(ptr %base, <vscale x 6 x i16> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv6i16_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2038,9 +2047,9 @@ define <vscale x 6 x double> @vpgather_baseidx_nxv6i16_nxv6f64(ptr %base, <vscal
define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i16_nxv6f64(ptr %base, <vscale x 6 x i16> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv6i16_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2061,18 +2070,18 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i16_nxv6f64(ptr %base, <
define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i16_nxv6f64(ptr %base, <vscale x 6 x i16> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv6i16_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv6i16_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v12, v8
-; RV64-NEXT: vsll.vi v16, v12, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2093,10 +2102,11 @@ define <vscale x 6 x double> @vpgather_baseidx_nxv6i32_nxv6f64(ptr %base, <vscal
;
; RV64-LABEL: vpgather_baseidx_nxv6i32_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i32> %idxs
%v = call <vscale x 6 x double> @llvm.vp.gather.nxv6f64.nxv6p0(<vscale x 6 x ptr> %ptrs, <vscale x 6 x i1> %m, i32 %evl)
@@ -2114,10 +2124,11 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i32_nxv6f64(ptr %base, <
;
; RV64-LABEL: vpgather_baseidx_sext_nxv6i32_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 6 x i32> %idxs to <vscale x 6 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i64> %eidxs
@@ -2136,10 +2147,11 @@ define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i32_nxv6f64(ptr %base, <
;
; RV64-LABEL: vpgather_baseidx_zext_nxv6i32_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 6 x i32> %idxs to <vscale x 6 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i64> %eidxs
@@ -2235,18 +2247,18 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i8_nxv8f64(ptr %base, <v
define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i8_nxv8f64(ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v10, v8
-; RV32-NEXT: vsll.vi v16, v10, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei16.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i8_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v10, v8
-; RV64-NEXT: vsll.vi v16, v10, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vluxei16.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2259,9 +2271,9 @@ define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i8_nxv8f64(ptr %base, <v
define <vscale x 8 x double> @vpgather_baseidx_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2281,9 +2293,9 @@ define <vscale x 8 x double> @vpgather_baseidx_nxv8i16_nxv8f64(ptr %base, <vscal
define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2304,18 +2316,18 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i16_nxv8f64(ptr %base, <
define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i16_nxv8f64(ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v12, v8
-; RV32-NEXT: vsll.vi v16, v12, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v8, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i16_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v12, v8
-; RV64-NEXT: vsll.vi v16, v12, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2336,10 +2348,11 @@ define <vscale x 8 x double> @vpgather_baseidx_nxv8i32_nxv8f64(ptr %base, <vscal
;
; RV64-LABEL: vpgather_baseidx_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i32> %idxs
%v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0(<vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
@@ -2357,10 +2370,11 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i32_nxv8f64(ptr %base, <
;
; RV64-LABEL: vpgather_baseidx_sext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -2379,10 +2393,11 @@ define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i32_nxv8f64(ptr %base, <
;
; RV64-LABEL: vpgather_baseidx_zext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v16, v8
-; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v8, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -2465,27 +2480,26 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv1r.v v12, v0
-; RV32-NEXT: vsext.vf2 v16, v8
+; RV32-NEXT: li a3, 8
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: vsll.vi v24, v16, 3
-; RV32-NEXT: sub a3, a1, a2
-; RV32-NEXT: srli a4, a2, 3
-; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a4
-; RV32-NEXT: sltu a4, a1, a3
-; RV32-NEXT: addi a4, a4, -1
-; RV32-NEXT: and a3, a4, a3
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
+; RV32-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV32-NEXT: vwmulsu.vx v24, v8, a3
+; RV32-NEXT: mv a3, a1
; RV32-NEXT: bltu a1, a2, .LBB112_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: mv a1, a2
+; RV32-NEXT: mv a3, a2
; RV32-NEXT: .LBB112_2:
-; RV32-NEXT: vmv1r.v v0, v12
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
+; RV32-NEXT: sub a3, a1, a2
+; RV32-NEXT: srli a2, a2, 3
+; RV32-NEXT: sltu a1, a1, a3
+; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a2
+; RV32-NEXT: and a1, a1, a3
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_nxv16i16_nxv16f64:
@@ -2523,27 +2537,26 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv1r.v v12, v0
-; RV32-NEXT: vsext.vf2 v16, v8
+; RV32-NEXT: li a3, 8
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: vsll.vi v24, v16, 3
-; RV32-NEXT: sub a3, a1, a2
-; RV32-NEXT: srli a4, a2, 3
-; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a4
-; RV32-NEXT: sltu a4, a1, a3
-; RV32-NEXT: addi a4, a4, -1
-; RV32-NEXT: and a3, a4, a3
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
+; RV32-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV32-NEXT: vwmulsu.vx v24, v8, a3
+; RV32-NEXT: mv a3, a1
; RV32-NEXT: bltu a1, a2, .LBB113_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: mv a1, a2
+; RV32-NEXT: mv a3, a2
; RV32-NEXT: .LBB113_2:
-; RV32-NEXT: vmv1r.v v0, v12
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
+; RV32-NEXT: sub a3, a1, a2
+; RV32-NEXT: srli a2, a2, 3
+; RV32-NEXT: sltu a1, a1, a3
+; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a2
+; RV32-NEXT: and a1, a1, a3
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_sext_nxv16i16_nxv16f64:
@@ -2582,52 +2595,50 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_baseidx_zext_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv1r.v v12, v0
-; RV32-NEXT: vzext.vf2 v16, v8
+; RV32-NEXT: li a3, 8
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: vsll.vi v24, v16, 3
-; RV32-NEXT: sub a3, a1, a2
-; RV32-NEXT: srli a4, a2, 3
-; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a4
-; RV32-NEXT: sltu a4, a1, a3
-; RV32-NEXT: addi a4, a4, -1
-; RV32-NEXT: and a3, a4, a3
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
+; RV32-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV32-NEXT: vwmulu.vx v24, v8, a3
+; RV32-NEXT: mv a3, a1
; RV32-NEXT: bltu a1, a2, .LBB114_2
; RV32-NEXT: # %bb.1:
-; RV32-NEXT: mv a1, a2
+; RV32-NEXT: mv a3, a2
; RV32-NEXT: .LBB114_2:
-; RV32-NEXT: vmv1r.v v0, v12
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
+; RV32-NEXT: sub a3, a1, a2
+; RV32-NEXT: srli a2, a2, 3
+; RV32-NEXT: sltu a1, a1, a3
+; RV32-NEXT: addi a1, a1, -1
+; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a2
+; RV32-NEXT: and a1, a1, a3
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_zext_nxv16i16_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli a2, zero, e32, m8, ta, ma
-; RV64-NEXT: vmv1r.v v12, v0
-; RV64-NEXT: vzext.vf2 v16, v8
+; RV64-NEXT: li a3, 8
; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: vsll.vi v24, v16, 3
-; RV64-NEXT: sub a3, a1, a2
-; RV64-NEXT: srli a4, a2, 3
-; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vx v0, v0, a4
-; RV64-NEXT: sltu a4, a1, a3
-; RV64-NEXT: addi a4, a4, -1
-; RV64-NEXT: and a3, a4, a3
-; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV64-NEXT: vluxei32.v v16, (a0), v28, v0.t
+; RV64-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v8, a3
+; RV64-NEXT: mv a3, a1
; RV64-NEXT: bltu a1, a2, .LBB114_2
; RV64-NEXT: # %bb.1:
-; RV64-NEXT: mv a1, a2
+; RV64-NEXT: mv a3, a2
; RV64-NEXT: .LBB114_2:
-; RV64-NEXT: vmv1r.v v0, v12
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v8, (a0), v24, v0.t
+; RV64-NEXT: sub a3, a1, a2
+; RV64-NEXT: srli a2, a2, 3
+; RV64-NEXT: sltu a1, a1, a3
+; RV64-NEXT: addi a1, a1, -1
+; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vx v0, v0, a2
+; RV64-NEXT: and a1, a1, a3
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 16 x i16> %idxs to <vscale x 16 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 16 x i64> %eidxs
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
index 647e3965b7ec27..2cf6248c175981 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
@@ -659,20 +659,20 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, ptr
define void @vpscatter_baseidx_zext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v14, v12
-; RV32-NEXT: vsll.vi v12, v14, 2
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v14, v12, a2
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV32-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; RV32-NEXT: vsoxei16.v v8, (a0), v14, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v14, v12
-; RV64-NEXT: vsll.vi v12, v14, 2
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v14, v12, a2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV64-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; RV64-NEXT: vsoxei16.v v8, (a0), v14, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
@@ -683,10 +683,11 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, ptr
define void @vpscatter_baseidx_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8i32:
@@ -705,10 +706,11 @@ define void @vpscatter_baseidx_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %bas
define void @vpscatter_baseidx_sext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i32:
@@ -728,18 +730,20 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr
define void @vpscatter_baseidx_zext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v12, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v16, v12
-; RV64-NEXT: vsll.vi v12, v16, 2
-; RV64-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v12, a2
+; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %eidxs
@@ -757,10 +761,9 @@ define void @vpscatter_baseidx_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vsca
;
; RV64-LABEL: vpscatter_baseidx_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v12
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v12, a2
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, ptr %base, <vscale x 8 x i32> %idxs
@@ -904,20 +907,20 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, ptr
define void @vpscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v18, v16
-; RV32-NEXT: vsll.vi v16, v18, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v18, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v18, v16
-; RV64-NEXT: vsll.vi v16, v18, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v18, v16, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -928,11 +931,11 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, ptr
define void @vpscatter_baseidx_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8i64:
@@ -950,11 +953,11 @@ define void @vpscatter_baseidx_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %bas
define void @vpscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8i64:
@@ -973,20 +976,20 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr
define void @vpscatter_baseidx_zext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v20, v16
-; RV64-NEXT: vsll.vi v16, v20, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v20, v16, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1005,10 +1008,11 @@ define void @vpscatter_baseidx_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, ptr %bas
;
; RV64-LABEL: vpscatter_baseidx_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i32> %idxs
call void @llvm.vp.scatter.nxv8i64.nxv8p0(<vscale x 8 x i64> %val, <vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
@@ -1026,10 +1030,11 @@ define void @vpscatter_baseidx_sext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, ptr
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1048,10 +1053,11 @@ define void @vpscatter_baseidx_zext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, ptr
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, ptr %base, <vscale x 8 x i64> %eidxs
@@ -1572,20 +1578,20 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, pt
define void @vpscatter_baseidx_zext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v14, v12
-; RV32-NEXT: vsll.vi v12, v14, 2
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v14, v12, a2
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV32-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; RV32-NEXT: vsoxei16.v v8, (a0), v14, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v14, v12
-; RV64-NEXT: vsll.vi v12, v14, 2
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v14, v12, a2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV64-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; RV64-NEXT: vsoxei16.v v8, (a0), v14, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
@@ -1596,10 +1602,11 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, pt
define void @vpscatter_baseidx_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8f32:
@@ -1618,10 +1625,11 @@ define void @vpscatter_baseidx_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %b
define void @vpscatter_baseidx_sext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v16, v12, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f32:
@@ -1641,18 +1649,20 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, p
define void @vpscatter_baseidx_zext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v16, v12
-; RV32-NEXT: vsll.vi v12, v16, 2
-; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV32-NEXT: li a2, 4
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v16, v12, a2
+; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v16, v12
-; RV64-NEXT: vsll.vi v12, v16, 2
-; RV64-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v16, v12, a2
+; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %eidxs
@@ -1670,10 +1680,9 @@ define void @vpscatter_baseidx_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vs
;
; RV64-LABEL: vpscatter_baseidx_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v12
-; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT: li a2, 4
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v16, v12, a2
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, ptr %base, <vscale x 8 x i32> %idxs
@@ -1817,20 +1826,20 @@ define void @vpscatter_baseidx_sext_nxv6i8_nxv6f64(<vscale x 6 x double> %val, p
define void @vpscatter_baseidx_zext_nxv6i8_nxv6f64(<vscale x 6 x double> %val, ptr %base, <vscale x 6 x i8> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv6i8_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v18, v16
-; RV32-NEXT: vsll.vi v16, v18, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v18, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv6i8_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v18, v16
-; RV64-NEXT: vsll.vi v16, v18, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v18, v16, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 6 x i8> %idxs to <vscale x 6 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i64> %eidxs
@@ -1841,11 +1850,11 @@ define void @vpscatter_baseidx_zext_nxv6i8_nxv6f64(<vscale x 6 x double> %val, p
define void @vpscatter_baseidx_nxv6i16_nxv6f64(<vscale x 6 x double> %val, ptr %base, <vscale x 6 x i16> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_nxv6i16_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_nxv6i16_nxv6f64:
@@ -1863,11 +1872,11 @@ define void @vpscatter_baseidx_nxv6i16_nxv6f64(<vscale x 6 x double> %val, ptr %
define void @vpscatter_baseidx_sext_nxv6i16_nxv6f64(<vscale x 6 x double> %val, ptr %base, <vscale x 6 x i16> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_sext_nxv6i16_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv6i16_nxv6f64:
@@ -1886,20 +1895,20 @@ define void @vpscatter_baseidx_sext_nxv6i16_nxv6f64(<vscale x 6 x double> %val,
define void @vpscatter_baseidx_zext_nxv6i16_nxv6f64(<vscale x 6 x double> %val, ptr %base, <vscale x 6 x i16> %idxs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv6i16_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv6i16_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v20, v16
-; RV64-NEXT: vsll.vi v16, v20, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v20, v16, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 6 x i16> %idxs to <vscale x 6 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i64> %eidxs
@@ -1918,10 +1927,11 @@ define void @vpscatter_baseidx_nxv6i32_nxv6f64(<vscale x 6 x double> %val, ptr %
;
; RV64-LABEL: vpscatter_baseidx_nxv6i32_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i32> %idxs
call void @llvm.vp.scatter.nxv6f64.nxv6p0(<vscale x 6 x double> %val, <vscale x 6 x ptr> %ptrs, <vscale x 6 x i1> %m, i32 %evl)
@@ -1939,10 +1949,11 @@ define void @vpscatter_baseidx_sext_nxv6i32_nxv6f64(<vscale x 6 x double> %val,
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv6i32_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 6 x i32> %idxs to <vscale x 6 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i64> %eidxs
@@ -1961,10 +1972,11 @@ define void @vpscatter_baseidx_zext_nxv6i32_nxv6f64(<vscale x 6 x double> %val,
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv6i32_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 6 x i32> %idxs to <vscale x 6 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 6 x i64> %eidxs
@@ -2059,20 +2071,20 @@ define void @vpscatter_baseidx_sext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, p
define void @vpscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV32-NEXT: vzext.vf2 v18, v16
-; RV32-NEXT: vsll.vi v16, v18, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT: vwmulu.vx v18, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i8_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; RV64-NEXT: vzext.vf2 v18, v16
-; RV64-NEXT: vsll.vi v16, v18, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT: vwmulu.vx v18, v16, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsoxei16.v v8, (a0), v18, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -2083,11 +2095,11 @@ define void @vpscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, p
define void @vpscatter_baseidx_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_nxv8i16_nxv8f64:
@@ -2105,11 +2117,11 @@ define void @vpscatter_baseidx_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %
define void @vpscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vsext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulsu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv8i16_nxv8f64:
@@ -2128,20 +2140,20 @@ define void @vpscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val,
define void @vpscatter_baseidx_zext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, ptr %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf2 v20, v16
-; RV32-NEXT: vsll.vi v16, v20, 3
+; RV32-NEXT: li a2, 8
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV32-NEXT: vwmulu.vx v20, v16, a2
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV32-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i16_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf2 v20, v16
-; RV64-NEXT: vsll.vi v16, v20, 3
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; RV64-NEXT: vwmulu.vx v20, v16, a2
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; RV64-NEXT: vsoxei32.v v8, (a0), v20, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -2160,10 +2172,11 @@ define void @vpscatter_baseidx_nxv8i32_nxv8f64(<vscale x 8 x double> %val, ptr %
;
; RV64-LABEL: vpscatter_baseidx_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i32> %idxs
call void @llvm.vp.scatter.nxv8f64.nxv8p0(<vscale x 8 x double> %val, <vscale x 8 x ptr> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
@@ -2181,10 +2194,11 @@ define void @vpscatter_baseidx_sext_nxv8i32_nxv8f64(<vscale x 8 x double> %val,
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulsu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -2203,10 +2217,11 @@ define void @vpscatter_baseidx_zext_nxv8i32_nxv8f64(<vscale x 8 x double> %val,
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v24, v16
-; RV64-NEXT: vsll.vi v16, v24, 3
-; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; RV64-NEXT: li a2, 8
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v16, a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 8 x i64> %eidxs
@@ -2307,25 +2322,16 @@ define void @vpscatter_nxv16f64(<vscale x 16 x double> %val, <vscale x 16 x ptr>
define void @vpscatter_baseidx_nxv16i16_nxv16f64(<vscale x 16 x double> %val, ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -16
-; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vl4re16.v v24, (a1)
-; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; RV32-NEXT: vsext.vf2 v0, v24
+; RV32-NEXT: vl4re16.v v4, (a1)
+; RV32-NEXT: li a3, 8
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: vsll.vi v24, v0, 3
+; RV32-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV32-NEXT: vwmulsu.vx v24, v4, a3
; RV32-NEXT: mv a3, a2
; RV32-NEXT: bltu a2, a1, .LBB109_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a3, a1
; RV32-NEXT: .LBB109_2:
-; RV32-NEXT: addi a4, sp, 16
-; RV32-NEXT: vl1r.v v0, (a4) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: sub a3, a2, a1
@@ -2337,11 +2343,6 @@ define void @vpscatter_baseidx_nxv16i16_nxv16f64(<vscale x 16 x double> %val, pt
; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: .cfi_def_cfa sp, 16
-; RV32-NEXT: addi sp, sp, 16
-; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_nxv16i16_nxv16f64:
@@ -2404,25 +2405,16 @@ define void @vpscatter_baseidx_nxv16i16_nxv16f64(<vscale x 16 x double> %val, pt
define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %val, ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_sext_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -16
-; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vl4re16.v v24, (a1)
-; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; RV32-NEXT: vsext.vf2 v0, v24
+; RV32-NEXT: vl4re16.v v4, (a1)
+; RV32-NEXT: li a3, 8
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: vsll.vi v24, v0, 3
+; RV32-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV32-NEXT: vwmulsu.vx v24, v4, a3
; RV32-NEXT: mv a3, a2
; RV32-NEXT: bltu a2, a1, .LBB110_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a3, a1
; RV32-NEXT: .LBB110_2:
-; RV32-NEXT: addi a4, sp, 16
-; RV32-NEXT: vl1r.v v0, (a4) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: sub a3, a2, a1
@@ -2434,11 +2426,6 @@ define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: .cfi_def_cfa sp, 16
-; RV32-NEXT: addi sp, sp, 16
-; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_sext_nxv16i16_nxv16f64:
@@ -2502,25 +2489,16 @@ define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
define void @vpscatter_baseidx_zext_nxv16i16_nxv16f64(<vscale x 16 x double> %val, ptr %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpscatter_baseidx_zext_nxv16i16_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -16
-; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: sub sp, sp, a3
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vl4re16.v v24, (a1)
-; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; RV32-NEXT: vzext.vf2 v0, v24
+; RV32-NEXT: vl4re16.v v4, (a1)
+; RV32-NEXT: li a3, 8
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: vsll.vi v24, v0, 3
+; RV32-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV32-NEXT: vwmulu.vx v24, v4, a3
; RV32-NEXT: mv a3, a2
; RV32-NEXT: bltu a2, a1, .LBB111_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a3, a1
; RV32-NEXT: .LBB111_2:
-; RV32-NEXT: addi a4, sp, 16
-; RV32-NEXT: vl1r.v v0, (a4) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: sub a3, a2, a1
@@ -2532,34 +2510,20 @@ define void @vpscatter_baseidx_zext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: add sp, sp, a0
-; RV32-NEXT: .cfi_def_cfa sp, 16
-; RV32-NEXT: addi sp, sp, 16
-; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_baseidx_zext_nxv16i16_nxv16f64:
; RV64: # %bb.0:
-; RV64-NEXT: addi sp, sp, -16
-; RV64-NEXT: .cfi_def_cfa_offset 16
-; RV64-NEXT: csrr a3, vlenb
-; RV64-NEXT: sub sp, sp, a3
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
-; RV64-NEXT: addi a3, sp, 16
-; RV64-NEXT: vs1r.v v0, (a3) # Unknown-size Folded Spill
-; RV64-NEXT: vl4re16.v v24, (a1)
-; RV64-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; RV64-NEXT: vzext.vf2 v0, v24
+; RV64-NEXT: vl4re16.v v4, (a1)
+; RV64-NEXT: li a3, 8
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: vsll.vi v24, v0, 3
+; RV64-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; RV64-NEXT: vwmulu.vx v24, v4, a3
; RV64-NEXT: mv a3, a2
; RV64-NEXT: bltu a2, a1, .LBB111_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a3, a1
; RV64-NEXT: .LBB111_2:
-; RV64-NEXT: addi a4, sp, 16
-; RV64-NEXT: vl1r.v v0, (a4) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV64-NEXT: sub a3, a2, a1
@@ -2571,11 +2535,6 @@ define void @vpscatter_baseidx_zext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV64-NEXT: and a2, a2, a3
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vsoxei32.v v16, (a0), v28, v0.t
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: add sp, sp, a0
-; RV64-NEXT: .cfi_def_cfa sp, 16
-; RV64-NEXT: addi sp, sp, 16
-; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
%eidxs = zext <vscale x 16 x i16> %idxs to <vscale x 16 x i64>
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 16 x i64> %eidxs
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
index ff807adf0e59f9..9524eaacb2eb75 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
@@ -222,13 +222,6 @@ define <vscale x 2 x i64> @vwsll_vx_i8_nxv2i64_zext(<vscale x 2 x i32> %a, i8 %b
}
define <vscale x 2 x i64> @vwsll_vi_nxv2i64(<vscale x 2 x i32> %a) {
-; CHECK-LABEL: vwsll_vi_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2
-; CHECK-NEXT: ret
-;
; CHECK-ZVBB-LABEL: vwsll_vi_nxv2i64:
; CHECK-ZVBB: # %bb.0:
; CHECK-ZVBB-NEXT: vsetvli a0, zero, e32, m1, ta, ma
@@ -432,9 +425,10 @@ define <vscale x 4 x i32> @vwsll_vx_i8_nxv4i32_zext(<vscale x 4 x i16> %a, i8 %b
define <vscale x 4 x i32> @vwsll_vi_nxv4i32(<vscale x 4 x i16> %a) {
; CHECK-LABEL: vwsll_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: li a0, 4
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a0
+; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
;
; CHECK-ZVBB-LABEL: vwsll_vi_nxv4i32:
@@ -612,9 +606,10 @@ define <vscale x 8 x i16> @vwsll_vx_i8_nxv8i16_zext(<vscale x 8 x i8> %a, i8 %b)
define <vscale x 8 x i16> @vwsll_vi_nxv8i16(<vscale x 8 x i8> %a) {
; CHECK-LABEL: vwsll_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2
+; CHECK-NEXT: li a0, 4
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a0
+; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
;
; CHECK-ZVBB-LABEL: vwsll_vi_nxv8i16:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll
index c30c4763dd46d5..f1dc696510cfc7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll
@@ -238,12 +238,20 @@ define <vscale x 2 x i64> @vwsll_vx_i8_nxv2i64_zext(<vscale x 2 x i32> %a, i8 %b
}
define <vscale x 2 x i64> @vwsll_vi_nxv2i64(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vwsll_vi_nxv2i64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2, v0.t
-; CHECK-NEXT: ret
+; CHECK-RV32-LABEL: vwsll_vi_nxv2i64:
+; CHECK-RV32: # %bb.0:
+; CHECK-RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-RV32-NEXT: vzext.vf2 v10, v8
+; CHECK-RV32-NEXT: vsll.vi v8, v10, 2, v0.t
+; CHECK-RV32-NEXT: ret
+;
+; CHECK-RV64-LABEL: vwsll_vi_nxv2i64:
+; CHECK-RV64: # %bb.0:
+; CHECK-RV64-NEXT: li a1, 4
+; CHECK-RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-RV64-NEXT: vwmulu.vx v10, v8, a1, v0.t
+; CHECK-RV64-NEXT: vmv2r.v v8, v10
+; CHECK-RV64-NEXT: ret
;
; CHECK-ZVBB-LABEL: vwsll_vi_nxv2i64:
; CHECK-ZVBB: # %bb.0:
@@ -464,9 +472,10 @@ define <vscale x 4 x i32> @vwsll_vx_i8_nxv4i32_zext(<vscale x 4 x i16> %a, i8 %b
define <vscale x 4 x i32> @vwsll_vi_nxv4i32(<vscale x 4 x i16> %a, <vscale x 4 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vwsll_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2, v0.t
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a1, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
;
; CHECK-ZVBB-LABEL: vwsll_vi_nxv4i32:
@@ -661,9 +670,10 @@ define <vscale x 8 x i16> @vwsll_vx_i8_nxv8i16_zext(<vscale x 8 x i8> %a, i8 %b,
define <vscale x 8 x i16> @vwsll_vi_nxv8i16(<vscale x 8 x i8> %a, <vscale x 8 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vwsll_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vzext.vf2 v10, v8
-; CHECK-NEXT: vsll.vi v8, v10, 2, v0.t
+; CHECK-NEXT: li a1, 4
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vwmulu.vx v10, v8, a1, v0.t
+; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
;
; CHECK-ZVBB-LABEL: vwsll_vi_nxv8i16:
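The updated CHECK lines above all share one shape: a vzext.vf2/vsext.vf2 plus vsll.vi N pair collapses into a single vwmulu.vx/vwmulsu.vx by the constant 1 << N, executed at the narrow element width so the separate extend disappears. A minimal sketch of IR that triggers the zero-extend case, mirroring the vwsll_vi_nxv4i32 test updated above (the function name is illustrative, not taken from the patch):

; Hypothetical reconstruction of the test input: zext i16 -> i32
; followed by a constant shift left by 2. With this patch it lowers
; to a single vwmulu.vx by 4 (1u << 2) at e16, instead of
; vzext.vf2 + vsll.vi at e32.
define <vscale x 4 x i32> @shl_zext_sketch(<vscale x 4 x i16> %x) {
  %ext = zext <vscale x 4 x i16> %x to <vscale x 4 x i32>
  %shl = shl <vscale x 4 x i32> %ext, splat (i32 2)
  ret <vscale x 4 x i32> %shl
}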