[llvm] [RISCV] Use vwadd.vx for splat vector with extension (PR #87249)

via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 9 05:05:47 PDT 2024


https://github.com/sun-jacobi updated https://github.com/llvm/llvm-project/pull/87249

>From ab6a238b08a16ad0d996b8f5967d8d6a19844be2 Mon Sep 17 00:00:00 2001
From: sun-jacobi <sun1011jacobi at gmail.com>
Date: Mon, 8 Apr 2024 23:03:10 +0900
Subject: [PATCH 1/4] [RISCV] add test patterns for splat vector with
 extension.

---
 llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll | 162 ++++++++++++++++++++
 1 file changed, 162 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
index 5dd01c654eff1d..4aaff199bd9b72 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
@@ -1466,3 +1466,165 @@ define <vscale x 2 x i32> @vwadd_wv_disjoint_or(<vscale x 2 x i32> %x.i32, <vsca
   %or = or disjoint <vscale x 2 x i32> %x.i32, %y.i32
   ret <vscale x 2 x i32> %or
 }
+
+define <vscale x 8 x i64> @vwadd_vx_splat_zext(<vscale x 8 x i32> %va, i32 %b) {
+; RV32-LABEL: vwadd_vx_splat_zext:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vwaddu.wv v16, v16, v8
+; RV32-NEXT:    vmv8r.v v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwadd_vx_splat_zext:
+; RV64:       # %bb.0:
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT:    vmv.v.x v16, a0
+; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT:    vwaddu.wv v16, v16, v8
+; RV64-NEXT:    vmv8r.v v8, v16
+; RV64-NEXT:    ret
+  %zb = zext i32 %b to i64
+  %head = insertelement <vscale x 8 x i64> poison, i64 %zb, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
+  %ve = add <vscale x 8 x i64> %vc, %splat
+  ret <vscale x 8 x i64> %ve
+}
+
+define <vscale x 8 x i32> @vwadd_vx_splat_zext_i1(<vscale x 8 x i1> %va, i16 %b) {
+; RV32-LABEL: vwadd_vx_splat_zext_i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
+; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    vadd.vi v8, v8, 1, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwadd_vx_splat_zext_i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
+; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    vadd.vi v8, v8, 1, v0.t
+; RV64-NEXT:    ret
+  %zb = zext i16 %b to i32
+  %head = insertelement <vscale x 8 x i32> poison, i32 %zb, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = zext <vscale x 8 x i1> %va to <vscale x 8 x i32>
+  %ve = add <vscale x 8 x i32> %vc, %splat
+  ret <vscale x 8 x i32> %ve
+}
+
+define <vscale x 8 x i64> @vwadd_wx_splat_zext(<vscale x 8 x i64> %va, i32 %b) {
+; RV32-LABEL: vwadd_wx_splat_zext:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT:    vlse64.v v16, (a0), zero
+; RV32-NEXT:    vadd.vv v8, v8, v16
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwadd_wx_splat_zext:
+; RV64:       # %bb.0:
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT:    vadd.vx v8, v8, a0
+; RV64-NEXT:    ret
+  %zb = zext i32 %b to i64
+  %head = insertelement <vscale x 8 x i64> poison, i64 %zb, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %ve = add <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %ve
+}
+
+define <vscale x 8 x i64> @vwadd_vx_splat_sext(<vscale x 8 x i32> %va, i32 %b) {
+; RV32-LABEL: vwadd_vx_splat_sext:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v16, a0
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV32-NEXT:    vwadd.wv v16, v16, v8
+; RV32-NEXT:    vmv8r.v v8, v16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwadd_vx_splat_sext:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT:    vmv.v.x v16, a0
+; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV64-NEXT:    vwadd.wv v16, v16, v8
+; RV64-NEXT:    vmv8r.v v8, v16
+; RV64-NEXT:    ret
+  %sb = sext i32 %b to i64
+  %head = insertelement <vscale x 8 x i64> poison, i64 %sb, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
+  %ve = add <vscale x 8 x i64> %vc, %splat
+  ret <vscale x 8 x i64> %ve
+}
+
+define <vscale x 8 x i32> @vwadd_vx_splat_sext_i1(<vscale x 8 x i1> %va, i16 %b) {
+; RV32-LABEL: vwadd_vx_splat_sext_i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srai a0, a0, 16
+; RV32-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
+; RV32-NEXT:    vmv.v.x v8, a0
+; RV32-NEXT:    li a0, 1
+; RV32-NEXT:    vsub.vx v8, v8, a0, v0.t
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwadd_vx_splat_sext_i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srai a0, a0, 48
+; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
+; RV64-NEXT:    vmv.v.x v8, a0
+; RV64-NEXT:    li a0, 1
+; RV64-NEXT:    vsub.vx v8, v8, a0, v0.t
+; RV64-NEXT:    ret
+  %sb = sext i16 %b to i32
+  %head = insertelement <vscale x 8 x i32> poison, i32 %sb, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %vc = sext <vscale x 8 x i1> %va to <vscale x 8 x i32>
+  %ve = add <vscale x 8 x i32> %vc, %splat
+  ret <vscale x 8 x i32> %ve
+}
+
+define <vscale x 8 x i64> @vwadd_wx_splat_sext(<vscale x 8 x i64> %va, i32 %b) {
+; RV32-LABEL: vwadd_wx_splat_sext:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT:    vadd.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwadd_wx_splat_sext:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT:    vadd.vx v8, v8, a0
+; RV64-NEXT:    ret
+  %sb = sext i32 %b to i64
+  %head = insertelement <vscale x 8 x i64> poison, i64 %sb, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %ve = add <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i64> %ve
+}

>From 4067ab3b5c366125a4c6b6f6953648d315932571 Mon Sep 17 00:00:00 2001
From: sun-jacobi <sun1011jacobi at gmail.com>
Date: Mon, 8 Apr 2024 23:16:34 +0900
Subject: [PATCH 2/4] [RISCV] use vwadd.vx for extended splat

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp |  85 ++++----
 llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll  | 120 ------------
 llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll  | 204 ++++++++++++--------
 llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll |  16 +-
 4 files changed, 181 insertions(+), 244 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 80cc41b458ca81..6e97575c167cd5 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -13597,7 +13597,8 @@ struct NodeExtensionHelper {
 
   /// Check if this instance represents a splat.
   bool isSplat() const {
-    return OrigOperand.getOpcode() == RISCVISD::VMV_V_X_VL;
+    return OrigOperand.getOpcode() == RISCVISD::VMV_V_X_VL ||
+           OrigOperand.getOpcode() == ISD::SPLAT_VECTOR;
   }
 
   /// Get the extended opcode.
@@ -13641,6 +13642,8 @@ struct NodeExtensionHelper {
     case RISCVISD::VZEXT_VL:
     case RISCVISD::FP_EXTEND_VL:
       return DAG.getNode(ExtOpc, DL, NarrowVT, Source, Mask, VL);
+    case ISD::SPLAT_VECTOR:
+      return DAG.getSplat(NarrowVT, DL, Source.getOperand(0));
     case RISCVISD::VMV_V_X_VL:
       return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT,
                          DAG.getUNDEF(NarrowVT), Source.getOperand(1), VL);
@@ -13776,6 +13779,47 @@ struct NodeExtensionHelper {
   /// Check if this node needs to be fully folded or extended for all users.
   bool needToPromoteOtherUsers() const { return EnforceOneUse; }
 
+  void fillUpExtensionSupportForSplat(SDNode *Root, SelectionDAG &DAG,
+                                      const RISCVSubtarget &Subtarget) {
+    unsigned Opc = OrigOperand.getOpcode();
+    MVT VT = OrigOperand.getSimpleValueType();
+
+    assert((Opc == ISD::SPLAT_VECTOR || Opc == RISCVISD::VMV_V_X_VL) &&
+           "Unexpected Opcode");
+
+    // The passthru must be undef for tail agnostic.
+    if (Opc == RISCVISD::VMV_V_X_VL && !OrigOperand.getOperand(0).isUndef())
+      return;
+
+    // Get the scalar value.
+    SDValue Op = Opc == ISD::SPLAT_VECTOR ? OrigOperand.getOperand(0)
+                                          : OrigOperand.getOperand(1);
+
+    // See if we have enough sign bits or zero bits in the scalar to use a
+    // widening opcode by splatting to smaller element size.
+    unsigned EltBits = VT.getScalarSizeInBits();
+    unsigned ScalarBits = Op.getValueSizeInBits();
+    // Make sure we're getting all element bits from the scalar register.
+    // FIXME: Support implicit sign extension of vmv.v.x?
+    if (ScalarBits < EltBits)
+      return;
+
+    unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
+    // If the narrow type cannot be expressed with a legal VMV,
+    // this is not a valid candidate.
+    if (NarrowSize < 8)
+      return;
+
+    if (DAG.ComputeMaxSignificantBits(Op) <= NarrowSize)
+      SupportsSExt = true;
+
+    if (DAG.MaskedValueIsZero(Op,
+                              APInt::getBitsSetFrom(ScalarBits, NarrowSize)))
+      SupportsZExt = true;
+
+    EnforceOneUse = false;
+  }
+
   /// Helper method to set the various fields of this struct based on the
   /// type of \p Root.
   void fillUpExtensionSupport(SDNode *Root, SelectionDAG &DAG,
@@ -13814,43 +13858,10 @@ struct NodeExtensionHelper {
     case RISCVISD::FP_EXTEND_VL:
       SupportsFPExt = true;
       break;
-    case RISCVISD::VMV_V_X_VL: {
-      // Historically, we didn't care about splat values not disappearing during
-      // combines.
-      EnforceOneUse = false;
-
-      // The operand is a splat of a scalar.
-
-      // The pasthru must be undef for tail agnostic.
-      if (!OrigOperand.getOperand(0).isUndef())
-        break;
-
-      // Get the scalar value.
-      SDValue Op = OrigOperand.getOperand(1);
-
-      // See if we have enough sign bits or zero bits in the scalar to use a
-      // widening opcode by splatting to smaller element size.
-      MVT VT = Root->getSimpleValueType(0);
-      unsigned EltBits = VT.getScalarSizeInBits();
-      unsigned ScalarBits = Op.getValueSizeInBits();
-      // Make sure we're getting all element bits from the scalar register.
-      // FIXME: Support implicit sign extension of vmv.v.x?
-      if (ScalarBits < EltBits)
-        break;
-
-      unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
-      // If the narrow type cannot be expressed with a legal VMV,
-      // this is not a valid candidate.
-      if (NarrowSize < 8)
-        break;
-
-      if (DAG.ComputeMaxSignificantBits(Op) <= NarrowSize)
-        SupportsSExt = true;
-      if (DAG.MaskedValueIsZero(Op,
-                                APInt::getBitsSetFrom(ScalarBits, NarrowSize)))
-        SupportsZExt = true;
+    case ISD::SPLAT_VECTOR:
+    case RISCVISD::VMV_V_X_VL:
+      fillUpExtensionSupportForSplat(Root, DAG, Subtarget);
       break;
-    }
     default:
       break;
     }
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
index fc94f8c2a52797..95346b7f154fea 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
@@ -1229,22 +1229,6 @@ define <vscale x 1 x i64> @ctlz_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
-; CHECK-F-LABEL: ctlz_nxv1i64:
-; CHECK-F:       # %bb.0:
-; CHECK-F-NEXT:    li a0, 190
-; CHECK-F-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-F-NEXT:    vmv.v.x v9, a0
-; CHECK-F-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-F-NEXT:    fsrmi a0, 1
-; CHECK-F-NEXT:    vfncvt.f.xu.w v10, v8
-; CHECK-F-NEXT:    vsrl.vi v8, v10, 23
-; CHECK-F-NEXT:    vwsubu.wv v9, v9, v8
-; CHECK-F-NEXT:    li a1, 64
-; CHECK-F-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-F-NEXT:    vminu.vx v8, v9, a1
-; CHECK-F-NEXT:    fsrm a0
-; CHECK-F-NEXT:    ret
-;
 ; CHECK-D-LABEL: ctlz_nxv1i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -1370,22 +1354,6 @@ define <vscale x 2 x i64> @ctlz_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
-; CHECK-F-LABEL: ctlz_nxv2i64:
-; CHECK-F:       # %bb.0:
-; CHECK-F-NEXT:    li a0, 190
-; CHECK-F-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-F-NEXT:    vmv.v.x v10, a0
-; CHECK-F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-F-NEXT:    fsrmi a0, 1
-; CHECK-F-NEXT:    vfncvt.f.xu.w v12, v8
-; CHECK-F-NEXT:    vsrl.vi v8, v12, 23
-; CHECK-F-NEXT:    vwsubu.wv v10, v10, v8
-; CHECK-F-NEXT:    li a1, 64
-; CHECK-F-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-F-NEXT:    vminu.vx v8, v10, a1
-; CHECK-F-NEXT:    fsrm a0
-; CHECK-F-NEXT:    ret
-;
 ; CHECK-D-LABEL: ctlz_nxv2i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -1511,22 +1479,6 @@ define <vscale x 4 x i64> @ctlz_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
-; CHECK-F-LABEL: ctlz_nxv4i64:
-; CHECK-F:       # %bb.0:
-; CHECK-F-NEXT:    li a0, 190
-; CHECK-F-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-F-NEXT:    vmv.v.x v12, a0
-; CHECK-F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-F-NEXT:    fsrmi a0, 1
-; CHECK-F-NEXT:    vfncvt.f.xu.w v16, v8
-; CHECK-F-NEXT:    vsrl.vi v8, v16, 23
-; CHECK-F-NEXT:    vwsubu.wv v12, v12, v8
-; CHECK-F-NEXT:    li a1, 64
-; CHECK-F-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-F-NEXT:    vminu.vx v8, v12, a1
-; CHECK-F-NEXT:    fsrm a0
-; CHECK-F-NEXT:    ret
-;
 ; CHECK-D-LABEL: ctlz_nxv4i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -1652,22 +1604,6 @@ define <vscale x 8 x i64> @ctlz_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
-; CHECK-F-LABEL: ctlz_nxv8i64:
-; CHECK-F:       # %bb.0:
-; CHECK-F-NEXT:    li a0, 190
-; CHECK-F-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-F-NEXT:    vmv.v.x v16, a0
-; CHECK-F-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-F-NEXT:    fsrmi a0, 1
-; CHECK-F-NEXT:    vfncvt.f.xu.w v24, v8
-; CHECK-F-NEXT:    vsrl.vi v8, v24, 23
-; CHECK-F-NEXT:    vwsubu.wv v16, v16, v8
-; CHECK-F-NEXT:    li a1, 64
-; CHECK-F-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-F-NEXT:    vminu.vx v8, v16, a1
-; CHECK-F-NEXT:    fsrm a0
-; CHECK-F-NEXT:    ret
-;
 ; CHECK-D-LABEL: ctlz_nxv8i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -2835,20 +2771,6 @@ define <vscale x 1 x i64> @ctlz_zero_undef_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
-; CHECK-F-LABEL: ctlz_zero_undef_nxv1i64:
-; CHECK-F:       # %bb.0:
-; CHECK-F-NEXT:    li a0, 190
-; CHECK-F-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-F-NEXT:    vmv.v.x v9, a0
-; CHECK-F-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-F-NEXT:    fsrmi a0, 1
-; CHECK-F-NEXT:    vfncvt.f.xu.w v10, v8
-; CHECK-F-NEXT:    vsrl.vi v8, v10, 23
-; CHECK-F-NEXT:    vwsubu.wv v9, v9, v8
-; CHECK-F-NEXT:    fsrm a0
-; CHECK-F-NEXT:    vmv1r.v v8, v9
-; CHECK-F-NEXT:    ret
-;
 ; CHECK-D-LABEL: ctlz_zero_undef_nxv1i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -2971,20 +2893,6 @@ define <vscale x 2 x i64> @ctlz_zero_undef_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
-; CHECK-F-LABEL: ctlz_zero_undef_nxv2i64:
-; CHECK-F:       # %bb.0:
-; CHECK-F-NEXT:    li a0, 190
-; CHECK-F-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-F-NEXT:    vmv.v.x v10, a0
-; CHECK-F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-F-NEXT:    fsrmi a0, 1
-; CHECK-F-NEXT:    vfncvt.f.xu.w v12, v8
-; CHECK-F-NEXT:    vsrl.vi v8, v12, 23
-; CHECK-F-NEXT:    vwsubu.wv v10, v10, v8
-; CHECK-F-NEXT:    fsrm a0
-; CHECK-F-NEXT:    vmv2r.v v8, v10
-; CHECK-F-NEXT:    ret
-;
 ; CHECK-D-LABEL: ctlz_zero_undef_nxv2i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -3107,20 +3015,6 @@ define <vscale x 4 x i64> @ctlz_zero_undef_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
-; CHECK-F-LABEL: ctlz_zero_undef_nxv4i64:
-; CHECK-F:       # %bb.0:
-; CHECK-F-NEXT:    li a0, 190
-; CHECK-F-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-F-NEXT:    vmv.v.x v12, a0
-; CHECK-F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-F-NEXT:    fsrmi a0, 1
-; CHECK-F-NEXT:    vfncvt.f.xu.w v16, v8
-; CHECK-F-NEXT:    vsrl.vi v8, v16, 23
-; CHECK-F-NEXT:    vwsubu.wv v12, v12, v8
-; CHECK-F-NEXT:    fsrm a0
-; CHECK-F-NEXT:    vmv4r.v v8, v12
-; CHECK-F-NEXT:    ret
-;
 ; CHECK-D-LABEL: ctlz_zero_undef_nxv4i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -3243,20 +3137,6 @@ define <vscale x 8 x i64> @ctlz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
-; CHECK-F-LABEL: ctlz_zero_undef_nxv8i64:
-; CHECK-F:       # %bb.0:
-; CHECK-F-NEXT:    vmv8r.v v16, v8
-; CHECK-F-NEXT:    li a0, 190
-; CHECK-F-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-F-NEXT:    vmv.v.x v8, a0
-; CHECK-F-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-F-NEXT:    fsrmi a0, 1
-; CHECK-F-NEXT:    vfncvt.f.xu.w v24, v16
-; CHECK-F-NEXT:    vsrl.vi v16, v24, 23
-; CHECK-F-NEXT:    vwsubu.wv v8, v8, v16
-; CHECK-F-NEXT:    fsrm a0
-; CHECK-F-NEXT:    ret
-;
 ; CHECK-D-LABEL: ctlz_zero_undef_nxv8i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
index b14cde25aa85b2..d13f4d2dca1ff4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
@@ -1241,13 +1241,12 @@ define <vscale x 1 x i64> @cttz_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64F-NEXT:    fsrmi a0, 1
 ; RV64F-NEXT:    vfncvt.f.xu.w v10, v9
 ; RV64F-NEXT:    vsrl.vi v9, v10, 23
-; RV64F-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; RV64F-NEXT:    vzext.vf2 v10, v9
 ; RV64F-NEXT:    li a1, 127
-; RV64F-NEXT:    vsub.vx v9, v10, a1
+; RV64F-NEXT:    vwsubu.vx v10, v9, a1
+; RV64F-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV64F-NEXT:    vmseq.vi v0, v8, 0
 ; RV64F-NEXT:    li a1, 64
-; RV64F-NEXT:    vmerge.vxm v8, v9, a1, v0
+; RV64F-NEXT:    vmerge.vxm v8, v10, a1, v0
 ; RV64F-NEXT:    fsrm a0
 ; RV64F-NEXT:    ret
 ;
@@ -1404,13 +1403,12 @@ define <vscale x 2 x i64> @cttz_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64F-NEXT:    fsrmi a0, 1
 ; RV64F-NEXT:    vfncvt.f.xu.w v12, v10
 ; RV64F-NEXT:    vsrl.vi v10, v12, 23
-; RV64F-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; RV64F-NEXT:    vzext.vf2 v12, v10
 ; RV64F-NEXT:    li a1, 127
-; RV64F-NEXT:    vsub.vx v10, v12, a1
+; RV64F-NEXT:    vwsubu.vx v12, v10, a1
+; RV64F-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; RV64F-NEXT:    vmseq.vi v0, v8, 0
 ; RV64F-NEXT:    li a1, 64
-; RV64F-NEXT:    vmerge.vxm v8, v10, a1, v0
+; RV64F-NEXT:    vmerge.vxm v8, v12, a1, v0
 ; RV64F-NEXT:    fsrm a0
 ; RV64F-NEXT:    ret
 ;
@@ -1567,13 +1565,12 @@ define <vscale x 4 x i64> @cttz_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64F-NEXT:    fsrmi a0, 1
 ; RV64F-NEXT:    vfncvt.f.xu.w v16, v12
 ; RV64F-NEXT:    vsrl.vi v12, v16, 23
-; RV64F-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV64F-NEXT:    vzext.vf2 v16, v12
 ; RV64F-NEXT:    li a1, 127
-; RV64F-NEXT:    vsub.vx v12, v16, a1
+; RV64F-NEXT:    vwsubu.vx v16, v12, a1
+; RV64F-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
 ; RV64F-NEXT:    vmseq.vi v0, v8, 0
 ; RV64F-NEXT:    li a1, 64
-; RV64F-NEXT:    vmerge.vxm v8, v12, a1, v0
+; RV64F-NEXT:    vmerge.vxm v8, v16, a1, v0
 ; RV64F-NEXT:    fsrm a0
 ; RV64F-NEXT:    ret
 ;
@@ -1730,13 +1727,12 @@ define <vscale x 8 x i64> @cttz_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64F-NEXT:    fsrmi a0, 1
 ; RV64F-NEXT:    vfncvt.f.xu.w v24, v16
 ; RV64F-NEXT:    vsrl.vi v16, v24, 23
-; RV64F-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; RV64F-NEXT:    vzext.vf2 v24, v16
 ; RV64F-NEXT:    li a1, 127
-; RV64F-NEXT:    vsub.vx v16, v24, a1
+; RV64F-NEXT:    vwsubu.vx v24, v16, a1
+; RV64F-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64F-NEXT:    vmseq.vi v0, v8, 0
 ; RV64F-NEXT:    li a1, 64
-; RV64F-NEXT:    vmerge.vxm v8, v16, a1, v0
+; RV64F-NEXT:    vmerge.vxm v8, v24, a1, v0
 ; RV64F-NEXT:    fsrm a0
 ; RV64F-NEXT:    ret
 ;
@@ -2891,21 +2887,35 @@ define <vscale x 1 x i64> @cttz_zero_undef_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
-; CHECK-F-LABEL: cttz_zero_undef_nxv1i64:
-; CHECK-F:       # %bb.0:
-; CHECK-F-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-F-NEXT:    vrsub.vi v9, v8, 0
-; CHECK-F-NEXT:    vand.vv v8, v8, v9
-; CHECK-F-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-F-NEXT:    fsrmi a0, 1
-; CHECK-F-NEXT:    vfncvt.f.xu.w v9, v8
-; CHECK-F-NEXT:    vsrl.vi v8, v9, 23
-; CHECK-F-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-F-NEXT:    vzext.vf2 v9, v8
-; CHECK-F-NEXT:    li a1, 127
-; CHECK-F-NEXT:    vsub.vx v8, v9, a1
-; CHECK-F-NEXT:    fsrm a0
-; CHECK-F-NEXT:    ret
+; RV32F-LABEL: cttz_zero_undef_nxv1i64:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV32F-NEXT:    vrsub.vi v9, v8, 0
+; RV32F-NEXT:    vand.vv v8, v8, v9
+; RV32F-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV32F-NEXT:    fsrmi a0, 1
+; RV32F-NEXT:    vfncvt.f.xu.w v9, v8
+; RV32F-NEXT:    vsrl.vi v8, v9, 23
+; RV32F-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV32F-NEXT:    vzext.vf2 v9, v8
+; RV32F-NEXT:    li a1, 127
+; RV32F-NEXT:    vsub.vx v8, v9, a1
+; RV32F-NEXT:    fsrm a0
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: cttz_zero_undef_nxv1i64:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
+; RV64F-NEXT:    vrsub.vi v9, v8, 0
+; RV64F-NEXT:    vand.vv v8, v8, v9
+; RV64F-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64F-NEXT:    fsrmi a0, 1
+; RV64F-NEXT:    vfncvt.f.xu.w v9, v8
+; RV64F-NEXT:    vsrl.vi v9, v9, 23
+; RV64F-NEXT:    li a1, 127
+; RV64F-NEXT:    vwsubu.vx v8, v9, a1
+; RV64F-NEXT:    fsrm a0
+; RV64F-NEXT:    ret
 ;
 ; CHECK-D-LABEL: cttz_zero_undef_nxv1i64:
 ; CHECK-D:       # %bb.0:
@@ -3011,21 +3021,35 @@ define <vscale x 2 x i64> @cttz_zero_undef_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
-; CHECK-F-LABEL: cttz_zero_undef_nxv2i64:
-; CHECK-F:       # %bb.0:
-; CHECK-F-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-F-NEXT:    vrsub.vi v10, v8, 0
-; CHECK-F-NEXT:    vand.vv v8, v8, v10
-; CHECK-F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-F-NEXT:    fsrmi a0, 1
-; CHECK-F-NEXT:    vfncvt.f.xu.w v10, v8
-; CHECK-F-NEXT:    vsrl.vi v8, v10, 23
-; CHECK-F-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-F-NEXT:    vzext.vf2 v10, v8
-; CHECK-F-NEXT:    li a1, 127
-; CHECK-F-NEXT:    vsub.vx v8, v10, a1
-; CHECK-F-NEXT:    fsrm a0
-; CHECK-F-NEXT:    ret
+; RV32F-LABEL: cttz_zero_undef_nxv2i64:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32F-NEXT:    vrsub.vi v10, v8, 0
+; RV32F-NEXT:    vand.vv v8, v8, v10
+; RV32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV32F-NEXT:    fsrmi a0, 1
+; RV32F-NEXT:    vfncvt.f.xu.w v10, v8
+; RV32F-NEXT:    vsrl.vi v8, v10, 23
+; RV32F-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV32F-NEXT:    vzext.vf2 v10, v8
+; RV32F-NEXT:    li a1, 127
+; RV32F-NEXT:    vsub.vx v8, v10, a1
+; RV32F-NEXT:    fsrm a0
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: cttz_zero_undef_nxv2i64:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV64F-NEXT:    vrsub.vi v10, v8, 0
+; RV64F-NEXT:    vand.vv v8, v8, v10
+; RV64F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64F-NEXT:    fsrmi a0, 1
+; RV64F-NEXT:    vfncvt.f.xu.w v10, v8
+; RV64F-NEXT:    vsrl.vi v10, v10, 23
+; RV64F-NEXT:    li a1, 127
+; RV64F-NEXT:    vwsubu.vx v8, v10, a1
+; RV64F-NEXT:    fsrm a0
+; RV64F-NEXT:    ret
 ;
 ; CHECK-D-LABEL: cttz_zero_undef_nxv2i64:
 ; CHECK-D:       # %bb.0:
@@ -3131,21 +3155,35 @@ define <vscale x 4 x i64> @cttz_zero_undef_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
-; CHECK-F-LABEL: cttz_zero_undef_nxv4i64:
-; CHECK-F:       # %bb.0:
-; CHECK-F-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-F-NEXT:    vrsub.vi v12, v8, 0
-; CHECK-F-NEXT:    vand.vv v8, v8, v12
-; CHECK-F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-F-NEXT:    fsrmi a0, 1
-; CHECK-F-NEXT:    vfncvt.f.xu.w v12, v8
-; CHECK-F-NEXT:    vsrl.vi v8, v12, 23
-; CHECK-F-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-F-NEXT:    vzext.vf2 v12, v8
-; CHECK-F-NEXT:    li a1, 127
-; CHECK-F-NEXT:    vsub.vx v8, v12, a1
-; CHECK-F-NEXT:    fsrm a0
-; CHECK-F-NEXT:    ret
+; RV32F-LABEL: cttz_zero_undef_nxv4i64:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV32F-NEXT:    vrsub.vi v12, v8, 0
+; RV32F-NEXT:    vand.vv v8, v8, v12
+; RV32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32F-NEXT:    fsrmi a0, 1
+; RV32F-NEXT:    vfncvt.f.xu.w v12, v8
+; RV32F-NEXT:    vsrl.vi v8, v12, 23
+; RV32F-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV32F-NEXT:    vzext.vf2 v12, v8
+; RV32F-NEXT:    li a1, 127
+; RV32F-NEXT:    vsub.vx v8, v12, a1
+; RV32F-NEXT:    fsrm a0
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: cttz_zero_undef_nxv4i64:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
+; RV64F-NEXT:    vrsub.vi v12, v8, 0
+; RV64F-NEXT:    vand.vv v8, v8, v12
+; RV64F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64F-NEXT:    fsrmi a0, 1
+; RV64F-NEXT:    vfncvt.f.xu.w v12, v8
+; RV64F-NEXT:    vsrl.vi v12, v12, 23
+; RV64F-NEXT:    li a1, 127
+; RV64F-NEXT:    vwsubu.vx v8, v12, a1
+; RV64F-NEXT:    fsrm a0
+; RV64F-NEXT:    ret
 ;
 ; CHECK-D-LABEL: cttz_zero_undef_nxv4i64:
 ; CHECK-D:       # %bb.0:
@@ -3251,21 +3289,35 @@ define <vscale x 8 x i64> @cttz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
-; CHECK-F-LABEL: cttz_zero_undef_nxv8i64:
-; CHECK-F:       # %bb.0:
-; CHECK-F-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-F-NEXT:    vrsub.vi v16, v8, 0
-; CHECK-F-NEXT:    vand.vv v8, v8, v16
-; CHECK-F-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-F-NEXT:    fsrmi a0, 1
-; CHECK-F-NEXT:    vfncvt.f.xu.w v16, v8
-; CHECK-F-NEXT:    vsrl.vi v8, v16, 23
-; CHECK-F-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-F-NEXT:    vzext.vf2 v16, v8
-; CHECK-F-NEXT:    li a1, 127
-; CHECK-F-NEXT:    vsub.vx v8, v16, a1
-; CHECK-F-NEXT:    fsrm a0
-; CHECK-F-NEXT:    ret
+; RV32F-LABEL: cttz_zero_undef_nxv8i64:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV32F-NEXT:    vrsub.vi v16, v8, 0
+; RV32F-NEXT:    vand.vv v8, v8, v16
+; RV32F-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV32F-NEXT:    fsrmi a0, 1
+; RV32F-NEXT:    vfncvt.f.xu.w v16, v8
+; RV32F-NEXT:    vsrl.vi v8, v16, 23
+; RV32F-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32F-NEXT:    vzext.vf2 v16, v8
+; RV32F-NEXT:    li a1, 127
+; RV32F-NEXT:    vsub.vx v8, v16, a1
+; RV32F-NEXT:    fsrm a0
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: cttz_zero_undef_nxv8i64:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
+; RV64F-NEXT:    vrsub.vi v16, v8, 0
+; RV64F-NEXT:    vand.vv v8, v8, v16
+; RV64F-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV64F-NEXT:    fsrmi a0, 1
+; RV64F-NEXT:    vfncvt.f.xu.w v16, v8
+; RV64F-NEXT:    vsrl.vi v16, v16, 23
+; RV64F-NEXT:    li a1, 127
+; RV64F-NEXT:    vwsubu.vx v8, v16, a1
+; RV64F-NEXT:    fsrm a0
+; RV64F-NEXT:    ret
 ;
 ; CHECK-D-LABEL: cttz_zero_undef_nxv8i64:
 ; CHECK-D:       # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
index 4aaff199bd9b72..21ddf1a6e114d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwadd-sdnode.ll
@@ -1484,12 +1484,9 @@ define <vscale x 8 x i64> @vwadd_vx_splat_zext(<vscale x 8 x i32> %va, i32 %b) {
 ;
 ; RV64-LABEL: vwadd_vx_splat_zext:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    slli a0, a0, 32
-; RV64-NEXT:    srli a0, a0, 32
-; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT:    vmv.v.x v16, a0
-; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; RV64-NEXT:    vwaddu.wv v16, v16, v8
+; RV64-NEXT:    andi a0, a0, -1
+; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV64-NEXT:    vwaddu.vx v16, v8, a0
 ; RV64-NEXT:    vmv8r.v v8, v16
 ; RV64-NEXT:    ret
   %zb = zext i32 %b to i64
@@ -1566,11 +1563,8 @@ define <vscale x 8 x i64> @vwadd_vx_splat_sext(<vscale x 8 x i32> %va, i32 %b) {
 ;
 ; RV64-LABEL: vwadd_vx_splat_sext:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    sext.w a0, a0
-; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT:    vmv.v.x v16, a0
-; RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; RV64-NEXT:    vwadd.wv v16, v16, v8
+; RV64-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV64-NEXT:    vwadd.vx v16, v8, a0
 ; RV64-NEXT:    vmv8r.v v8, v16
 ; RV64-NEXT:    ret
   %sb = sext i32 %b to i64

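For readers skimming the patch: the new fillUpExtensionSupportForSplat helper only narrows a splat when the wide scalar is provably a sign- or zero-extension of the half-width value, using ComputeMaxSignificantBits and MaskedValueIsZero. A minimal standalone sketch of that criterion on a concrete 64-bit scalar (illustrative names, not code from the patch; assumes NarrowBits < 64):

  #include <cassert>
  #include <cstdint>

  // Illustrative stand-ins for the known-bits queries used in
  // fillUpExtensionSupportForSplat, evaluated on a concrete 64-bit scalar.

  // SupportsSExt: the value is the sign-extension of its low NarrowBits bits
  // (what ComputeMaxSignificantBits(Op) <= NarrowSize establishes).
  static bool splatSupportsSExt(int64_t Scalar, unsigned NarrowBits) {
    const int64_t Min = -(int64_t(1) << (NarrowBits - 1));
    const int64_t Max = (int64_t(1) << (NarrowBits - 1)) - 1;
    return Scalar >= Min && Scalar <= Max;
  }

  // SupportsZExt: every bit from NarrowBits upward is zero (what
  // MaskedValueIsZero with getBitsSetFrom(ScalarBits, NarrowSize) establishes).
  static bool splatSupportsZExt(uint64_t Scalar, unsigned NarrowBits) {
    return (Scalar >> NarrowBits) == 0;
  }

  int main() {
    // A value produced by `sext i32 %b to i64` always satisfies the SExt check
    // for NarrowBits = 32, so the splat can feed vwadd.vx.
    assert(splatSupportsSExt(-42, 32));
    // A value produced by `zext i32 %b to i64` always satisfies the ZExt check,
    // so the splat can feed vwaddu.vx.
    assert(splatSupportsZExt(0xFFFFFFFFull, 32));
    // A genuinely 64-bit value satisfies neither, and the combine bails out
    // (as it also does when the narrow element size would drop below 8 bits).
    assert(!splatSupportsSExt(int64_t(1) << 40, 32));
    assert(!splatSupportsZExt(uint64_t(1) << 40, 32));
    return 0;
  }

This is the effect visible in the vwadd-sdnode.ll hunks above, where the RV64 sequences that previously splatted the scalar at e64 and used vwadd(u).wv collapse to a single vwadd.vx/vwaddu.vx at e32.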
>From 0f5466b5b85645423dee4f3d885dc7efd9214f6a Mon Sep 17 00:00:00 2001
From: sun-jacobi <sun1011jacobi at gmail.com>
Date: Tue, 9 Apr 2024 10:32:30 +0900
Subject: [PATCH 3/4] [RISCV] update ctlz-sdnode.ll

---
 llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll | 232 ++++++++++++++++++++-
 1 file changed, 230 insertions(+), 2 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
index 95346b7f154fea..d756cfcf707728 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-ZVE64X,RV32,RV32I
 ; RUN: llc -mtriple=riscv64 -mattr=+zve64x -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-ZVE64X,RV64,RV64I
-; RUN: llc -mtriple=riscv32 -mattr=+zve64f,+f -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-F,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+zve64f,+f -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-F,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+zve64f,+f -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-F,RV32F
+; RUN: llc -mtriple=riscv64 -mattr=+zve64f,+f -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-F,RV64F
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-D,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-D,RV64
 ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB
@@ -1229,6 +1229,37 @@ define <vscale x 1 x i64> @ctlz_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
+; RV32F-LABEL: ctlz_nxv1i64:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    li a0, 190
+; RV32F-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV32F-NEXT:    vmv.v.x v9, a0
+; RV32F-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV32F-NEXT:    fsrmi a0, 1
+; RV32F-NEXT:    vfncvt.f.xu.w v10, v8
+; RV32F-NEXT:    vsrl.vi v8, v10, 23
+; RV32F-NEXT:    vwsubu.wv v9, v9, v8
+; RV32F-NEXT:    li a1, 64
+; RV32F-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV32F-NEXT:    vminu.vx v8, v9, a1
+; RV32F-NEXT:    fsrm a0
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: ctlz_nxv1i64:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    li a0, 190
+; RV64F-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV64F-NEXT:    vmv.v.x v9, a0
+; RV64F-NEXT:    fsrmi a0, 1
+; RV64F-NEXT:    vfncvt.f.xu.w v10, v8
+; RV64F-NEXT:    vsrl.vi v8, v10, 23
+; RV64F-NEXT:    vwsubu.vv v10, v9, v8
+; RV64F-NEXT:    li a1, 64
+; RV64F-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV64F-NEXT:    vminu.vx v8, v10, a1
+; RV64F-NEXT:    fsrm a0
+; RV64F-NEXT:    ret
+;
 ; CHECK-D-LABEL: ctlz_nxv1i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -1354,6 +1385,37 @@ define <vscale x 2 x i64> @ctlz_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
+; RV32F-LABEL: ctlz_nxv2i64:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    li a0, 190
+; RV32F-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; RV32F-NEXT:    vmv.v.x v10, a0
+; RV32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV32F-NEXT:    fsrmi a0, 1
+; RV32F-NEXT:    vfncvt.f.xu.w v12, v8
+; RV32F-NEXT:    vsrl.vi v8, v12, 23
+; RV32F-NEXT:    vwsubu.wv v10, v10, v8
+; RV32F-NEXT:    li a1, 64
+; RV32F-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV32F-NEXT:    vminu.vx v8, v10, a1
+; RV32F-NEXT:    fsrm a0
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: ctlz_nxv2i64:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    li a0, 190
+; RV64F-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; RV64F-NEXT:    vmv.v.x v10, a0
+; RV64F-NEXT:    fsrmi a0, 1
+; RV64F-NEXT:    vfncvt.f.xu.w v11, v8
+; RV64F-NEXT:    vsrl.vi v8, v11, 23
+; RV64F-NEXT:    vwsubu.vv v12, v10, v8
+; RV64F-NEXT:    li a1, 64
+; RV64F-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64F-NEXT:    vminu.vx v8, v12, a1
+; RV64F-NEXT:    fsrm a0
+; RV64F-NEXT:    ret
+;
 ; CHECK-D-LABEL: ctlz_nxv2i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -1479,6 +1541,37 @@ define <vscale x 4 x i64> @ctlz_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
+; RV32F-LABEL: ctlz_nxv4i64:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    li a0, 190
+; RV32F-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; RV32F-NEXT:    vmv.v.x v12, a0
+; RV32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32F-NEXT:    fsrmi a0, 1
+; RV32F-NEXT:    vfncvt.f.xu.w v16, v8
+; RV32F-NEXT:    vsrl.vi v8, v16, 23
+; RV32F-NEXT:    vwsubu.wv v12, v12, v8
+; RV32F-NEXT:    li a1, 64
+; RV32F-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV32F-NEXT:    vminu.vx v8, v12, a1
+; RV32F-NEXT:    fsrm a0
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: ctlz_nxv4i64:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    li a0, 190
+; RV64F-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; RV64F-NEXT:    vmv.v.x v12, a0
+; RV64F-NEXT:    fsrmi a0, 1
+; RV64F-NEXT:    vfncvt.f.xu.w v14, v8
+; RV64F-NEXT:    vsrl.vi v8, v14, 23
+; RV64F-NEXT:    vwsubu.vv v16, v12, v8
+; RV64F-NEXT:    li a1, 64
+; RV64F-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; RV64F-NEXT:    vminu.vx v8, v16, a1
+; RV64F-NEXT:    fsrm a0
+; RV64F-NEXT:    ret
+;
 ; CHECK-D-LABEL: ctlz_nxv4i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -1604,6 +1697,37 @@ define <vscale x 8 x i64> @ctlz_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
+; RV32F-LABEL: ctlz_nxv8i64:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    li a0, 190
+; RV32F-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV32F-NEXT:    vmv.v.x v16, a0
+; RV32F-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV32F-NEXT:    fsrmi a0, 1
+; RV32F-NEXT:    vfncvt.f.xu.w v24, v8
+; RV32F-NEXT:    vsrl.vi v8, v24, 23
+; RV32F-NEXT:    vwsubu.wv v16, v16, v8
+; RV32F-NEXT:    li a1, 64
+; RV32F-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV32F-NEXT:    vminu.vx v8, v16, a1
+; RV32F-NEXT:    fsrm a0
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: ctlz_nxv8i64:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    li a0, 190
+; RV64F-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV64F-NEXT:    vmv.v.x v16, a0
+; RV64F-NEXT:    fsrmi a0, 1
+; RV64F-NEXT:    vfncvt.f.xu.w v20, v8
+; RV64F-NEXT:    vsrl.vi v8, v20, 23
+; RV64F-NEXT:    vwsubu.vv v24, v16, v8
+; RV64F-NEXT:    li a1, 64
+; RV64F-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; RV64F-NEXT:    vminu.vx v8, v24, a1
+; RV64F-NEXT:    fsrm a0
+; RV64F-NEXT:    ret
+;
 ; CHECK-D-LABEL: ctlz_nxv8i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -2771,6 +2895,32 @@ define <vscale x 1 x i64> @ctlz_zero_undef_nxv1i64(<vscale x 1 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
+; RV32F-LABEL: ctlz_zero_undef_nxv1i64:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    li a0, 190
+; RV32F-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV32F-NEXT:    vmv.v.x v9, a0
+; RV32F-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV32F-NEXT:    fsrmi a0, 1
+; RV32F-NEXT:    vfncvt.f.xu.w v10, v8
+; RV32F-NEXT:    vsrl.vi v8, v10, 23
+; RV32F-NEXT:    vwsubu.wv v9, v9, v8
+; RV32F-NEXT:    fsrm a0
+; RV32F-NEXT:    vmv1r.v v8, v9
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: ctlz_zero_undef_nxv1i64:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    li a0, 190
+; RV64F-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; RV64F-NEXT:    vmv.v.x v9, a0
+; RV64F-NEXT:    fsrmi a0, 1
+; RV64F-NEXT:    vfncvt.f.xu.w v10, v8
+; RV64F-NEXT:    vsrl.vi v10, v10, 23
+; RV64F-NEXT:    vwsubu.vv v8, v9, v10
+; RV64F-NEXT:    fsrm a0
+; RV64F-NEXT:    ret
+;
 ; CHECK-D-LABEL: ctlz_zero_undef_nxv1i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -2893,6 +3043,32 @@ define <vscale x 2 x i64> @ctlz_zero_undef_nxv2i64(<vscale x 2 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
+; RV32F-LABEL: ctlz_zero_undef_nxv2i64:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    li a0, 190
+; RV32F-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; RV32F-NEXT:    vmv.v.x v10, a0
+; RV32F-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV32F-NEXT:    fsrmi a0, 1
+; RV32F-NEXT:    vfncvt.f.xu.w v12, v8
+; RV32F-NEXT:    vsrl.vi v8, v12, 23
+; RV32F-NEXT:    vwsubu.wv v10, v10, v8
+; RV32F-NEXT:    fsrm a0
+; RV32F-NEXT:    vmv2r.v v8, v10
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: ctlz_zero_undef_nxv2i64:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    li a0, 190
+; RV64F-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; RV64F-NEXT:    vmv.v.x v10, a0
+; RV64F-NEXT:    fsrmi a0, 1
+; RV64F-NEXT:    vfncvt.f.xu.w v11, v8
+; RV64F-NEXT:    vsrl.vi v11, v11, 23
+; RV64F-NEXT:    vwsubu.vv v8, v10, v11
+; RV64F-NEXT:    fsrm a0
+; RV64F-NEXT:    ret
+;
 ; CHECK-D-LABEL: ctlz_zero_undef_nxv2i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -3015,6 +3191,32 @@ define <vscale x 4 x i64> @ctlz_zero_undef_nxv4i64(<vscale x 4 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
+; RV32F-LABEL: ctlz_zero_undef_nxv4i64:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    li a0, 190
+; RV32F-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; RV32F-NEXT:    vmv.v.x v12, a0
+; RV32F-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32F-NEXT:    fsrmi a0, 1
+; RV32F-NEXT:    vfncvt.f.xu.w v16, v8
+; RV32F-NEXT:    vsrl.vi v8, v16, 23
+; RV32F-NEXT:    vwsubu.wv v12, v12, v8
+; RV32F-NEXT:    fsrm a0
+; RV32F-NEXT:    vmv4r.v v8, v12
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: ctlz_zero_undef_nxv4i64:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    li a0, 190
+; RV64F-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; RV64F-NEXT:    vmv.v.x v12, a0
+; RV64F-NEXT:    fsrmi a0, 1
+; RV64F-NEXT:    vfncvt.f.xu.w v14, v8
+; RV64F-NEXT:    vsrl.vi v14, v14, 23
+; RV64F-NEXT:    vwsubu.vv v8, v12, v14
+; RV64F-NEXT:    fsrm a0
+; RV64F-NEXT:    ret
+;
 ; CHECK-D-LABEL: ctlz_zero_undef_nxv4i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -3137,6 +3339,32 @@ define <vscale x 8 x i64> @ctlz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
 ; RV64I-NEXT:    vsrl.vx v8, v8, a0
 ; RV64I-NEXT:    ret
 ;
+; RV32F-LABEL: ctlz_zero_undef_nxv8i64:
+; RV32F:       # %bb.0:
+; RV32F-NEXT:    vmv8r.v v16, v8
+; RV32F-NEXT:    li a0, 190
+; RV32F-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; RV32F-NEXT:    vmv.v.x v8, a0
+; RV32F-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; RV32F-NEXT:    fsrmi a0, 1
+; RV32F-NEXT:    vfncvt.f.xu.w v24, v16
+; RV32F-NEXT:    vsrl.vi v16, v24, 23
+; RV32F-NEXT:    vwsubu.wv v8, v8, v16
+; RV32F-NEXT:    fsrm a0
+; RV32F-NEXT:    ret
+;
+; RV64F-LABEL: ctlz_zero_undef_nxv8i64:
+; RV64F:       # %bb.0:
+; RV64F-NEXT:    li a0, 190
+; RV64F-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; RV64F-NEXT:    vmv.v.x v16, a0
+; RV64F-NEXT:    fsrmi a0, 1
+; RV64F-NEXT:    vfncvt.f.xu.w v20, v8
+; RV64F-NEXT:    vsrl.vi v20, v20, 23
+; RV64F-NEXT:    vwsubu.vv v8, v16, v20
+; RV64F-NEXT:    fsrm a0
+; RV64F-NEXT:    ret
+;
 ; CHECK-D-LABEL: ctlz_zero_undef_nxv8i64:
 ; CHECK-D:       # %bb.0:
 ; CHECK-D-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

>From 945c34025098bd343b6100ffefa995325f69310c Mon Sep 17 00:00:00 2001
From: sun-jacobi <sun1011jacobi at gmail.com>
Date: Tue, 9 Apr 2024 21:05:17 +0900
Subject: [PATCH 4/4] [RISCV] update vwsll-sdnode.ll

---
 llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll | 23 ++++++++++++++-------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
index 72fc9c918f22c4..41ec2fc443d028 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv32 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB
-; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,RV32ZVBB
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,RV64ZVBB
 
 ; ==============================================================================
 ; i32 -> i64
@@ -864,12 +864,19 @@ define <vscale x 2 x i64> @vwsll_vi_nxv2i64_nxv2i8(<vscale x 2 x i8> %a) {
 ; CHECK-NEXT:    vsll.vi v8, v10, 2
 ; CHECK-NEXT:    ret
 ;
-; CHECK-ZVBB-LABEL: vwsll_vi_nxv2i64_nxv2i8:
-; CHECK-ZVBB:       # %bb.0:
-; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-ZVBB-NEXT:    vzext.vf8 v10, v8
-; CHECK-ZVBB-NEXT:    vsll.vi v8, v10, 2
-; CHECK-ZVBB-NEXT:    ret
+; RV32ZVBB-LABEL: vwsll_vi_nxv2i64_nxv2i8:
+; RV32ZVBB:       # %bb.0:
+; RV32ZVBB-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVBB-NEXT:    vzext.vf8 v10, v8
+; RV32ZVBB-NEXT:    vsll.vi v8, v10, 2
+; RV32ZVBB-NEXT:    ret
+;
+; RV64ZVBB-LABEL: vwsll_vi_nxv2i64_nxv2i8:
+; RV64ZVBB:       # %bb.0:
+; RV64ZVBB-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; RV64ZVBB-NEXT:    vzext.vf4 v10, v8
+; RV64ZVBB-NEXT:    vwsll.vi v8, v10, 2
+; RV64ZVBB-NEXT:    ret
   %x = zext <vscale x 2 x i8> %a to <vscale x 2 x i64>
   %z = shl <vscale x 2 x i64> %x, splat (i64 2)
   ret <vscale x 2 x i64> %z


