[llvm] 3a4ad45 - [DAGCombiner] Combine trunc (splat_vector x) -> splat_vector (trunc x)

Luke Lau via llvm-commits <llvm-commits at lists.llvm.org>
Wed Aug 30 07:23:04 PDT 2023


Author: Luke Lau
Date: 2023-08-30T15:22:57+01:00
New Revision: 3a4ad45a2c5e700b0245db85b0364f5a62fa4359

URL: https://github.com/llvm/llvm-project/commit/3a4ad45a2c5e700b0245db85b0364f5a62fa4359
DIFF: https://github.com/llvm/llvm-project/commit/3a4ad45a2c5e700b0245db85b0364f5a62fa4359.diff

LOG: [DAGCombiner] Combine trunc (splat_vector x) -> splat_vector (trunc x)

As discussed in https://reviews.llvm.org/D158853, moving the truncate
into the splat helps more splatted scalar operands get selected on RISC-V, and
also avoids the need for splat_vector_parts on RV32.
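
As an IR-level illustration of the pattern this targets (taken verbatim from
the vnsrl-sdnode.ll test updated below; note the combine itself runs on
SelectionDAG SPLAT_VECTOR nodes, not on IR):

    ; A scalar i64 is splatted, then the whole splat vector is truncated.
    %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
    %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
    %vb = trunc <vscale x 1 x i64> %splat to <vscale x 1 x i32>
    ; With the combine, the resulting DAG is as if %b were truncated to i32
    ; first and the narrower scalar splatted, so RV32 no longer has to
    ; expand a 64-bit splat through the stack.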

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D159147

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 55de08bd4a357d..822351b549360c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -14460,6 +14460,16 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
     return DAG.getBuildVector(VT, DL, TruncOps);
   }
 
+  // trunc (splat_vector x) -> splat_vector (trunc x)
+  if (N0.getOpcode() == ISD::SPLAT_VECTOR &&
+      (!LegalTypes || TLI.isTypeLegal(VT.getScalarType())) &&
+      (!LegalOperations || TLI.isOperationLegal(ISD::SPLAT_VECTOR, VT))) {
+    SDLoc DL(N);
+    EVT SVT = VT.getScalarType();
+    return DAG.getSplatVector(
+        VT, DL, DAG.getNode(ISD::TRUNCATE, DL, SVT, N0->getOperand(0)));
+  }
+
   // Fold a series of buildvector, bitcast, and truncate if possible.
   // For example fold
   //   (2xi32 trunc (bitcast ((4xi32)buildvector x, x, y, y) 2xi64)) to
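
The new fold is guarded so it only creates nodes that are still valid at the
current combine stage: after type legalization the truncated scalar type must
be legal, and after operation legalization SPLAT_VECTOR must be legal for the
result type. One way to watch the combine fire on the tests below (a sketch,
assuming an assertions-enabled build of llc, since -debug-only is compiled
out of release builds):

    llc -mtriple=riscv64 -mattr=+v -debug-only=dagcombine \
        llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll -o /dev/null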

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
index f19f0addd87c7a..4c1e53cf9a01bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+v -target-abi=ilp32 \
-; RUN:     -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV32 %s
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \
-; RUN:     -verify-machineinstrs < %s | FileCheck -check-prefixes=CHECK,RV64 %s
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 define <vscale x 1 x i32> @vnsrl_wx_i64_nxv1i32(<vscale x 1 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vnsrl_wx_i64_nxv1i32:
@@ -634,27 +634,11 @@ define <vscale x 8 x i32> @vnsrl_wi_i32_nxv8i32_zext(<vscale x 8 x i64> %va) {
 }
 
 define <vscale x 1 x i16> @vnsrl_wx_i64_nxv1i16(<vscale x 1 x i32> %va, i64 %b) {
-; RV32-LABEL: vnsrl_wx_i64_nxv1i16:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
-; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vnsrl.wi v9, v9, 0
-; RV32-NEXT:    vsrl.vv v8, v8, v9
-; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vnsrl_wx_i64_nxv1i16:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
-; RV64-NEXT:    vnsrl.wx v8, v8, a0
-; RV64-NEXT:    ret
+; CHECK-LABEL: vnsrl_wx_i64_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vnsrl.wx v8, v8, a0
+; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   %vb = trunc <vscale x 1 x i64> %splat to <vscale x 1 x i32>
@@ -664,29 +648,11 @@ define <vscale x 1 x i16> @vnsrl_wx_i64_nxv1i16(<vscale x 1 x i32> %va, i64 %b)
 }
 
 define <vscale x 1 x i8> @vnsrl_wx_i64_nxv1i8(<vscale x 1 x i16> %va, i64 %b) {
-; RV32-LABEL: vnsrl_wx_i64_nxv1i8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
-; RV32-NEXT:    vlse64.v v9, (a0), zero
-; RV32-NEXT:    vnsrl.wi v9, v9, 0
-; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; RV32-NEXT:    vnsrl.wi v9, v9, 0
-; RV32-NEXT:    vsrl.vv v8, v8, v9
-; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
-; RV32-NEXT:    vnsrl.wi v8, v8, 0
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    ret
-;
-; RV64-LABEL: vnsrl_wx_i64_nxv1i8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
-; RV64-NEXT:    vnsrl.wx v8, v8, a0
-; RV64-NEXT:    ret
+; CHECK-LABEL: vnsrl_wx_i64_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vnsrl.wx v8, v8, a0
+; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   %vb = trunc <vscale x 1 x i64> %splat to <vscale x 1 x i16>

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
index db559d0f84f4dc..f45a281c2e00c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
-; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB32
-; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB64
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB
 
 ; ==============================================================================
 ; i32 -> i64
@@ -287,52 +287,19 @@ define <vscale x 4 x i32> @vwsll_vv_nxv4i32_zext(<vscale x 4 x i16> %a, <vscale
 }
 
 define <vscale x 4 x i32> @vwsll_vx_i64_nxv4i32(<vscale x 4 x i16> %a, i64 %b) {
-; CHECK-RV32-LABEL: vwsll_vx_i64_nxv4i32:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT:    sw a1, 12(sp)
-; CHECK-RV32-NEXT:    sw a0, 8(sp)
-; CHECK-RV32-NEXT:    addi a0, sp, 8
-; CHECK-RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-RV32-NEXT:    vzext.vf2 v10, v8
-; CHECK-RV32-NEXT:    vnsrl.wi v8, v12, 0
-; CHECK-RV32-NEXT:    vsll.vv v8, v10, v8
-; CHECK-RV32-NEXT:    addi sp, sp, 16
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: vwsll_vx_i64_nxv4i32:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-RV64-NEXT:    vmv.v.x v12, a0
-; CHECK-RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-RV64-NEXT:    vzext.vf2 v10, v8
-; CHECK-RV64-NEXT:    vnsrl.wi v8, v12, 0
-; CHECK-RV64-NEXT:    vsll.vv v8, v10, v8
-; CHECK-RV64-NEXT:    ret
-;
-; CHECK-ZVBB32-LABEL: vwsll_vx_i64_nxv4i32:
-; CHECK-ZVBB32:       # %bb.0:
-; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
-; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
-; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
-; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-ZVBB32-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-ZVBB32-NEXT:    vzext.vf2 v10, v8
-; CHECK-ZVBB32-NEXT:    vnsrl.wi v8, v12, 0
-; CHECK-ZVBB32-NEXT:    vsll.vv v8, v10, v8
-; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
-; CHECK-ZVBB32-NEXT:    ret
+; CHECK-LABEL: vwsll_vx_i64_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsll.vx v8, v10, a0
+; CHECK-NEXT:    ret
 ;
-; CHECK-ZVBB64-LABEL: vwsll_vx_i64_nxv4i32:
-; CHECK-ZVBB64:       # %bb.0:
-; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-ZVBB64-NEXT:    vwsll.vx v10, v8, a0
-; CHECK-ZVBB64-NEXT:    vmv2r.v v8, v10
-; CHECK-ZVBB64-NEXT:    ret
+; CHECK-ZVBB-LABEL: vwsll_vx_i64_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
   %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
   %x = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
@@ -528,61 +495,19 @@ define <vscale x 8 x i16> @vwsll_vv_nxv8i16_zext(<vscale x 8 x i8> %a, <vscale x
 }
 
 define <vscale x 8 x i16> @vwsll_vx_i64_nxv8i16(<vscale x 8 x i8> %a, i64 %b) {
-; CHECK-RV32-LABEL: vwsll_vx_i64_nxv8i16:
-; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT:    sw a1, 12(sp)
-; CHECK-RV32-NEXT:    sw a0, 8(sp)
-; CHECK-RV32-NEXT:    addi a0, sp, 8
-; CHECK-RV32-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-RV32-NEXT:    vzext.vf2 v10, v8
-; CHECK-RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-RV32-NEXT:    vnsrl.wi v12, v16, 0
-; CHECK-RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-RV32-NEXT:    vnsrl.wi v8, v12, 0
-; CHECK-RV32-NEXT:    vsll.vv v8, v10, v8
-; CHECK-RV32-NEXT:    addi sp, sp, 16
-; CHECK-RV32-NEXT:    ret
-;
-; CHECK-RV64-LABEL: vwsll_vx_i64_nxv8i16:
-; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-RV64-NEXT:    vmv.v.x v16, a0
-; CHECK-RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-RV64-NEXT:    vzext.vf2 v10, v8
-; CHECK-RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-RV64-NEXT:    vnsrl.wi v12, v16, 0
-; CHECK-RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-RV64-NEXT:    vnsrl.wi v8, v12, 0
-; CHECK-RV64-NEXT:    vsll.vv v8, v10, v8
-; CHECK-RV64-NEXT:    ret
-;
-; CHECK-ZVBB32-LABEL: vwsll_vx_i64_nxv8i16:
-; CHECK-ZVBB32:       # %bb.0:
-; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
-; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
-; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
-; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-ZVBB32-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-ZVBB32-NEXT:    vzext.vf2 v10, v8
-; CHECK-ZVBB32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-ZVBB32-NEXT:    vnsrl.wi v12, v16, 0
-; CHECK-ZVBB32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-ZVBB32-NEXT:    vnsrl.wi v8, v12, 0
-; CHECK-ZVBB32-NEXT:    vsll.vv v8, v10, v8
-; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
-; CHECK-ZVBB32-NEXT:    ret
+; CHECK-LABEL: vwsll_vx_i64_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsll.vx v8, v10, a0
+; CHECK-NEXT:    ret
 ;
-; CHECK-ZVBB64-LABEL: vwsll_vx_i64_nxv8i16:
-; CHECK-ZVBB64:       # %bb.0:
-; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-ZVBB64-NEXT:    vwsll.vx v10, v8, a0
-; CHECK-ZVBB64-NEXT:    vmv2r.v v8, v10
-; CHECK-ZVBB64-NEXT:    ret
+; CHECK-ZVBB-LABEL: vwsll_vx_i64_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
   %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 8 x i32> zeroinitializer
   %x = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
@@ -594,12 +519,9 @@ define <vscale x 8 x i16> @vwsll_vx_i64_nxv8i16(<vscale x 8 x i8> %a, i64 %b) {
 define <vscale x 8 x i16> @vwsll_vx_i32_nxv8i16(<vscale x 8 x i8> %a, i32 %b) {
 ; CHECK-LABEL: vwsll_vx_i32_nxv8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmv.v.x v12, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
-; CHECK-NEXT:    vsll.vv v8, v10, v8
+; CHECK-NEXT:    vsll.vx v8, v10, a0
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-ZVBB-LABEL: vwsll_vx_i32_nxv8i16:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll
index 4dda230d940594..995adaf6e185ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll
@@ -320,45 +320,25 @@ define <vscale x 4 x i32> @vwsll_vv_nxv4i32_zext(<vscale x 4 x i16> %a, <vscale
 define <vscale x 4 x i32> @vwsll_vx_i64_nxv4i32(<vscale x 4 x i16> %a, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %vl) {
 ; CHECK-RV32-LABEL: vwsll_vx_i64_nxv4i32:
 ; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT:    sw a1, 12(sp)
-; CHECK-RV32-NEXT:    sw a0, 8(sp)
-; CHECK-RV32-NEXT:    addi a0, sp, 8
 ; CHECK-RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v12, (a0), zero
 ; CHECK-RV32-NEXT:    vzext.vf2 v10, v8
-; CHECK-RV32-NEXT:    vnsrl.wi v8, v12, 0
 ; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
-; CHECK-RV32-NEXT:    vsll.vv v8, v10, v8, v0.t
-; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vwsll_vx_i64_nxv4i32:
 ; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli a2, zero, e64, m4, ta, ma
-; CHECK-RV64-NEXT:    vmv.v.x v12, a0
-; CHECK-RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-RV64-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
 ; CHECK-RV64-NEXT:    vzext.vf2 v10, v8
-; CHECK-RV64-NEXT:    vnsrl.wi v8, v12, 0
 ; CHECK-RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-RV64-NEXT:    vsll.vv v8, v10, v8, v0.t
+; CHECK-RV64-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-RV64-NEXT:    ret
 ;
 ; CHECK-ZVBB32-LABEL: vwsll_vx_i64_nxv4i32:
 ; CHECK-ZVBB32:       # %bb.0:
-; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
-; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
-; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
-; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-ZVBB32-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-ZVBB32-NEXT:    vzext.vf2 v10, v8
-; CHECK-ZVBB32-NEXT:    vnsrl.wi v8, v12, 0
-; CHECK-ZVBB32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
-; CHECK-ZVBB32-NEXT:    vsll.vv v8, v10, v8, v0.t
-; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-ZVBB32-NEXT:    vwsll.vx v10, v8, a0, v0.t
+; CHECK-ZVBB32-NEXT:    vmv2r.v v8, v10
 ; CHECK-ZVBB32-NEXT:    ret
 ;
 ; CHECK-ZVBB64-LABEL: vwsll_vx_i64_nxv4i32:
@@ -575,54 +555,25 @@ define <vscale x 8 x i16> @vwsll_vv_nxv8i16_zext(<vscale x 8 x i8> %a, <vscale x
 define <vscale x 8 x i16> @vwsll_vx_i64_nxv8i16(<vscale x 8 x i8> %a, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-RV32-LABEL: vwsll_vx_i64_nxv8i16:
 ; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT:    sw a1, 12(sp)
-; CHECK-RV32-NEXT:    sw a0, 8(sp)
-; CHECK-RV32-NEXT:    addi a0, sp, 8
 ; CHECK-RV32-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v16, (a0), zero
 ; CHECK-RV32-NEXT:    vzext.vf2 v10, v8
-; CHECK-RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-RV32-NEXT:    vnsrl.wi v12, v16, 0
-; CHECK-RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-RV32-NEXT:    vnsrl.wi v8, v12, 0
 ; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
-; CHECK-RV32-NEXT:    vsll.vv v8, v10, v8, v0.t
-; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vwsll_vx_i64_nxv8i16:
 ; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
-; CHECK-RV64-NEXT:    vmv.v.x v16, a0
-; CHECK-RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-RV64-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
 ; CHECK-RV64-NEXT:    vzext.vf2 v10, v8
-; CHECK-RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-RV64-NEXT:    vnsrl.wi v12, v16, 0
-; CHECK-RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-RV64-NEXT:    vnsrl.wi v8, v12, 0
 ; CHECK-RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-RV64-NEXT:    vsll.vv v8, v10, v8, v0.t
+; CHECK-RV64-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-RV64-NEXT:    ret
 ;
 ; CHECK-ZVBB32-LABEL: vwsll_vx_i64_nxv8i16:
 ; CHECK-ZVBB32:       # %bb.0:
-; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
-; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
-; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
-; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-ZVBB32-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-ZVBB32-NEXT:    vzext.vf2 v10, v8
-; CHECK-ZVBB32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-ZVBB32-NEXT:    vnsrl.wi v12, v16, 0
-; CHECK-ZVBB32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-ZVBB32-NEXT:    vnsrl.wi v8, v12, 0
-; CHECK-ZVBB32-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
-; CHECK-ZVBB32-NEXT:    vsll.vv v8, v10, v8, v0.t
-; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
+; CHECK-ZVBB32-NEXT:    vwsll.vx v10, v8, a0, v0.t
+; CHECK-ZVBB32-NEXT:    vmv2r.v v8, v10
 ; CHECK-ZVBB32-NEXT:    ret
 ;
 ; CHECK-ZVBB64-LABEL: vwsll_vx_i64_nxv8i16:
@@ -642,13 +593,10 @@ define <vscale x 8 x i16> @vwsll_vx_i64_nxv8i16(<vscale x 8 x i8> %a, i64 %b, <v
 define <vscale x 8 x i16> @vwsll_vx_i32_nxv8i16(<vscale x 8 x i8> %a, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i32_nxv8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmv.v.x v12, a0
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
-; CHECK-NEXT:    vnsrl.wi v8, v12, 0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v10, v8, v0.t
+; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 ;
 ; CHECK-ZVBB-LABEL: vwsll_vx_i32_nxv8i16:


        

