[llvm] 6e4860f - [SDAG] Add SimplifyDemandedBits support for ISD::SPLAT_VECTOR

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 28 02:36:05 PDT 2023


Author: Luke Lau
Date: 2023-08-28T10:35:56+01:00
New Revision: 6e4860f5d028e03419ec7d5d2e6127d3a57df050

URL: https://github.com/llvm/llvm-project/commit/6e4860f5d028e03419ec7d5d2e6127d3a57df050
DIFF: https://github.com/llvm/llvm-project/commit/6e4860f5d028e03419ec7d5d2e6127d3a57df050.diff

LOG: [SDAG] Add SimplifyDemandedBits support for ISD::SPLAT_VECTOR

This improves some cases where a splat_vector uses a build_pair that can be
simplified, e.g.:

(rotl x:i64, splat_vector (build_pair x1:i32, x2:i32))

rotl only demands the bottom 6 bits of the shift amount, so this patch allows it to be simplified to:

(rotl x:i64, splat_vector (build_pair x1:i32, undef:i32))

This in turn improves some cases where a splat_vector_parts is lowered on
RV32.
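
For a concrete instance of this pattern, here is a minimal reduction of the
vrol_vx_nxv1i64 test updated below (the function name is illustrative): on
riscv32 the i64 rotate amount is legalized into a (build_pair lo:i32, hi:i32)
before being splatted, and since only its low 6 bits are demanded, the high
half can become undef and the splat can be lowered with vmv.v.x instead of a
stack store plus vlse64:

declare <vscale x 1 x i64> @llvm.fshl.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>)

define <vscale x 1 x i64> @rotl_splat(<vscale x 1 x i64> %a, i64 %amt) {
  ; fshl with matching first and second operands is a rotate left; it only
  ; demands log2(64) = 6 bits of the splatted amount.
  %head = insertelement <vscale x 1 x i64> poison, i64 %amt, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
  %x = call <vscale x 1 x i64> @llvm.fshl.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %a, <vscale x 1 x i64> %splat)
  ret <vscale x 1 x i64> %x
}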

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D158839

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-int-select.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll
    llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
    llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll
    llvm/test/CodeGen/WebAssembly/pr59626.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index d162c019aac8ae..fa030720dff33f 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1152,6 +1152,18 @@ bool TargetLowering::SimplifyDemandedBits(
     // TODO: Call SimplifyDemandedBits for non-constant demanded elements.
     Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
     return false; // Don't fall through, will infinitely loop.
+  case ISD::SPLAT_VECTOR: {
+    SDValue Scl = Op.getOperand(0);
+    APInt DemandedSclBits = DemandedBits.zextOrTrunc(Scl.getValueSizeInBits());
+    KnownBits KnownScl;
+    if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
+      return true;
+
+    // Implicitly truncate the bits to match the official semantics of
+    // SPLAT_VECTOR.
+    Known = KnownScl.trunc(BitWidth);
+    break;
+  }
   case ISD::LOAD: {
     auto *LD = cast<LoadSDNode>(Op);
     if (getTargetConstantFromLoad(LD)) {

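Note that SPLAT_VECTOR's scalar operand may be wider than the vector element
type, with the extra high bits implicitly truncated, which is what the
zextOrTrunc/trunc pair above accounts for. A minimal standalone sketch of the
bookkeeping, assuming a hypothetical i8 element splatted from an i32 scalar
where a user demands only the low 6 bits of each element:

// Hypothetical values for illustration; not part of the patch.
#include "llvm/ADT/APInt.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

KnownBits splatKnownBitsSketch(const KnownBits &KnownScl /* i32 scalar */) {
  unsigned BitWidth = 8;                                  // element width
  APInt DemandedBits = APInt::getLowBitsSet(BitWidth, 6); // 0b00111111
  // Widen the demanded mask to the scalar width: bits 6..31 of the scalar
  // are not demanded, so e.g. the high half of a (build_pair lo, hi)
  // feeding the splat may be simplified to undef.
  APInt DemandedSclBits = DemandedBits.zextOrTrunc(32);
  (void)DemandedSclBits; // would be passed to SimplifyDemandedBits
  // Truncate the scalar's known bits back to the element width, matching
  // SPLAT_VECTOR's implicit truncation of a wider scalar operand.
  return KnownScl.trunc(BitWidth);
}
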
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
index 8b3946136f90f0..13ebda1df7f9d1 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-select.ll
@@ -35,9 +35,9 @@ define void @select_v16f16(ptr %a, ptr %b, i1 %mask) vscale_range(2,0) #0 {
 ; CHECK-LABEL: select_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h, vl16
-; CHECK-NEXT:    and w8, w2, #0x1
+; CHECK-NEXT:    mov z0.h, w2
 ; CHECK-NEXT:    ptrue p1.h
-; CHECK-NEXT:    mov z0.h, w8
+; CHECK-NEXT:    and z0.h, z0.h, #0x1
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x1]
 ; CHECK-NEXT:    cmpne p1.h, p1/z, z0.h, #0
@@ -55,10 +55,10 @@ define void @select_v32f16(ptr %a, ptr %b, i1 %mask) #0 {
 ; VBITS_GE_256-LABEL: select_v32f16:
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
-; VBITS_GE_256-NEXT:    and w8, w2, #0x1
-; VBITS_GE_256-NEXT:    ptrue p1.h
-; VBITS_GE_256-NEXT:    mov z0.h, w8
+; VBITS_GE_256-NEXT:    mov z0.h, w2
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
+; VBITS_GE_256-NEXT:    ptrue p1.h
+; VBITS_GE_256-NEXT:    and z0.h, z0.h, #0x1
 ; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1, x8, lsl #1]
@@ -73,9 +73,9 @@ define void @select_v32f16(ptr %a, ptr %b, i1 %mask) #0 {
 ; VBITS_GE_512-LABEL: select_v32f16:
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
-; VBITS_GE_512-NEXT:    and w8, w2, #0x1
+; VBITS_GE_512-NEXT:    mov z0.h, w2
 ; VBITS_GE_512-NEXT:    ptrue p1.h
-; VBITS_GE_512-NEXT:    mov z0.h, w8
+; VBITS_GE_512-NEXT:    and z0.h, z0.h, #0x1
 ; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ld1h { z2.h }, p0/z, [x1]
 ; VBITS_GE_512-NEXT:    cmpne p1.h, p1/z, z0.h, #0
@@ -93,9 +93,9 @@ define void @select_v64f16(ptr %a, ptr %b, i1 %mask) vscale_range(8,0) #0 {
 ; CHECK-LABEL: select_v64f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h, vl64
-; CHECK-NEXT:    and w8, w2, #0x1
+; CHECK-NEXT:    mov z0.h, w2
 ; CHECK-NEXT:    ptrue p1.h
-; CHECK-NEXT:    mov z0.h, w8
+; CHECK-NEXT:    and z0.h, z0.h, #0x1
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x1]
 ; CHECK-NEXT:    cmpne p1.h, p1/z, z0.h, #0
@@ -113,9 +113,9 @@ define void @select_v128f16(ptr %a, ptr %b, i1 %mask) vscale_range(16,0) #0 {
 ; CHECK-LABEL: select_v128f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h, vl128
-; CHECK-NEXT:    and w8, w2, #0x1
+; CHECK-NEXT:    mov z0.h, w2
 ; CHECK-NEXT:    ptrue p1.h
-; CHECK-NEXT:    mov z0.h, w8
+; CHECK-NEXT:    and z0.h, z0.h, #0x1
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x1]
 ; CHECK-NEXT:    cmpne p1.h, p1/z, z0.h, #0

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-select.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-select.ll
index 30b680b174b983..710dce4de6dda3 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-select.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-select.ll
@@ -35,9 +35,8 @@ define void @select_v32i8(ptr %a, ptr %b, i1 %mask) vscale_range(2,0) #0 {
 ; CHECK-LABEL: select_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl32
-; CHECK-NEXT:    and w8, w2, #0x1
+; CHECK-NEXT:    mov z0.b, w2
 ; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    mov z0.b, w8
 ; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x0]
 ; CHECK-NEXT:    ld1b { z2.b }, p0/z, [x1]
 ; CHECK-NEXT:    cmpne p1.b, p1/z, z0.b, #0
@@ -55,10 +54,9 @@ define void @select_v64i8(ptr %a, ptr %b, i1 %mask) #0 {
 ; VBITS_GE_256-LABEL: select_v64i8:
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.b, vl32
-; VBITS_GE_256-NEXT:    and w8, w2, #0x1
-; VBITS_GE_256-NEXT:    ptrue p1.b
-; VBITS_GE_256-NEXT:    mov z0.b, w8
+; VBITS_GE_256-NEXT:    mov z0.b, w2
 ; VBITS_GE_256-NEXT:    mov w8, #32 // =0x20
+; VBITS_GE_256-NEXT:    ptrue p1.b
 ; VBITS_GE_256-NEXT:    ld1b { z1.b }, p0/z, [x0, x8]
 ; VBITS_GE_256-NEXT:    ld1b { z2.b }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1b { z3.b }, p0/z, [x1, x8]
@@ -73,9 +71,8 @@ define void @select_v64i8(ptr %a, ptr %b, i1 %mask) #0 {
 ; VBITS_GE_512-LABEL: select_v64i8:
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    ptrue p0.b, vl64
-; VBITS_GE_512-NEXT:    and w8, w2, #0x1
+; VBITS_GE_512-NEXT:    mov z0.b, w2
 ; VBITS_GE_512-NEXT:    ptrue p1.b
-; VBITS_GE_512-NEXT:    mov z0.b, w8
 ; VBITS_GE_512-NEXT:    ld1b { z1.b }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ld1b { z2.b }, p0/z, [x1]
 ; VBITS_GE_512-NEXT:    cmpne p1.b, p1/z, z0.b, #0
@@ -93,9 +90,8 @@ define void @select_v128i8(ptr %a, ptr %b, i1 %mask) vscale_range(8,0) #0 {
 ; CHECK-LABEL: select_v128i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl128
-; CHECK-NEXT:    and w8, w2, #0x1
+; CHECK-NEXT:    mov z0.b, w2
 ; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    mov z0.b, w8
 ; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x0]
 ; CHECK-NEXT:    ld1b { z2.b }, p0/z, [x1]
 ; CHECK-NEXT:    cmpne p1.b, p1/z, z0.b, #0
@@ -113,9 +109,8 @@ define void @select_v256i8(ptr %a, ptr %b, i1 %mask) vscale_range(16,0) #0 {
 ; CHECK-LABEL: select_v256i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b, vl256
-; CHECK-NEXT:    and w8, w2, #0x1
+; CHECK-NEXT:    mov z0.b, w2
 ; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    mov z0.b, w8
 ; CHECK-NEXT:    ld1b { z1.b }, p0/z, [x0]
 ; CHECK-NEXT:    ld1b { z2.b }, p0/z, [x1]
 ; CHECK-NEXT:    cmpne p1.b, p1/z, z0.b, #0
@@ -159,9 +154,9 @@ define void @select_v16i16(ptr %a, ptr %b, i1 %mask) vscale_range(2,0) #0 {
 ; CHECK-LABEL: select_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h, vl16
-; CHECK-NEXT:    and w8, w2, #0x1
+; CHECK-NEXT:    mov z0.h, w2
 ; CHECK-NEXT:    ptrue p1.h
-; CHECK-NEXT:    mov z0.h, w8
+; CHECK-NEXT:    and z0.h, z0.h, #0x1
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x1]
 ; CHECK-NEXT:    cmpne p1.h, p1/z, z0.h, #0
@@ -179,10 +174,10 @@ define void @select_v32i16(ptr %a, ptr %b, i1 %mask) #0 {
 ; VBITS_GE_256-LABEL: select_v32i16:
 ; VBITS_GE_256:       // %bb.0:
 ; VBITS_GE_256-NEXT:    ptrue p0.h, vl16
-; VBITS_GE_256-NEXT:    and w8, w2, #0x1
-; VBITS_GE_256-NEXT:    ptrue p1.h
-; VBITS_GE_256-NEXT:    mov z0.h, w8
+; VBITS_GE_256-NEXT:    mov z0.h, w2
 ; VBITS_GE_256-NEXT:    mov x8, #16 // =0x10
+; VBITS_GE_256-NEXT:    ptrue p1.h
+; VBITS_GE_256-NEXT:    and z0.h, z0.h, #0x1
 ; VBITS_GE_256-NEXT:    ld1h { z1.h }, p0/z, [x0, x8, lsl #1]
 ; VBITS_GE_256-NEXT:    ld1h { z2.h }, p0/z, [x0]
 ; VBITS_GE_256-NEXT:    ld1h { z3.h }, p0/z, [x1, x8, lsl #1]
@@ -197,9 +192,9 @@ define void @select_v32i16(ptr %a, ptr %b, i1 %mask) #0 {
 ; VBITS_GE_512-LABEL: select_v32i16:
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
-; VBITS_GE_512-NEXT:    and w8, w2, #0x1
+; VBITS_GE_512-NEXT:    mov z0.h, w2
 ; VBITS_GE_512-NEXT:    ptrue p1.h
-; VBITS_GE_512-NEXT:    mov z0.h, w8
+; VBITS_GE_512-NEXT:    and z0.h, z0.h, #0x1
 ; VBITS_GE_512-NEXT:    ld1h { z1.h }, p0/z, [x0]
 ; VBITS_GE_512-NEXT:    ld1h { z2.h }, p0/z, [x1]
 ; VBITS_GE_512-NEXT:    cmpne p1.h, p1/z, z0.h, #0
@@ -217,9 +212,9 @@ define void @select_v64i16(ptr %a, ptr %b, i1 %mask) vscale_range(8,0) #0 {
 ; CHECK-LABEL: select_v64i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h, vl64
-; CHECK-NEXT:    and w8, w2, #0x1
+; CHECK-NEXT:    mov z0.h, w2
 ; CHECK-NEXT:    ptrue p1.h
-; CHECK-NEXT:    mov z0.h, w8
+; CHECK-NEXT:    and z0.h, z0.h, #0x1
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x1]
 ; CHECK-NEXT:    cmpne p1.h, p1/z, z0.h, #0
@@ -237,9 +232,9 @@ define void @select_v128i16(ptr %a, ptr %b, i1 %mask) vscale_range(16,0) #0 {
 ; CHECK-LABEL: select_v128i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h, vl128
-; CHECK-NEXT:    and w8, w2, #0x1
+; CHECK-NEXT:    mov z0.h, w2
 ; CHECK-NEXT:    ptrue p1.h
-; CHECK-NEXT:    mov z0.h, w8
+; CHECK-NEXT:    and z0.h, z0.h, #0x1
 ; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0]
 ; CHECK-NEXT:    ld1h { z2.h }, p0/z, [x1]
 ; CHECK-NEXT:    cmpne p1.h, p1/z, z0.h, #0

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll
index 0f79310a69f664..e10742cdea2556 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-select.ll
@@ -7,10 +7,10 @@ define <2 x half> @select_v2f16(<2 x half> %op1, <2 x half> %op2, i1 %mask) {
 ; CHECK-LABEL: select_v2f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    and w8, w0, #0x1
+; CHECK-NEXT:    mov z2.h, w0
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    mov z2.h, w8
+; CHECK-NEXT:    and z2.h, z2.h, #0x1
 ; CHECK-NEXT:    cmpne p0.h, p0/z, z2.h, #0
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
@@ -23,10 +23,10 @@ define <4 x half> @select_v4f16(<4 x half> %op1, <4 x half> %op2, i1 %mask) {
 ; CHECK-LABEL: select_v4f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    and w8, w0, #0x1
+; CHECK-NEXT:    mov z2.h, w0
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    mov z2.h, w8
+; CHECK-NEXT:    and z2.h, z2.h, #0x1
 ; CHECK-NEXT:    cmpne p0.h, p0/z, z2.h, #0
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
@@ -39,10 +39,10 @@ define <8 x half> @select_v8f16(<8 x half> %op1, <8 x half> %op2, i1 %mask) {
 ; CHECK-LABEL: select_v8f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    and w8, w0, #0x1
+; CHECK-NEXT:    mov z2.h, w0
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    mov z2.h, w8
+; CHECK-NEXT:    and z2.h, z2.h, #0x1
 ; CHECK-NEXT:    cmpne p0.h, p0/z, z2.h, #0
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
@@ -55,8 +55,8 @@ define void @select_v16f16(ptr %a, ptr %b, i1 %mask) {
 ; CHECK-LABEL: select_v16f16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    and w8, w2, #0x1
-; CHECK-NEXT:    mov z0.h, w8
+; CHECK-NEXT:    mov z0.h, w2
+; CHECK-NEXT:    and z0.h, z0.h, #0x1
 ; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x0, #16]

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll
index 5bcdaafc760dfb..d71ab37aaef8f7 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-select.ll
@@ -7,10 +7,10 @@ define <4 x i8> @select_v4i8(<4 x i8> %op1, <4 x i8> %op2, i1 %mask) {
 ; CHECK-LABEL: select_v4i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    and w8, w0, #0x1
+; CHECK-NEXT:    mov z2.h, w0
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    mov z2.h, w8
+; CHECK-NEXT:    and z2.h, z2.h, #0x1
 ; CHECK-NEXT:    cmpne p0.h, p0/z, z2.h, #0
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
@@ -23,10 +23,9 @@ define <8 x i8> @select_v8i8(<8 x i8> %op1, <8 x i8> %op2, i1 %mask) {
 ; CHECK-LABEL: select_v8i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    and w8, w0, #0x1
+; CHECK-NEXT:    mov z2.b, w0
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    mov z2.b, w8
 ; CHECK-NEXT:    cmpne p0.b, p0/z, z2.b, #0
 ; CHECK-NEXT:    sel z0.b, p0, z0.b, z1.b
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
@@ -39,10 +38,9 @@ define <16 x i8> @select_v16i8(<16 x i8> %op1, <16 x i8> %op2, i1 %mask) {
 ; CHECK-LABEL: select_v16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    and w8, w0, #0x1
+; CHECK-NEXT:    mov z2.b, w0
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    mov z2.b, w8
 ; CHECK-NEXT:    cmpne p0.b, p0/z, z2.b, #0
 ; CHECK-NEXT:    sel z0.b, p0, z0.b, z1.b
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
@@ -55,8 +53,7 @@ define void @select_v32i8(ptr %a, ptr %b, i1 %mask) {
 ; CHECK-LABEL: select_v32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    and w8, w2, #0x1
-; CHECK-NEXT:    mov z0.b, w8
+; CHECK-NEXT:    mov z0.b, w2
 ; CHECK-NEXT:    cmpne p0.b, p0/z, z0.b, #0
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x0, #16]
@@ -93,10 +90,10 @@ define <4 x i16> @select_v4i16(<4 x i16> %op1, <4 x i16> %op2, i1 %mask) {
 ; CHECK-LABEL: select_v4i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    and w8, w0, #0x1
+; CHECK-NEXT:    mov z2.h, w0
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
 ; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
-; CHECK-NEXT:    mov z2.h, w8
+; CHECK-NEXT:    and z2.h, z2.h, #0x1
 ; CHECK-NEXT:    cmpne p0.h, p0/z, z2.h, #0
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
@@ -109,10 +106,10 @@ define <8 x i16> @select_v8i16(<8 x i16> %op1, <8 x i16> %op2, i1 %mask) {
 ; CHECK-LABEL: select_v8i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    and w8, w0, #0x1
+; CHECK-NEXT:    mov z2.h, w0
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
 ; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
-; CHECK-NEXT:    mov z2.h, w8
+; CHECK-NEXT:    and z2.h, z2.h, #0x1
 ; CHECK-NEXT:    cmpne p0.h, p0/z, z2.h, #0
 ; CHECK-NEXT:    sel z0.h, p0, z0.h, z1.h
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
@@ -125,8 +122,8 @@ define void @select_v16i16(ptr %a, ptr %b, i1 %mask) {
 ; CHECK-LABEL: select_v16i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ptrue p0.h
-; CHECK-NEXT:    and w8, w2, #0x1
-; CHECK-NEXT:    mov z0.h, w8
+; CHECK-NEXT:    mov z0.h, w2
+; CHECK-NEXT:    and z0.h, z0.h, #0x1
 ; CHECK-NEXT:    cmpne p0.h, p0/z, z0.h, #0
 ; CHECK-NEXT:    ldr q0, [x0]
 ; CHECK-NEXT:    ldr q1, [x0, #16]

diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
index 938fbfe88ecc19..9f8f1b224db9e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
@@ -65,15 +65,9 @@ define void @strided_store_zero_start(i64 %n, ptr %p) {
 define void @strided_store_offset_start(i64 %n, ptr %p) {
 ; RV32-LABEL: strided_store_offset_start:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    vsetvli a3, zero, e64, m1, ta, ma
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vlse64.v v8, (a0), zero
-; RV32-NEXT:    vid.v v9
-; RV32-NEXT:    vadd.vv v8, v9, v8
+; RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT:    vid.v v8
+; RV32-NEXT:    vadd.vx v8, v8, a0
 ; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
 ; RV32-NEXT:    vnsrl.wi v8, v8, 0
 ; RV32-NEXT:    li a0, 48
@@ -82,7 +76,6 @@ define void @strided_store_offset_start(i64 %n, ptr %p) {
 ; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
 ; RV32-NEXT:    vmv.v.i v9, 0
 ; RV32-NEXT:    vsoxei32.v v9, (a0), v8
-; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: strided_store_offset_start:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll
index 107ddb8024e31b..394435f55cf20f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrol-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
-; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB32
-; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-ZVBB
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-ZVBB
 
 declare <vscale x 1 x i8> @llvm.fshl.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>)
 
@@ -942,13 +942,8 @@ define <vscale x 1 x i64> @vrol_vv_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x
 define <vscale x 1 x i64> @vrol_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b) {
 ; CHECK-RV32-LABEL: vrol_vx_nxv1i64:
 ; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT:    sw a1, 12(sp)
-; CHECK-RV32-NEXT:    sw a0, 8(sp)
-; CHECK-RV32-NEXT:    addi a0, sp, 8
 ; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v9, (a0), zero
+; CHECK-RV32-NEXT:    vmv.v.x v9, a0
 ; CHECK-RV32-NEXT:    li a0, 63
 ; CHECK-RV32-NEXT:    vand.vx v10, v9, a0
 ; CHECK-RV32-NEXT:    vsll.vv v10, v8, v10
@@ -956,7 +951,6 @@ define <vscale x 1 x i64> @vrol_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b) {
 ; CHECK-RV32-NEXT:    vand.vx v9, v9, a0
 ; CHECK-RV32-NEXT:    vsrl.vv v8, v8, v9
 ; CHECK-RV32-NEXT:    vor.vv v8, v10, v8
-; CHECK-RV32-NEXT:    addi sp, sp, 16
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vrol_vx_nxv1i64:
@@ -970,24 +964,11 @@ define <vscale x 1 x i64> @vrol_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b) {
 ; CHECK-RV64-NEXT:    vor.vv v8, v9, v8
 ; CHECK-RV64-NEXT:    ret
 ;
-; CHECK-ZVBB32-LABEL: vrol_vx_nxv1i64:
-; CHECK-ZVBB32:       # %bb.0:
-; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
-; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
-; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
-; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-ZVBB32-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-ZVBB32-NEXT:    vrol.vv v8, v8, v9
-; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
-; CHECK-ZVBB32-NEXT:    ret
-;
-; CHECK-ZVBB64-LABEL: vrol_vx_nxv1i64:
-; CHECK-ZVBB64:       # %bb.0:
-; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-ZVBB64-NEXT:    vrol.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT:    ret
+; CHECK-ZVBB-LABEL: vrol_vx_nxv1i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
   %b.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %b.splat = shufflevector <vscale x 1 x i64> %b.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   %x = call <vscale x 1 x i64> @llvm.fshl.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %a, <vscale x 1 x i64> %b.splat)
@@ -1021,13 +1002,8 @@ define <vscale x 2 x i64> @vrol_vv_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x
 define <vscale x 2 x i64> @vrol_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b) {
 ; CHECK-RV32-LABEL: vrol_vx_nxv2i64:
 ; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT:    sw a1, 12(sp)
-; CHECK-RV32-NEXT:    sw a0, 8(sp)
-; CHECK-RV32-NEXT:    addi a0, sp, 8
 ; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v10, (a0), zero
+; CHECK-RV32-NEXT:    vmv.v.x v10, a0
 ; CHECK-RV32-NEXT:    li a0, 63
 ; CHECK-RV32-NEXT:    vand.vx v12, v10, a0
 ; CHECK-RV32-NEXT:    vsll.vv v12, v8, v12
@@ -1035,7 +1011,6 @@ define <vscale x 2 x i64> @vrol_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b) {
 ; CHECK-RV32-NEXT:    vand.vx v10, v10, a0
 ; CHECK-RV32-NEXT:    vsrl.vv v8, v8, v10
 ; CHECK-RV32-NEXT:    vor.vv v8, v12, v8
-; CHECK-RV32-NEXT:    addi sp, sp, 16
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vrol_vx_nxv2i64:
@@ -1049,24 +1024,11 @@ define <vscale x 2 x i64> @vrol_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b) {
 ; CHECK-RV64-NEXT:    vor.vv v8, v10, v8
 ; CHECK-RV64-NEXT:    ret
 ;
-; CHECK-ZVBB32-LABEL: vrol_vx_nxv2i64:
-; CHECK-ZVBB32:       # %bb.0:
-; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
-; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
-; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
-; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-ZVBB32-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-ZVBB32-NEXT:    vrol.vv v8, v8, v10
-; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
-; CHECK-ZVBB32-NEXT:    ret
-;
-; CHECK-ZVBB64-LABEL: vrol_vx_nxv2i64:
-; CHECK-ZVBB64:       # %bb.0:
-; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-ZVBB64-NEXT:    vrol.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT:    ret
+; CHECK-ZVBB-LABEL: vrol_vx_nxv2i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
   %b.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %b.splat = shufflevector <vscale x 2 x i64> %b.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
   %x = call <vscale x 2 x i64> @llvm.fshl.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b.splat)
@@ -1100,13 +1062,8 @@ define <vscale x 4 x i64> @vrol_vv_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x
 define <vscale x 4 x i64> @vrol_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b) {
 ; CHECK-RV32-LABEL: vrol_vx_nxv4i64:
 ; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT:    sw a1, 12(sp)
-; CHECK-RV32-NEXT:    sw a0, 8(sp)
-; CHECK-RV32-NEXT:    addi a0, sp, 8
 ; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v12, (a0), zero
+; CHECK-RV32-NEXT:    vmv.v.x v12, a0
 ; CHECK-RV32-NEXT:    li a0, 63
 ; CHECK-RV32-NEXT:    vand.vx v16, v12, a0
 ; CHECK-RV32-NEXT:    vsll.vv v16, v8, v16
@@ -1114,7 +1071,6 @@ define <vscale x 4 x i64> @vrol_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b) {
 ; CHECK-RV32-NEXT:    vand.vx v12, v12, a0
 ; CHECK-RV32-NEXT:    vsrl.vv v8, v8, v12
 ; CHECK-RV32-NEXT:    vor.vv v8, v16, v8
-; CHECK-RV32-NEXT:    addi sp, sp, 16
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vrol_vx_nxv4i64:
@@ -1128,24 +1084,11 @@ define <vscale x 4 x i64> @vrol_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b) {
 ; CHECK-RV64-NEXT:    vor.vv v8, v12, v8
 ; CHECK-RV64-NEXT:    ret
 ;
-; CHECK-ZVBB32-LABEL: vrol_vx_nxv4i64:
-; CHECK-ZVBB32:       # %bb.0:
-; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
-; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
-; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
-; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-ZVBB32-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-ZVBB32-NEXT:    vrol.vv v8, v8, v12
-; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
-; CHECK-ZVBB32-NEXT:    ret
-;
-; CHECK-ZVBB64-LABEL: vrol_vx_nxv4i64:
-; CHECK-ZVBB64:       # %bb.0:
-; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-ZVBB64-NEXT:    vrol.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT:    ret
+; CHECK-ZVBB-LABEL: vrol_vx_nxv4i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
   %b.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %b.splat = shufflevector <vscale x 4 x i64> %b.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
   %x = call <vscale x 4 x i64> @llvm.fshl.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %a, <vscale x 4 x i64> %b.splat)
@@ -1179,13 +1122,8 @@ define <vscale x 8 x i64> @vrol_vv_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x
 define <vscale x 8 x i64> @vrol_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b) {
 ; CHECK-RV32-LABEL: vrol_vx_nxv8i64:
 ; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT:    sw a1, 12(sp)
-; CHECK-RV32-NEXT:    sw a0, 8(sp)
-; CHECK-RV32-NEXT:    addi a0, sp, 8
 ; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v16, (a0), zero
+; CHECK-RV32-NEXT:    vmv.v.x v16, a0
 ; CHECK-RV32-NEXT:    li a0, 63
 ; CHECK-RV32-NEXT:    vand.vx v24, v16, a0
 ; CHECK-RV32-NEXT:    vsll.vv v24, v8, v24
@@ -1193,7 +1131,6 @@ define <vscale x 8 x i64> @vrol_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b) {
 ; CHECK-RV32-NEXT:    vand.vx v16, v16, a0
 ; CHECK-RV32-NEXT:    vsrl.vv v8, v8, v16
 ; CHECK-RV32-NEXT:    vor.vv v8, v24, v8
-; CHECK-RV32-NEXT:    addi sp, sp, 16
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vrol_vx_nxv8i64:
@@ -1207,24 +1144,11 @@ define <vscale x 8 x i64> @vrol_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b) {
 ; CHECK-RV64-NEXT:    vor.vv v8, v16, v8
 ; CHECK-RV64-NEXT:    ret
 ;
-; CHECK-ZVBB32-LABEL: vrol_vx_nxv8i64:
-; CHECK-ZVBB32:       # %bb.0:
-; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
-; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
-; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
-; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-ZVBB32-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-ZVBB32-NEXT:    vrol.vv v8, v8, v16
-; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
-; CHECK-ZVBB32-NEXT:    ret
-;
-; CHECK-ZVBB64-LABEL: vrol_vx_nxv8i64:
-; CHECK-ZVBB64:       # %bb.0:
-; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-ZVBB64-NEXT:    vrol.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT:    ret
+; CHECK-ZVBB-LABEL: vrol_vx_nxv8i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vrol.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
   %b.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %b.splat = shufflevector <vscale x 8 x i64> %b.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
   %x = call <vscale x 8 x i64> @llvm.fshl.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %a, <vscale x 8 x i64> %b.splat)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll
index b2663549e11722..b5f45bffa89a1b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vror-sdnode.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
-; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB32
-; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-ZVBB
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-ZVBB
 
 declare <vscale x 1 x i8> @llvm.fshr.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>)
 declare <vscale x 1 x i8> @llvm.fshl.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>)
@@ -1609,13 +1609,8 @@ define <vscale x 1 x i64> @vror_vv_nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x
 define <vscale x 1 x i64> @vror_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b) {
 ; CHECK-RV32-LABEL: vror_vx_nxv1i64:
 ; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT:    sw a1, 12(sp)
-; CHECK-RV32-NEXT:    sw a0, 8(sp)
-; CHECK-RV32-NEXT:    addi a0, sp, 8
 ; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v9, (a0), zero
+; CHECK-RV32-NEXT:    vmv.v.x v9, a0
 ; CHECK-RV32-NEXT:    li a0, 63
 ; CHECK-RV32-NEXT:    vand.vx v10, v9, a0
 ; CHECK-RV32-NEXT:    vsrl.vv v10, v8, v10
@@ -1623,7 +1618,6 @@ define <vscale x 1 x i64> @vror_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b) {
 ; CHECK-RV32-NEXT:    vand.vx v9, v9, a0
 ; CHECK-RV32-NEXT:    vsll.vv v8, v8, v9
 ; CHECK-RV32-NEXT:    vor.vv v8, v10, v8
-; CHECK-RV32-NEXT:    addi sp, sp, 16
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vror_vx_nxv1i64:
@@ -1637,24 +1631,11 @@ define <vscale x 1 x i64> @vror_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b) {
 ; CHECK-RV64-NEXT:    vor.vv v8, v9, v8
 ; CHECK-RV64-NEXT:    ret
 ;
-; CHECK-ZVBB32-LABEL: vror_vx_nxv1i64:
-; CHECK-ZVBB32:       # %bb.0:
-; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
-; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
-; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
-; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-ZVBB32-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-ZVBB32-NEXT:    vror.vv v8, v8, v9
-; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
-; CHECK-ZVBB32-NEXT:    ret
-;
-; CHECK-ZVBB64-LABEL: vror_vx_nxv1i64:
-; CHECK-ZVBB64:       # %bb.0:
-; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-ZVBB64-NEXT:    vror.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT:    ret
+; CHECK-ZVBB-LABEL: vror_vx_nxv1i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
   %b.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
   %b.splat = shufflevector <vscale x 1 x i64> %b.head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   %x = call <vscale x 1 x i64> @llvm.fshr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %a, <vscale x 1 x i64> %b.splat)
@@ -1755,13 +1736,8 @@ define <vscale x 2 x i64> @vror_vv_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x
 define <vscale x 2 x i64> @vror_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b) {
 ; CHECK-RV32-LABEL: vror_vx_nxv2i64:
 ; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT:    sw a1, 12(sp)
-; CHECK-RV32-NEXT:    sw a0, 8(sp)
-; CHECK-RV32-NEXT:    addi a0, sp, 8
 ; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v10, (a0), zero
+; CHECK-RV32-NEXT:    vmv.v.x v10, a0
 ; CHECK-RV32-NEXT:    li a0, 63
 ; CHECK-RV32-NEXT:    vand.vx v12, v10, a0
 ; CHECK-RV32-NEXT:    vsrl.vv v12, v8, v12
@@ -1769,7 +1745,6 @@ define <vscale x 2 x i64> @vror_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b) {
 ; CHECK-RV32-NEXT:    vand.vx v10, v10, a0
 ; CHECK-RV32-NEXT:    vsll.vv v8, v8, v10
 ; CHECK-RV32-NEXT:    vor.vv v8, v12, v8
-; CHECK-RV32-NEXT:    addi sp, sp, 16
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vror_vx_nxv2i64:
@@ -1783,24 +1758,11 @@ define <vscale x 2 x i64> @vror_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b) {
 ; CHECK-RV64-NEXT:    vor.vv v8, v10, v8
 ; CHECK-RV64-NEXT:    ret
 ;
-; CHECK-ZVBB32-LABEL: vror_vx_nxv2i64:
-; CHECK-ZVBB32:       # %bb.0:
-; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
-; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
-; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
-; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-ZVBB32-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-ZVBB32-NEXT:    vror.vv v8, v8, v10
-; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
-; CHECK-ZVBB32-NEXT:    ret
-;
-; CHECK-ZVBB64-LABEL: vror_vx_nxv2i64:
-; CHECK-ZVBB64:       # %bb.0:
-; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-ZVBB64-NEXT:    vror.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT:    ret
+; CHECK-ZVBB-LABEL: vror_vx_nxv2i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
   %b.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
   %b.splat = shufflevector <vscale x 2 x i64> %b.head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
   %x = call <vscale x 2 x i64> @llvm.fshr.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b.splat)
@@ -1901,13 +1863,8 @@ define <vscale x 4 x i64> @vror_vv_nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x
 define <vscale x 4 x i64> @vror_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b) {
 ; CHECK-RV32-LABEL: vror_vx_nxv4i64:
 ; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT:    sw a1, 12(sp)
-; CHECK-RV32-NEXT:    sw a0, 8(sp)
-; CHECK-RV32-NEXT:    addi a0, sp, 8
 ; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v12, (a0), zero
+; CHECK-RV32-NEXT:    vmv.v.x v12, a0
 ; CHECK-RV32-NEXT:    li a0, 63
 ; CHECK-RV32-NEXT:    vand.vx v16, v12, a0
 ; CHECK-RV32-NEXT:    vsrl.vv v16, v8, v16
@@ -1915,7 +1872,6 @@ define <vscale x 4 x i64> @vror_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b) {
 ; CHECK-RV32-NEXT:    vand.vx v12, v12, a0
 ; CHECK-RV32-NEXT:    vsll.vv v8, v8, v12
 ; CHECK-RV32-NEXT:    vor.vv v8, v16, v8
-; CHECK-RV32-NEXT:    addi sp, sp, 16
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vror_vx_nxv4i64:
@@ -1929,24 +1885,11 @@ define <vscale x 4 x i64> @vror_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b) {
 ; CHECK-RV64-NEXT:    vor.vv v8, v12, v8
 ; CHECK-RV64-NEXT:    ret
 ;
-; CHECK-ZVBB32-LABEL: vror_vx_nxv4i64:
-; CHECK-ZVBB32:       # %bb.0:
-; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
-; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
-; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
-; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-ZVBB32-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-ZVBB32-NEXT:    vror.vv v8, v8, v12
-; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
-; CHECK-ZVBB32-NEXT:    ret
-;
-; CHECK-ZVBB64-LABEL: vror_vx_nxv4i64:
-; CHECK-ZVBB64:       # %bb.0:
-; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-ZVBB64-NEXT:    vror.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT:    ret
+; CHECK-ZVBB-LABEL: vror_vx_nxv4i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
   %b.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
   %b.splat = shufflevector <vscale x 4 x i64> %b.head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
   %x = call <vscale x 4 x i64> @llvm.fshr.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %a, <vscale x 4 x i64> %b.splat)
@@ -2047,13 +1990,8 @@ define <vscale x 8 x i64> @vror_vv_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x
 define <vscale x 8 x i64> @vror_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b) {
 ; CHECK-RV32-LABEL: vror_vx_nxv8i64:
 ; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    addi sp, sp, -16
-; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-RV32-NEXT:    sw a1, 12(sp)
-; CHECK-RV32-NEXT:    sw a0, 8(sp)
-; CHECK-RV32-NEXT:    addi a0, sp, 8
 ; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-RV32-NEXT:    vlse64.v v16, (a0), zero
+; CHECK-RV32-NEXT:    vmv.v.x v16, a0
 ; CHECK-RV32-NEXT:    li a0, 63
 ; CHECK-RV32-NEXT:    vand.vx v24, v16, a0
 ; CHECK-RV32-NEXT:    vsrl.vv v24, v8, v24
@@ -2061,7 +1999,6 @@ define <vscale x 8 x i64> @vror_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b) {
 ; CHECK-RV32-NEXT:    vand.vx v16, v16, a0
 ; CHECK-RV32-NEXT:    vsll.vv v8, v8, v16
 ; CHECK-RV32-NEXT:    vor.vv v8, v24, v8
-; CHECK-RV32-NEXT:    addi sp, sp, 16
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vror_vx_nxv8i64:
@@ -2075,24 +2012,11 @@ define <vscale x 8 x i64> @vror_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b) {
 ; CHECK-RV64-NEXT:    vor.vv v8, v16, v8
 ; CHECK-RV64-NEXT:    ret
 ;
-; CHECK-ZVBB32-LABEL: vror_vx_nxv8i64:
-; CHECK-ZVBB32:       # %bb.0:
-; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
-; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
-; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
-; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
-; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-ZVBB32-NEXT:    vlse64.v v16, (a0), zero
-; CHECK-ZVBB32-NEXT:    vror.vv v8, v8, v16
-; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
-; CHECK-ZVBB32-NEXT:    ret
-;
-; CHECK-ZVBB64-LABEL: vror_vx_nxv8i64:
-; CHECK-ZVBB64:       # %bb.0:
-; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-ZVBB64-NEXT:    vror.vx v8, v8, a0
-; CHECK-ZVBB64-NEXT:    ret
+; CHECK-ZVBB-LABEL: vror_vx_nxv8i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-ZVBB-NEXT:    vror.vx v8, v8, a0
+; CHECK-ZVBB-NEXT:    ret
   %b.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
   %b.splat = shufflevector <vscale x 8 x i64> %b.head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
   %x = call <vscale x 8 x i64> @llvm.fshr.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %a, <vscale x 8 x i64> %b.splat)

diff --git a/llvm/test/CodeGen/WebAssembly/pr59626.ll b/llvm/test/CodeGen/WebAssembly/pr59626.ll
index 39941dd1024a49..1a93f56a32d92f 100644
--- a/llvm/test/CodeGen/WebAssembly/pr59626.ll
+++ b/llvm/test/CodeGen/WebAssembly/pr59626.ll
@@ -16,16 +16,13 @@ define i8 @f(ptr %0, ptr %1) {
 ; CHECK-32-NEXT:    i32.const 0
 ; CHECK-32-NEXT:    i32.store8 2
 ; CHECK-32-NEXT:    local.get 1
-; CHECK-32-NEXT:    local.get 0
-; CHECK-32-NEXT:    i8x16.splat
-; CHECK-32-NEXT:    v128.store16_lane 0, 0
-; CHECK-32-NEXT:    v128.const 0, 0
-; CHECK-32-NEXT:    i32x4.extract_lane 0
+; CHECK-32-NEXT:    i32.const 0
+; CHECK-32-NEXT:    i32.store16 0
+; CHECK-32-NEXT:    i32.const 0
 ; CHECK-32-NEXT:    # fallthrough-return
 ;
 ; CHECK-64-LABEL: f:
 ; CHECK-64:         .functype f (i64, i64) -> (i32)
-; CHECK-64-NEXT:    .local i32
 ; CHECK-64-NEXT:  # %bb.0: # %BB
 ; CHECK-64-NEXT:    local.get 0
 ; CHECK-64-NEXT:    i32.const 0
@@ -34,11 +31,9 @@ define i8 @f(ptr %0, ptr %1) {
 ; CHECK-64-NEXT:    i32.const 0
 ; CHECK-64-NEXT:    i32.store16 0
 ; CHECK-64-NEXT:    local.get 1
-; CHECK-64-NEXT:    local.get 2
-; CHECK-64-NEXT:    i8x16.splat
-; CHECK-64-NEXT:    v128.store16_lane 0, 0
-; CHECK-64-NEXT:    v128.const 0, 0
-; CHECK-64-NEXT:    i32x4.extract_lane 0
+; CHECK-64-NEXT:    i32.const 0
+; CHECK-64-NEXT:    i32.store16 0
+; CHECK-64-NEXT:    i32.const 0
 ; CHECK-64-NEXT:    # fallthrough-return
 BB:
   store <3 x i8> zeroinitializer, ptr %0

