[llvm] c141bd3 - [DAGCombiner] Support all-ones/all-zeros SPLAT_VECTOR in more combines

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 21 03:12:50 PDT 2021


Author: Fraser Cormack
Date: 2021-04-21T11:05:37+01:00
New Revision: c141bd3cf96681399a1c234e78ca71c5a98c6f40

URL: https://github.com/llvm/llvm-project/commit/c141bd3cf96681399a1c234e78ca71c5a98c6f40
DIFF: https://github.com/llvm/llvm-project/commit/c141bd3cf96681399a1c234e78ca71c5a98c6f40.diff

LOG: [DAGCombiner] Support all-ones/all-zeros SPLAT_VECTOR in more combines

This patch incrementally improves support for SPLAT_VECTOR in a
handful of vector combines by switching a few more
isBuildVectorAllOnes/isBuildVectorAllZeros checks over to the
equivalent isConstantSplatVectorAllOnes/Zeros calls.

Reviewed By: paulwalker-arm

Differential Revision: https://reviews.llvm.org/D100851
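
As an illustrative example (not part of the patch): the all-ones
operand below is built as a scalable-vector splat, which SelectionDAG
lowers to SPLAT_VECTOR rather than BUILD_VECTOR, so the old
isBuildVectorAllOnes check never matched it. With this change the
(and x, -1) -> x fold fires for the splat form too:

define <vscale x 4 x i32> @and_allones(<vscale x 4 x i32> %x) {
  ; Splat of -1 across a scalable vector; a SPLAT_VECTOR node in the DAG.
  %head = insertelement <vscale x 4 x i32> undef, i32 -1, i32 0
  %ones = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  ; Now folds to %x during DAG combining.
  %r = and <vscale x 4 x i32> %x, %ones
  ret <vscale x 4 x i32> %r
}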

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/AArch64/sve-expand-div.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 18acb16dbe960..3fe508476532c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5595,9 +5595,9 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
                              SDLoc(N), N1.getValueType());
 
     // fold (and x, -1) -> x, vector edition
-    if (ISD::isBuildVectorAllOnes(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllOnes(N0.getNode()))
       return N1;
-    if (ISD::isBuildVectorAllOnes(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllOnes(N1.getNode()))
       return N0;
 
     // fold (and (masked_load) (build_vec (x, ...))) to zext_masked_load
@@ -6357,10 +6357,10 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
       return N0;
 
     // fold (or x, -1) -> -1, vector edition
-    if (ISD::isBuildVectorAllOnes(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllOnes(N0.getNode()))
       // do not return N0, because undef node may exist in N0
       return DAG.getAllOnesConstant(SDLoc(N), N0.getValueType());
-    if (ISD::isBuildVectorAllOnes(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllOnes(N1.getNode()))
       // do not return N1, because undef node may exist in N1
       return DAG.getAllOnesConstant(SDLoc(N), N1.getValueType());
 
@@ -10051,11 +10051,11 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
   if (SimplifySelectOps(N, N1, N2))
     return SDValue(N, 0);  // Don't revisit N.
 
-  // Fold (vselect (build_vector all_ones), N1, N2) -> N1
-  if (ISD::isBuildVectorAllOnes(N0.getNode()))
+  // Fold (vselect all_ones, N1, N2) -> N1
+  if (ISD::isConstantSplatVectorAllOnes(N0.getNode()))
     return N1;
-  // Fold (vselect (build_vector all_zeros), N1, N2) -> N2
-  if (ISD::isBuildVectorAllZeros(N0.getNode()))
+  // Fold (vselect all_zeros, N1, N2) -> N2
+  if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
     return N2;
 
   // The ConvertSelectToConcatVector function is assuming both the above
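
A sketch of the vselect change above (hypothetical IR, not taken from
the patch's tests): a select whose scalable i1 mask is a constant splat
of true or false now folds straight to one operand, which is what
eliminates the vmset.m/vmclr.m + vmerge.vvm sequences in the RISC-V
vselect tests further down.

define <vscale x 2 x i64> @select_false_mask(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
  ; All-zeros mask: (vselect all_zeros, N1, N2) -> N2 after this patch,
  ; so this returns %b with no vector merge emitted.
  %r = select <vscale x 2 x i1> zeroinitializer, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
  ret <vscale x 2 x i64> %r
}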

diff --git a/llvm/test/CodeGen/AArch64/sve-expand-div.ll b/llvm/test/CodeGen/AArch64/sve-expand-div.ll
index 185054a3ff3e3..4581166f7b5e8 100644
--- a/llvm/test/CodeGen/AArch64/sve-expand-div.ll
+++ b/llvm/test/CodeGen/AArch64/sve-expand-div.ll
@@ -14,8 +14,6 @@ define <vscale x 16 x i8> @sdiv_i8(<vscale x 16 x i8> %a) #0 {
 ; CHECK-NEXT:    ptrue p0.b
 ; CHECK-NEXT:    smulh z0.b, p0/m, z0.b, z1.b
 ; CHECK-NEXT:    lsr z1.b, z0.b, #7
-; CHECK-NEXT:    mov z2.b, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    and z1.d, z1.d, z2.d
 ; CHECK-NEXT:    add z0.b, z0.b, z1.b
 ; CHECK-NEXT:    ret
   %div = sdiv <vscale x 16 x i8> %a, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> undef, i8 3, i32 0), <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer)
@@ -26,12 +24,10 @@ define <vscale x 8 x i16> @sdiv_i16(<vscale x 8 x i16> %a) #0 {
 ; CHECK-LABEL: sdiv_i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #21846
-; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    mov z1.h, w8
+; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    smulh z0.h, p0/m, z0.h, z1.h
 ; CHECK-NEXT:    lsr z1.h, z0.h, #15
-; CHECK-NEXT:    mov z2.h, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    and z1.d, z1.d, z2.d
 ; CHECK-NEXT:    add z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
   %div = sdiv <vscale x 8 x i16> %a, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> undef, i16 3, i32 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer)
@@ -43,12 +39,10 @@ define <vscale x 4 x i32> @sdiv_i32(<vscale x 4 x i32> %a) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov w8, #21846
 ; CHECK-NEXT:    movk w8, #21845, lsl #16
-; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    mov z1.s, w8
+; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    smulh z0.s, p0/m, z0.s, z1.s
 ; CHECK-NEXT:    lsr z1.s, z0.s, #31
-; CHECK-NEXT:    mov z2.s, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    and z1.d, z1.d, z2.d
 ; CHECK-NEXT:    add z0.s, z0.s, z1.s
 ; CHECK-NEXT:    ret
   %div = sdiv <vscale x 4 x i32> %a, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> undef, i32 3, i32 0), <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer)
@@ -60,12 +54,10 @@ define <vscale x 2 x i64> @sdiv_i64(<vscale x 2 x i64> %a) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov x8, #6148914691236517205
 ; CHECK-NEXT:    movk x8, #21846
-; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    mov z1.d, x8
+; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    smulh z0.d, p0/m, z0.d, z1.d
 ; CHECK-NEXT:    lsr z1.d, z0.d, #63
-; CHECK-NEXT:    mov z2.d, #-1 // =0xffffffffffffffff
-; CHECK-NEXT:    and z1.d, z1.d, z2.d
 ; CHECK-NEXT:    add z0.d, z0.d, z1.d
 ; CHECK-NEXT:    ret
   %div = sdiv <vscale x 2 x i64> %a, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 3, i32 0), <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer)
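
The deleted mov/and pairs above come from the signed division-by-constant
expansion: the quotient from smulh is corrected by adding the sign bit,
obtained with a logical shift right by bits-1, and the old DAG masked
that value with an all-ones splat the combine could not see through.
Since the shift already yields 0 or 1, the mask is a no-op and now folds
away. A scalar sketch of the same expansion (illustrative only; the
magic constant is for division by 3):

define i32 @sdiv3_expansion(i32 %x) {
  ; High half of the widened product with the division-by-3 magic
  ; constant 0x55555556 -- the scalar analogue of smulh.
  %wide = sext i32 %x to i64
  %mul = mul i64 %wide, 1431655766
  %hi64 = ashr i64 %mul, 32
  %hi = trunc i64 %hi64 to i32
  ; Sign correction: 1 for negative dividends, 0 otherwise. Masking it
  ; with -1 (the removed vand/and) was redundant.
  %sign = lshr i32 %hi, 31
  %q = add i32 %hi, %sign
  ret i32 %q
}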

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
index f8b18f73fd213..3216789282a2e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
@@ -33,7 +33,6 @@ define <vscale x 1 x i8> @vdiv_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i8> undef, i8 -7, i32 0
@@ -98,7 +97,6 @@ define <vscale x 2 x i8> @vdiv_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i8> undef, i8 -7, i32 0
@@ -139,7 +137,6 @@ define <vscale x 4 x i8> @vdiv_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i8> undef, i8 -7, i32 0
@@ -180,7 +177,6 @@ define <vscale x 8 x i8> @vdiv_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i8> undef, i8 -7, i32 0
@@ -221,7 +217,6 @@ define <vscale x 16 x i8> @vdiv_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 7
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x i8> undef, i8 -7, i32 0
@@ -262,7 +257,6 @@ define <vscale x 32 x i8> @vdiv_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v8, v28, 7
-; CHECK-NEXT:    vand.vi v8, v8, -1
 ; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 32 x i8> undef, i8 -7, i32 0
@@ -303,7 +297,6 @@ define <vscale x 64 x i8> @vdiv_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v8, v16, v8
 ; CHECK-NEXT:    vsra.vi v8, v8, 2
 ; CHECK-NEXT:    vsrl.vi v16, v8, 7
-; CHECK-NEXT:    vand.vi v16, v16, -1
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 64 x i8> undef, i8 -7, i32 0
@@ -343,7 +336,6 @@ define <vscale x 1 x i16> @vdiv_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
@@ -383,7 +375,6 @@ define <vscale x 2 x i16> @vdiv_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
@@ -423,7 +414,6 @@ define <vscale x 4 x i16> @vdiv_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
@@ -463,7 +453,6 @@ define <vscale x 8 x i16> @vdiv_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
@@ -503,7 +492,6 @@ define <vscale x 16 x i16> @vdiv_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v8, v28, 15
-; CHECK-NEXT:    vand.vi v8, v8, -1
 ; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
@@ -543,7 +531,6 @@ define <vscale x 32 x i16> @vdiv_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
 ; CHECK-NEXT:    vsrl.vi v16, v8, 15
-; CHECK-NEXT:    vand.vi v16, v16, -1
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
@@ -582,10 +569,9 @@ define <vscale x 1 x i32> @vdiv_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsub.vv v25, v25, v8
-; CHECK-NEXT:    vsra.vi v26, v25, 2
-; CHECK-NEXT:    vsrl.vi v25, v25, 31
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v8, v26, v25
+; CHECK-NEXT:    vsrl.vi v26, v25, 31
+; CHECK-NEXT:    vsra.vi v25, v25, 2
+; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
   %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
@@ -623,10 +609,9 @@ define <vscale x 2 x i32> @vdiv_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsub.vv v25, v25, v8
-; CHECK-NEXT:    vsra.vi v26, v25, 2
-; CHECK-NEXT:    vsrl.vi v25, v25, 31
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v8, v26, v25
+; CHECK-NEXT:    vsrl.vi v26, v25, 31
+; CHECK-NEXT:    vsra.vi v25, v25, 2
+; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
   %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
@@ -664,10 +649,9 @@ define <vscale x 4 x i32> @vdiv_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
 ; CHECK-NEXT:    vsub.vv v26, v26, v8
-; CHECK-NEXT:    vsra.vi v28, v26, 2
-; CHECK-NEXT:    vsrl.vi v26, v26, 31
-; CHECK-NEXT:    vand.vi v26, v26, -1
-; CHECK-NEXT:    vadd.vv v8, v28, v26
+; CHECK-NEXT:    vsrl.vi v28, v26, 31
+; CHECK-NEXT:    vsra.vi v26, v26, 2
+; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
   %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
@@ -705,10 +689,9 @@ define <vscale x 8 x i32> @vdiv_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
 ; CHECK-NEXT:    vsub.vv v28, v28, v8
-; CHECK-NEXT:    vsra.vi v8, v28, 2
-; CHECK-NEXT:    vsrl.vi v28, v28, 31
-; CHECK-NEXT:    vand.vi v28, v28, -1
-; CHECK-NEXT:    vadd.vv v8, v8, v28
+; CHECK-NEXT:    vsrl.vi v8, v28, 31
+; CHECK-NEXT:    vsra.vi v28, v28, 2
+; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
   %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
@@ -746,10 +729,9 @@ define <vscale x 16 x i32> @vdiv_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
 ; CHECK-NEXT:    vsub.vv v8, v16, v8
-; CHECK-NEXT:    vsra.vi v16, v8, 2
-; CHECK-NEXT:    vsrl.vi v8, v8, 31
-; CHECK-NEXT:    vand.vi v8, v8, -1
-; CHECK-NEXT:    vadd.vv v8, v16, v8
+; CHECK-NEXT:    vsrl.vi v16, v8, 31
+; CHECK-NEXT:    vsra.vi v8, v8, 2
+; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
   %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
@@ -802,11 +784,10 @@ define <vscale x 1 x i64> @vdiv_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
 ; CHECK-NEXT:    vor.vv v25, v26, v25
 ; CHECK-NEXT:    vmulh.vv v25, v8, v25
-; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v25, v25, a0
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v8, v26, v25
+; CHECK-NEXT:    vsrl.vx v26, v25, a0
+; CHECK-NEXT:    vsra.vi v25, v25, 1
+; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
@@ -859,11 +840,10 @@ define <vscale x 2 x i64> @vdiv_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
 ; CHECK-NEXT:    vor.vv v26, v28, v26
 ; CHECK-NEXT:    vmulh.vv v26, v8, v26
-; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v26, v26, a0
-; CHECK-NEXT:    vand.vi v26, v26, -1
-; CHECK-NEXT:    vadd.vv v8, v28, v26
+; CHECK-NEXT:    vsrl.vx v28, v26, a0
+; CHECK-NEXT:    vsra.vi v26, v26, 1
+; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
@@ -916,11 +896,10 @@ define <vscale x 4 x i64> @vdiv_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v12, v12, a0
 ; CHECK-NEXT:    vor.vv v28, v12, v28
 ; CHECK-NEXT:    vmulh.vv v28, v8, v28
-; CHECK-NEXT:    vsra.vi v8, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v28, v28, a0
-; CHECK-NEXT:    vand.vi v28, v28, -1
-; CHECK-NEXT:    vadd.vv v8, v8, v28
+; CHECK-NEXT:    vsrl.vx v8, v28, a0
+; CHECK-NEXT:    vsra.vi v28, v28, 1
+; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
@@ -973,11 +952,10 @@ define <vscale x 8 x i64> @vdiv_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v24, v24, a0
 ; CHECK-NEXT:    vor.vv v16, v24, v16
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    vsra.vi v16, v8, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    vand.vi v8, v8, -1
-; CHECK-NEXT:    vadd.vv v8, v16, v8
+; CHECK-NEXT:    vsrl.vx v16, v8, a0
+; CHECK-NEXT:    vsra.vi v8, v8, 1
+; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
index 96f832571023c..8297a5ba15d0d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
@@ -33,7 +33,6 @@ define <vscale x 1 x i8> @vdiv_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i8> undef, i8 -7, i32 0
@@ -74,7 +73,6 @@ define <vscale x 2 x i8> @vdiv_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i8> undef, i8 -7, i32 0
@@ -115,7 +113,6 @@ define <vscale x 4 x i8> @vdiv_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i8> undef, i8 -7, i32 0
@@ -156,7 +153,6 @@ define <vscale x 8 x i8> @vdiv_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i8> undef, i8 -7, i32 0
@@ -197,7 +193,6 @@ define <vscale x 16 x i8> @vdiv_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 7
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x i8> undef, i8 -7, i32 0
@@ -238,7 +233,6 @@ define <vscale x 32 x i8> @vdiv_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v8, v28, 7
-; CHECK-NEXT:    vand.vi v8, v8, -1
 ; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 32 x i8> undef, i8 -7, i32 0
@@ -279,7 +273,6 @@ define <vscale x 64 x i8> @vdiv_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v8, v16, v8
 ; CHECK-NEXT:    vsra.vi v8, v8, 2
 ; CHECK-NEXT:    vsrl.vi v16, v8, 7
-; CHECK-NEXT:    vand.vi v16, v16, -1
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 64 x i8> undef, i8 -7, i32 0
@@ -319,7 +312,6 @@ define <vscale x 1 x i16> @vdiv_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i16> undef, i16 -7, i32 0
@@ -359,7 +351,6 @@ define <vscale x 2 x i16> @vdiv_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i16> undef, i16 -7, i32 0
@@ -399,7 +390,6 @@ define <vscale x 4 x i16> @vdiv_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i16> undef, i16 -7, i32 0
@@ -439,7 +429,6 @@ define <vscale x 8 x i16> @vdiv_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i16> undef, i16 -7, i32 0
@@ -479,7 +468,6 @@ define <vscale x 16 x i16> @vdiv_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v8, v28, 15
-; CHECK-NEXT:    vand.vi v8, v8, -1
 ; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x i16> undef, i16 -7, i32 0
@@ -519,7 +507,6 @@ define <vscale x 32 x i16> @vdiv_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
 ; CHECK-NEXT:    vsrl.vi v16, v8, 15
-; CHECK-NEXT:    vand.vi v16, v16, -1
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 32 x i16> undef, i16 -7, i32 0
@@ -561,7 +548,6 @@ define <vscale x 1 x i32> @vdiv_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 31
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i32> undef, i32 -7, i32 0
@@ -603,7 +589,6 @@ define <vscale x 2 x i32> @vdiv_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 31
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i32> undef, i32 -7, i32 0
@@ -645,7 +630,6 @@ define <vscale x 4 x i32> @vdiv_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 31
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i32> undef, i32 -7, i32 0
@@ -687,7 +671,6 @@ define <vscale x 8 x i32> @vdiv_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v8, v28, 31
-; CHECK-NEXT:    vand.vi v8, v8, -1
 ; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i32> undef, i32 -7, i32 0
@@ -729,7 +712,6 @@ define <vscale x 16 x i32> @vdiv_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vadd.vv v8, v16, v8
 ; CHECK-NEXT:    vsra.vi v8, v8, 2
 ; CHECK-NEXT:    vsrl.vi v16, v8, 31
-; CHECK-NEXT:    vand.vi v16, v16, -1
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 16 x i32> undef, i32 -7, i32 0
@@ -773,11 +755,10 @@ define <vscale x 1 x i64> @vdiv_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v25, v25, a0
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v8, v26, v25
+; CHECK-NEXT:    vsrl.vx v26, v25, a0
+; CHECK-NEXT:    vsra.vi v25, v25, 1
+; CHECK-NEXT:    vadd.vv v8, v25, v26
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 1 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
@@ -820,11 +801,10 @@ define <vscale x 2 x i64> @vdiv_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v26, v26, a0
-; CHECK-NEXT:    vand.vi v26, v26, -1
-; CHECK-NEXT:    vadd.vv v8, v28, v26
+; CHECK-NEXT:    vsrl.vx v28, v26, a0
+; CHECK-NEXT:    vsra.vi v26, v26, 1
+; CHECK-NEXT:    vadd.vv v8, v26, v28
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 2 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
@@ -867,11 +847,10 @@ define <vscale x 4 x i64> @vdiv_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vsra.vi v8, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v28, v28, a0
-; CHECK-NEXT:    vand.vi v28, v28, -1
-; CHECK-NEXT:    vadd.vv v8, v8, v28
+; CHECK-NEXT:    vsrl.vx v8, v28, a0
+; CHECK-NEXT:    vsra.vi v28, v28, 1
+; CHECK-NEXT:    vadd.vv v8, v28, v8
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 4 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
@@ -914,11 +893,10 @@ define <vscale x 8 x i64> @vdiv_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    vsra.vi v16, v8, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    vand.vi v8, v8, -1
-; CHECK-NEXT:    vadd.vv v8, v16, v8
+; CHECK-NEXT:    vsrl.vx v16, v8, a0
+; CHECK-NEXT:    vsra.vi v8, v8, 1
+; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> undef, i64 -7, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv32.ll
index e4a7ce1293dd2..9a8bbb0bfe670 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv32.ll
@@ -1111,7 +1111,7 @@ define <vscale x 8 x i64> @vor_vx_nxv8i64_3(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vor_vx_nxv8i64_3:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT:    vor.vi v8, v8, -1
+; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> undef, i64 -1, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv64.ll
index d2a78ee5327d2..f9a8134b9a950 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-sdnode-rv64.ll
@@ -1084,7 +1084,7 @@ define <vscale x 8 x i64> @vor_vx_nxv8i64_3(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vor_vx_nxv8i64_3:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT:    vor.vi v8, v8, -1
+; CHECK-NEXT:    vmv.v.i v8, -1
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i64> undef, i64 -1, i32 0
   %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
index b887e51c90a53..2a79b33034ee0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
@@ -33,7 +33,6 @@ define <vscale x 1 x i8> @vrem_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -77,7 +76,6 @@ define <vscale x 2 x i8> @vrem_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -121,7 +119,6 @@ define <vscale x 4 x i8> @vrem_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -165,7 +162,6 @@ define <vscale x 8 x i8> @vrem_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -209,7 +205,6 @@ define <vscale x 16 x i8> @vrem_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 7
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
@@ -253,7 +248,6 @@ define <vscale x 32 x i8> @vrem_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v12, v28, 7
-; CHECK-NEXT:    vand.vi v12, v12, -1
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
@@ -297,7 +291,6 @@ define <vscale x 64 x i8> @vrem_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    vsra.vi v16, v16, 2
 ; CHECK-NEXT:    vsrl.vi v24, v16, 7
-; CHECK-NEXT:    vand.vi v24, v24, -1
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
@@ -340,7 +333,6 @@ define <vscale x 1 x i16> @vrem_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -383,7 +375,6 @@ define <vscale x 2 x i16> @vrem_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -426,7 +417,6 @@ define <vscale x 4 x i16> @vrem_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -469,7 +459,6 @@ define <vscale x 8 x i16> @vrem_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
@@ -512,7 +501,6 @@ define <vscale x 16 x i16> @vrem_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v12, v28, 15
-; CHECK-NEXT:    vand.vi v12, v12, -1
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
@@ -555,7 +543,6 @@ define <vscale x 32 x i16> @vrem_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
 ; CHECK-NEXT:    vsra.vi v16, v16, 1
 ; CHECK-NEXT:    vsrl.vi v24, v16, 15
-; CHECK-NEXT:    vand.vi v24, v24, -1
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
@@ -597,10 +584,9 @@ define <vscale x 1 x i32> @vrem_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsub.vv v25, v25, v8
-; CHECK-NEXT:    vsra.vi v26, v25, 2
-; CHECK-NEXT:    vsrl.vi v25, v25, 31
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v25, v26, v25
+; CHECK-NEXT:    vsrl.vi v26, v25, 31
+; CHECK-NEXT:    vsra.vi v25, v25, 2
+; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v25
@@ -641,10 +627,9 @@ define <vscale x 2 x i32> @vrem_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsub.vv v25, v25, v8
-; CHECK-NEXT:    vsra.vi v26, v25, 2
-; CHECK-NEXT:    vsrl.vi v25, v25, 31
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v25, v26, v25
+; CHECK-NEXT:    vsrl.vi v26, v25, 31
+; CHECK-NEXT:    vsra.vi v25, v25, 2
+; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v25
@@ -685,10 +670,9 @@ define <vscale x 4 x i32> @vrem_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
 ; CHECK-NEXT:    vsub.vv v26, v26, v8
-; CHECK-NEXT:    vsra.vi v28, v26, 2
-; CHECK-NEXT:    vsrl.vi v26, v26, 31
-; CHECK-NEXT:    vand.vi v26, v26, -1
-; CHECK-NEXT:    vadd.vv v26, v28, v26
+; CHECK-NEXT:    vsrl.vi v28, v26, 31
+; CHECK-NEXT:    vsra.vi v26, v26, 2
+; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v26
@@ -729,10 +713,9 @@ define <vscale x 8 x i32> @vrem_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
 ; CHECK-NEXT:    vsub.vv v28, v28, v8
-; CHECK-NEXT:    vsra.vi v12, v28, 2
-; CHECK-NEXT:    vsrl.vi v28, v28, 31
-; CHECK-NEXT:    vand.vi v28, v28, -1
-; CHECK-NEXT:    vadd.vv v28, v12, v28
+; CHECK-NEXT:    vsrl.vi v12, v28, 31
+; CHECK-NEXT:    vsra.vi v28, v28, 2
+; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v28
@@ -773,10 +756,9 @@ define <vscale x 16 x i32> @vrem_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
 ; CHECK-NEXT:    vsub.vv v16, v16, v8
-; CHECK-NEXT:    vsra.vi v24, v16, 2
-; CHECK-NEXT:    vsrl.vi v16, v16, 31
-; CHECK-NEXT:    vand.vi v16, v16, -1
-; CHECK-NEXT:    vadd.vv v16, v24, v16
+; CHECK-NEXT:    vsrl.vi v24, v16, 31
+; CHECK-NEXT:    vsra.vi v16, v16, 2
+; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v16
@@ -832,11 +814,10 @@ define <vscale x 1 x i64> @vrem_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
 ; CHECK-NEXT:    vor.vv v25, v26, v25
 ; CHECK-NEXT:    vmulh.vv v25, v8, v25
-; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v25, v25, a0
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v25, v26, v25
+; CHECK-NEXT:    vsrl.vx v26, v25, a0
+; CHECK-NEXT:    vsra.vi v25, v25, 1
+; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v25
@@ -892,11 +873,10 @@ define <vscale x 2 x i64> @vrem_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
 ; CHECK-NEXT:    vor.vv v26, v28, v26
 ; CHECK-NEXT:    vmulh.vv v26, v8, v26
-; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v26, v26, a0
-; CHECK-NEXT:    vand.vi v26, v26, -1
-; CHECK-NEXT:    vadd.vv v26, v28, v26
+; CHECK-NEXT:    vsrl.vx v28, v26, a0
+; CHECK-NEXT:    vsra.vi v26, v26, 1
+; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v26
@@ -952,11 +932,10 @@ define <vscale x 4 x i64> @vrem_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v12, v12, a0
 ; CHECK-NEXT:    vor.vv v28, v12, v28
 ; CHECK-NEXT:    vmulh.vv v28, v8, v28
-; CHECK-NEXT:    vsra.vi v12, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v28, v28, a0
-; CHECK-NEXT:    vand.vi v28, v28, -1
-; CHECK-NEXT:    vadd.vv v28, v12, v28
+; CHECK-NEXT:    vsrl.vx v12, v28, a0
+; CHECK-NEXT:    vsra.vi v28, v28, 1
+; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v28
@@ -1012,11 +991,10 @@ define <vscale x 8 x i64> @vrem_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v24, v24, a0
 ; CHECK-NEXT:    vor.vv v16, v24, v16
 ; CHECK-NEXT:    vmulh.vv v16, v8, v16
-; CHECK-NEXT:    vsra.vi v24, v16, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v16, v16, a0
-; CHECK-NEXT:    vand.vi v16, v16, -1
-; CHECK-NEXT:    vadd.vv v16, v24, v16
+; CHECK-NEXT:    vsrl.vx v24, v16, a0
+; CHECK-NEXT:    vsra.vi v16, v16, 1
+; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v16

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
index abbecd5de9768..b32684e3063e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
@@ -33,7 +33,6 @@ define <vscale x 1 x i8> @vrem_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -77,7 +76,6 @@ define <vscale x 2 x i8> @vrem_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -121,7 +119,6 @@ define <vscale x 4 x i8> @vrem_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -165,7 +162,6 @@ define <vscale x 8 x i8> @vrem_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -209,7 +205,6 @@ define <vscale x 16 x i8> @vrem_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 7
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
@@ -253,7 +248,6 @@ define <vscale x 32 x i8> @vrem_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v12, v28, 7
-; CHECK-NEXT:    vand.vi v12, v12, -1
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
@@ -297,7 +291,6 @@ define <vscale x 64 x i8> @vrem_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    vsra.vi v16, v16, 2
 ; CHECK-NEXT:    vsrl.vi v24, v16, 7
-; CHECK-NEXT:    vand.vi v24, v24, -1
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
@@ -340,7 +333,6 @@ define <vscale x 1 x i16> @vrem_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -383,7 +375,6 @@ define <vscale x 2 x i16> @vrem_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -426,7 +417,6 @@ define <vscale x 4 x i16> @vrem_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -469,7 +459,6 @@ define <vscale x 8 x i16> @vrem_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
@@ -512,7 +501,6 @@ define <vscale x 16 x i16> @vrem_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v12, v28, 15
-; CHECK-NEXT:    vand.vi v12, v12, -1
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
@@ -555,7 +543,6 @@ define <vscale x 32 x i16> @vrem_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
 ; CHECK-NEXT:    vsra.vi v16, v16, 1
 ; CHECK-NEXT:    vsrl.vi v24, v16, 15
-; CHECK-NEXT:    vand.vi v24, v24, -1
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
@@ -600,7 +587,6 @@ define <vscale x 1 x i32> @vrem_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 31
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -645,7 +631,6 @@ define <vscale x 2 x i32> @vrem_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 31
-; CHECK-NEXT:    vand.vi v26, v26, -1
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
@@ -690,7 +675,6 @@ define <vscale x 4 x i32> @vrem_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 31
-; CHECK-NEXT:    vand.vi v28, v28, -1
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
@@ -735,7 +719,6 @@ define <vscale x 8 x i32> @vrem_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v12, v28, 31
-; CHECK-NEXT:    vand.vi v12, v12, -1
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
@@ -780,7 +763,6 @@ define <vscale x 16 x i32> @vrem_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    vsra.vi v16, v16, 2
 ; CHECK-NEXT:    vsrl.vi v24, v16, 31
-; CHECK-NEXT:    vand.vi v24, v24, -1
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
@@ -827,11 +809,10 @@ define <vscale x 1 x i64> @vrem_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v25, v25, a0
-; CHECK-NEXT:    vand.vi v25, v25, -1
-; CHECK-NEXT:    vadd.vv v25, v26, v25
+; CHECK-NEXT:    vsrl.vx v26, v25, a0
+; CHECK-NEXT:    vsra.vi v25, v25, 1
+; CHECK-NEXT:    vadd.vv v25, v25, v26
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v25, v25, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v25
@@ -877,11 +858,10 @@ define <vscale x 2 x i64> @vrem_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v26, v26, a0
-; CHECK-NEXT:    vand.vi v26, v26, -1
-; CHECK-NEXT:    vadd.vv v26, v28, v26
+; CHECK-NEXT:    vsrl.vx v28, v26, a0
+; CHECK-NEXT:    vsra.vi v26, v26, 1
+; CHECK-NEXT:    vadd.vv v26, v26, v28
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v26, v26, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v26
@@ -927,11 +907,10 @@ define <vscale x 4 x i64> @vrem_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vsra.vi v12, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v28, v28, a0
-; CHECK-NEXT:    vand.vi v28, v28, -1
-; CHECK-NEXT:    vadd.vv v28, v12, v28
+; CHECK-NEXT:    vsrl.vx v12, v28, a0
+; CHECK-NEXT:    vsra.vi v28, v28, 1
+; CHECK-NEXT:    vadd.vv v28, v28, v12
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v28, v28, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v28
@@ -977,11 +956,10 @@ define <vscale x 8 x i64> @vrem_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
-; CHECK-NEXT:    vsra.vi v24, v16, 1
 ; CHECK-NEXT:    addi a0, zero, 63
-; CHECK-NEXT:    vsrl.vx v16, v16, a0
-; CHECK-NEXT:    vand.vi v16, v16, -1
-; CHECK-NEXT:    vadd.vv v16, v24, v16
+; CHECK-NEXT:    vsrl.vx v24, v16, a0
+; CHECK-NEXT:    vsra.vi v16, v16, 1
+; CHECK-NEXT:    vadd.vv v16, v16, v24
 ; CHECK-NEXT:    addi a0, zero, -7
 ; CHECK-NEXT:    vmul.vx v16, v16, a0
 ; CHECK-NEXT:    vsub.vv v8, v8, v16

diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
index ba93b9b6964c1..b2a7b2cff2cdf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
@@ -105,10 +105,6 @@ define <vscale x 8 x half> @vfmerge_zv_nxv8f16(<vscale x 8 x half> %va, <vscale
 define <vscale x 8 x half> @vmerge_truelhs_nxv8f16_0(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; CHECK-LABEL: vmerge_truelhs_nxv8f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    ret
   %mhead = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
   %mtrue = shufflevector <vscale x 8 x i1> %mhead, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
@@ -119,10 +115,7 @@ define <vscale x 8 x half> @vmerge_truelhs_nxv8f16_0(<vscale x 8 x half> %va, <v
 define <vscale x 8 x half> @vmerge_falselhs_nxv8f16_0(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; CHECK-LABEL: vmerge_falselhs_nxv8f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    vmv2r.v v8, v10
 ; CHECK-NEXT:    ret
   %vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x half> %va, <vscale x 8 x half> %vb
   ret <vscale x 8 x half> %vc

diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
index 2808b17d28ba0..6200c75a14599 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
@@ -105,10 +105,6 @@ define <vscale x 8 x half> @vfmerge_zv_nxv8f16(<vscale x 8 x half> %va, <vscale
 define <vscale x 8 x half> @vmerge_truelhs_nxv8f16_0(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; CHECK-LABEL: vmerge_truelhs_nxv8f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
 ; CHECK-NEXT:    ret
   %mhead = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
   %mtrue = shufflevector <vscale x 8 x i1> %mhead, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
@@ -119,10 +115,7 @@ define <vscale x 8 x half> @vmerge_truelhs_nxv8f16_0(<vscale x 8 x half> %va, <v
 define <vscale x 8 x half> @vmerge_falselhs_nxv8f16_0(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
 ; CHECK-LABEL: vmerge_falselhs_nxv8f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v10, v8, v0
+; CHECK-NEXT:    vmv2r.v v8, v10
 ; CHECK-NEXT:    ret
   %vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x half> %va, <vscale x 8 x half> %vb
   ret <vscale x 8 x half> %vc

diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll
index 0fbde8d00ae64..4317a4561a11d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv32.ll
@@ -780,10 +780,6 @@ define <vscale x 8 x i64> @vmerge_iv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 define <vscale x 8 x i64> @vmerge_truelhs_nxv8i64_0(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
 ; CHECK-LABEL: vmerge_truelhs_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
 ; CHECK-NEXT:    ret
   %mhead = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
   %mtrue = shufflevector <vscale x 8 x i1> %mhead, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
@@ -794,10 +790,7 @@ define <vscale x 8 x i64> @vmerge_truelhs_nxv8i64_0(<vscale x 8 x i64> %va, <vsc
 define <vscale x 8 x i64> @vmerge_falselhs_nxv8i64_0(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
 ; CHECK-LABEL: vmerge_falselhs_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    ret
   %vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
   ret <vscale x 8 x i64> %vc

diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll
index 079ae398d6219..13a6cc62ff236 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-int-rv64.ll
@@ -752,10 +752,6 @@ define <vscale x 8 x i64> @vmerge_iv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
 define <vscale x 8 x i64> @vmerge_truelhs_nxv8i64_0(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
 ; CHECK-LABEL: vmerge_truelhs_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmset.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
 ; CHECK-NEXT:    ret
   %mhead = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
   %mtrue = shufflevector <vscale x 8 x i1> %mhead, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
@@ -766,10 +762,7 @@ define <vscale x 8 x i64> @vmerge_truelhs_nxv8i64_0(<vscale x 8 x i64> %va, <vsc
 define <vscale x 8 x i64> @vmerge_falselhs_nxv8i64_0(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
 ; CHECK-LABEL: vmerge_falselhs_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT:    vmclr.m v0
-; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT:    vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    ret
   %vc = select <vscale x 8 x i1> zeroinitializer, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
   ret <vscale x 8 x i64> %vc


        

