[llvm] 1ef5699 - [DAGCombiner] Support fold zero scalar vector.

Jun Ma via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 20 01:29:17 PDT 2021


Author: Jun Ma
Date: 2021-04-20T16:28:43+08:00
New Revision: 1ef5699d1a86bf1864eb4633f752c073630c1641

URL: https://github.com/llvm/llvm-project/commit/1ef5699d1a86bf1864eb4633f752c073630c1641
DIFF: https://github.com/llvm/llvm-project/commit/1ef5699d1a86bf1864eb4633f752c073630c1641.diff

LOG: [DAGCombiner] Support fold zero scalar vector.

This patch changes ISD::isBuildVectorAllZeros to
ISD::isConstantSplatVectorAllZeros, which also handles zero scalar vectors.

TestPlan: check-llvm

Differential Revision: https://reviews.llvm.org/D100813
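
A minimal illustration (mirroring the add_i8_zero test added below; the function
name is only for the example): a zero operand of a scalable vector type is
represented as a splat rather than a BUILD_VECTOR node, so the old predicate
never matched it. With the new predicate the combiner folds the add away and,
for instance, the AArch64 SVE backend emits just a ret:

  define <vscale x 4 x i32> @add_zero_example(<vscale x 4 x i32> %a) {
    %res = add <vscale x 4 x i32> %a, zeroinitializer
    ret <vscale x 4 x i32> %res
  }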

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/AArch64/sve-int-arith.ll
    llvm/test/CodeGen/AArch64/sve-int-log.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 0f53d8dddc02e..18acb16dbe960 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2260,9 +2260,9 @@ SDValue DAGCombiner::visitADDLike(SDNode *N) {
       return FoldedVOp;
 
     // fold (add x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
       return N1;
   }
 
@@ -2543,9 +2543,9 @@ SDValue DAGCombiner::visitADDSAT(SDNode *N) {
     // TODO SimplifyVBinOp
 
     // fold (add_sat x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
       return N1;
   }
 
@@ -3249,7 +3249,7 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
       return FoldedVOp;
 
     // fold (sub x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
   }
 
@@ -3582,7 +3582,7 @@ SDValue DAGCombiner::visitSUBSAT(SDNode *N) {
     // TODO SimplifyVBinOp
 
     // fold (sub_sat x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
   }
 
@@ -4438,8 +4438,8 @@ SDValue DAGCombiner::visitMULHS(SDNode *N) {
   if (VT.isVector()) {
     // fold (mulhs x, 0) -> 0
     // do not return N0/N1, because undef node may exist.
-    if (ISD::isBuildVectorAllZeros(N0.getNode()) ||
-        ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()) ||
+        ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return DAG.getConstant(0, DL, VT);
   }
 
@@ -4486,8 +4486,8 @@ SDValue DAGCombiner::visitMULHU(SDNode *N) {
   if (VT.isVector()) {
     // fold (mulhu x, 0) -> 0
     // do not return N0/N1, because undef node may exist.
-    if (ISD::isBuildVectorAllZeros(N0.getNode()) ||
-        ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()) ||
+        ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return DAG.getConstant(0, DL, VT);
   }
 
@@ -5585,11 +5585,11 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
       return FoldedVOp;
 
     // fold (and x, 0) -> 0, vector edition
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
       // do not return N0, because undef node may exist in N0
       return DAG.getConstant(APInt::getNullValue(N0.getScalarValueSizeInBits()),
                              SDLoc(N), N0.getValueType());
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       // do not return N1, because undef node may exist in N1
       return DAG.getConstant(APInt::getNullValue(N1.getScalarValueSizeInBits()),
                              SDLoc(N), N1.getValueType());
@@ -6351,9 +6351,9 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
       return FoldedVOp;
 
     // fold (or x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
       return N1;
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
 
     // fold (or x, -1) -> -1, vector edition
@@ -7712,9 +7712,9 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
       return FoldedVOp;
 
     // fold (xor x, 0) -> x, vector edition
-    if (ISD::isBuildVectorAllZeros(N0.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N0.getNode()))
       return N1;
-    if (ISD::isBuildVectorAllZeros(N1.getNode()))
+    if (ISD::isConstantSplatVectorAllZeros(N1.getNode()))
       return N0;
   }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-int-arith.ll
index a0f89d86b7398..2b0ce091a0902 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-arith.ll
@@ -37,6 +37,14 @@ define <vscale x 16 x i8> @add_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %res
 }
 
+define <vscale x 16 x i8> @add_i8_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: add_i8_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = add <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 2 x i64> @sub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: sub_i64:
 ; CHECK:       // %bb.0:
@@ -73,6 +81,14 @@ define <vscale x 16 x i8> @sub_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
   ret <vscale x 16 x i8> %res
 }
 
+define <vscale x 16 x i8> @sub_i8_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: sub_i8_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = sub <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 16 x i8> @abs_nxv16i8(<vscale x 16 x i8> %a) {
 ; CHECK-LABEL: abs_nxv16i8:
 ; CHECK:       // %bb.0:
@@ -166,6 +182,14 @@ define <vscale x 4 x i32> @sqadd_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %
   ret <vscale x 4 x i32> %res
 }
 
+define <vscale x 4 x i32> @sqadd_i32_zero(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: sqadd_i32_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32>  %a, <vscale x 4 x i32> zeroinitializer)
+  ret <vscale x 4 x i32> %res
+}
+
 define <vscale x 8 x i16> @sqadd_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
 ; CHECK-LABEL: sqadd_i16:
 ; CHECK:       // %bb.0:
@@ -194,6 +218,14 @@ define <vscale x 2 x i64> @sqsub_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %
   ret <vscale x 2 x i64> %res
 }
 
+define <vscale x 2 x i64> @sqsub_i64_zero(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: sqsub_i64_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64>  %a, <vscale x 2 x i64> zeroinitializer)
+  ret <vscale x 2 x i64> %res
+}
+
 define <vscale x 4 x i32> @sqsub_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: sqsub_i32:
 ; CHECK:       // %bb.0:

diff --git a/llvm/test/CodeGen/AArch64/sve-int-log.ll b/llvm/test/CodeGen/AArch64/sve-int-log.ll
index 116112f525e10..2da05d30a9e02 100644
--- a/llvm/test/CodeGen/AArch64/sve-int-log.ll
+++ b/llvm/test/CodeGen/AArch64/sve-int-log.ll
@@ -37,6 +37,15 @@ define <vscale x 16 x i8> @and_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
   ret <vscale x 16 x i8> %res
 }
 
+define <vscale x 16 x i8> @and_b_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: and_b_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.b, #0 // =0x0
+; CHECK-NEXT:    ret
+  %res = and <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 2 x i1> @and_pred_d(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
 ; CHECK-LABEL: and_pred_d:
 ; CHECK:       // %bb.0:
@@ -113,6 +122,14 @@ define <vscale x 16 x i8> @or_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
   ret <vscale x 16 x i8> %res
 }
 
+define <vscale x 16 x i8> @or_b_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: or_b_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = or <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 2 x i1> @or_pred_d(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
 ; CHECK-LABEL: or_pred_d:
 ; CHECK:       // %bb.0:
@@ -189,6 +206,14 @@ define <vscale x 16 x i8> @xor_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
   ret <vscale x 16 x i8> %res
 }
 
+define <vscale x 16 x i8> @xor_b_zero(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: xor_b_zero:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %res = xor <vscale x 16 x i8> %a, zeroinitializer
+  ret <vscale x 16 x i8> %res
+}
+
 define <vscale x 2 x i1> @xor_pred_d(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) {
 ; CHECK-LABEL: xor_pred_d:
 ; CHECK:       // %bb.0:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
index bbfc09d1c2767..f8b18f73fd213 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
@@ -341,7 +341,6 @@ define <vscale x 1 x i16> @vdiv_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -382,7 +381,6 @@ define <vscale x 2 x i16> @vdiv_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -423,7 +421,6 @@ define <vscale x 4 x i16> @vdiv_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -464,7 +461,6 @@ define <vscale x 8 x i16> @vdiv_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
 ; CHECK-NEXT:    vand.vi v28, v28, -1
@@ -505,7 +501,6 @@ define <vscale x 16 x i16> @vdiv_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v8, v28, 15
 ; CHECK-NEXT:    vand.vi v8, v8, -1
@@ -546,7 +541,6 @@ define <vscale x 32 x i16> @vdiv_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    vadd.vi v8, v8, 0
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
 ; CHECK-NEXT:    vsrl.vi v16, v8, 15
 ; CHECK-NEXT:    vand.vi v16, v16, -1
@@ -808,7 +802,6 @@ define <vscale x 1 x i64> @vdiv_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
 ; CHECK-NEXT:    vor.vv v25, v26, v25
 ; CHECK-NEXT:    vmulh.vv v25, v8, v25
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v25, v25, a0
@@ -866,7 +859,6 @@ define <vscale x 2 x i64> @vdiv_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
 ; CHECK-NEXT:    vor.vv v26, v28, v26
 ; CHECK-NEXT:    vmulh.vv v26, v8, v26
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
@@ -924,7 +916,6 @@ define <vscale x 4 x i64> @vdiv_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v12, v12, a0
 ; CHECK-NEXT:    vor.vv v28, v12, v28
 ; CHECK-NEXT:    vmulh.vv v28, v8, v28
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v8, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
@@ -982,7 +973,6 @@ define <vscale x 8 x i64> @vdiv_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v24, v24, a0
 ; CHECK-NEXT:    vor.vv v16, v24, v16
 ; CHECK-NEXT:    vmulh.vv v8, v8, v16
-; CHECK-NEXT:    vadd.vi v8, v8, 0
 ; CHECK-NEXT:    vsra.vi v16, v8, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
index b8f331e78b5b7..96f832571023c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
@@ -317,7 +317,6 @@ define <vscale x 1 x i16> @vdiv_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -358,7 +357,6 @@ define <vscale x 2 x i16> @vdiv_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -399,7 +397,6 @@ define <vscale x 4 x i16> @vdiv_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -440,7 +437,6 @@ define <vscale x 8 x i16> @vdiv_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
 ; CHECK-NEXT:    vand.vi v28, v28, -1
@@ -481,7 +477,6 @@ define <vscale x 16 x i16> @vdiv_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v8, v28, 15
 ; CHECK-NEXT:    vand.vi v8, v8, -1
@@ -522,7 +517,6 @@ define <vscale x 32 x i16> @vdiv_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    vadd.vi v8, v8, 0
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
 ; CHECK-NEXT:    vsrl.vi v16, v8, 15
 ; CHECK-NEXT:    vand.vi v16, v16, -1
@@ -779,7 +773,6 @@ define <vscale x 1 x i64> @vdiv_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v25, v25, a0
@@ -827,7 +820,6 @@ define <vscale x 2 x i64> @vdiv_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
@@ -875,7 +867,6 @@ define <vscale x 4 x i64> @vdiv_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v8, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
@@ -923,7 +914,6 @@ define <vscale x 8 x i64> @vdiv_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v8, v8, a0
-; CHECK-NEXT:    vadd.vi v8, v8, 0
 ; CHECK-NEXT:    vsra.vi v16, v8, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v8, v8, a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
index 3a5a7b4209333..b887e51c90a53 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
@@ -338,7 +338,6 @@ define <vscale x 1 x i16> @vrem_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -382,7 +381,6 @@ define <vscale x 2 x i16> @vrem_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -426,7 +424,6 @@ define <vscale x 4 x i16> @vrem_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -470,7 +467,6 @@ define <vscale x 8 x i16> @vrem_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
 ; CHECK-NEXT:    vand.vi v28, v28, -1
@@ -514,7 +510,6 @@ define <vscale x 16 x i16> @vrem_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v12, v28, 15
 ; CHECK-NEXT:    vand.vi v12, v12, -1
@@ -558,7 +553,6 @@ define <vscale x 32 x i16> @vrem_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
-; CHECK-NEXT:    vadd.vi v16, v16, 0
 ; CHECK-NEXT:    vsra.vi v16, v16, 1
 ; CHECK-NEXT:    vsrl.vi v24, v16, 15
 ; CHECK-NEXT:    vand.vi v24, v24, -1
@@ -838,7 +832,6 @@ define <vscale x 1 x i64> @vrem_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
 ; CHECK-NEXT:    vor.vv v25, v26, v25
 ; CHECK-NEXT:    vmulh.vv v25, v8, v25
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v25, v25, a0
@@ -899,7 +892,6 @@ define <vscale x 2 x i64> @vrem_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
 ; CHECK-NEXT:    vor.vv v26, v28, v26
 ; CHECK-NEXT:    vmulh.vv v26, v8, v26
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
@@ -960,7 +952,6 @@ define <vscale x 4 x i64> @vrem_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v12, v12, a0
 ; CHECK-NEXT:    vor.vv v28, v12, v28
 ; CHECK-NEXT:    vmulh.vv v28, v8, v28
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v12, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
@@ -1021,7 +1012,6 @@ define <vscale x 8 x i64> @vrem_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    vsrl.vx v24, v24, a0
 ; CHECK-NEXT:    vor.vv v16, v24, v16
 ; CHECK-NEXT:    vmulh.vv v16, v8, v16
-; CHECK-NEXT:    vadd.vi v16, v16, 0
 ; CHECK-NEXT:    vsra.vi v24, v16, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v16, v16, a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
index 2436671280187..abbecd5de9768 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
@@ -338,7 +338,6 @@ define <vscale x 1 x i16> @vrem_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -382,7 +381,6 @@ define <vscale x 2 x i16> @vrem_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -426,7 +424,6 @@ define <vscale x 4 x i16> @vrem_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v25, v25, 1
 ; CHECK-NEXT:    vsrl.vi v26, v25, 15
 ; CHECK-NEXT:    vand.vi v26, v26, -1
@@ -470,7 +467,6 @@ define <vscale x 8 x i16> @vrem_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v26, v26, 1
 ; CHECK-NEXT:    vsrl.vi v28, v26, 15
 ; CHECK-NEXT:    vand.vi v28, v28, -1
@@ -514,7 +510,6 @@ define <vscale x 16 x i16> @vrem_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v28, v28, 1
 ; CHECK-NEXT:    vsrl.vi v12, v28, 15
 ; CHECK-NEXT:    vand.vi v12, v12, -1
@@ -558,7 +553,6 @@ define <vscale x 32 x i16> @vrem_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-NEXT:    addiw a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
-; CHECK-NEXT:    vadd.vi v16, v16, 0
 ; CHECK-NEXT:    vsra.vi v16, v16, 1
 ; CHECK-NEXT:    vsrl.vi v24, v16, 15
 ; CHECK-NEXT:    vand.vi v24, v24, -1
@@ -833,7 +827,6 @@ define <vscale x 1 x i64> @vrem_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vadd.vi v25, v25, 0
 ; CHECK-NEXT:    vsra.vi v26, v25, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v25, v25, a0
@@ -884,7 +877,6 @@ define <vscale x 2 x i64> @vrem_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vadd.vi v26, v26, 0
 ; CHECK-NEXT:    vsra.vi v28, v26, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v26, v26, a0
@@ -935,7 +927,6 @@ define <vscale x 4 x i64> @vrem_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vadd.vi v28, v28, 0
 ; CHECK-NEXT:    vsra.vi v12, v28, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v28, v28, a0
@@ -986,7 +977,6 @@ define <vscale x 8 x i64> @vrem_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-NEXT:    addi a0, a0, 1755
 ; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
-; CHECK-NEXT:    vadd.vi v16, v16, 0
 ; CHECK-NEXT:    vsra.vi v24, v16, 1
 ; CHECK-NEXT:    addi a0, zero, 63
 ; CHECK-NEXT:    vsrl.vx v16, v16, a0

