[llvm] b889c6e - [DAG] Allow isNullOrNullSplat to see truncated zeroes

David Green via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 8 02:19:08 PDT 2021


Author: David Green
Date: 2021-06-08T10:18:58+01:00
New Revision: b889c6ee9911b72a58986d528f42dd18cbdf92d7

URL: https://github.com/llvm/llvm-project/commit/b889c6ee9911b72a58986d528f42dd18cbdf92d7
DIFF: https://github.com/llvm/llvm-project/commit/b889c6ee9911b72a58986d528f42dd18cbdf92d7.diff

LOG: [DAG] Allow isNullOrNullSplat to see truncated zeroes

This sets the AllowTruncation flag on isConstOrConstSplat in
isNullOrNullSplat, allowing it to see truncated constant zeroes on
architectures such as AArch64, where only i32 and i64 are legal. As a
truncation of 0 is always 0, this should always be valid, allowing some
extra folding to happen, including some of the cases from D103755.
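
For illustration, here is a minimal sketch of the kind of combine this
enables (foldAddOfNeg is a hypothetical helper written for this note,
assuming the usual SelectionDAG headers and namespace; the real fold
lives in DAGCombiner). It is what turns the vrsub.vi/vadd.vv pairs in
the RISC-V tests below into a single vsub.vv, since on RVV a zero splat
of i8 elements is a SPLAT_VECTOR whose operand is an XLen-wide (i64)
zero, i.e. an implicitly truncating splat that isNullOrNullSplat only
recognises with AllowTruncation set:

  // (add x, (sub 0, y)) --> (sub x, y), once the 0 splat is seen
  // through the implicit truncation.
  static SDValue foldAddOfNeg(SDValue N0, SDValue N1, SelectionDAG &DAG,
                              const SDLoc &DL) {
    if (N1.getOpcode() == ISD::SUB && isNullOrNullSplat(N1.getOperand(0)))
      return DAG.getNode(ISD::SUB, DL, N0.getValueType(), N0,
                         N1.getOperand(1));
    return SDValue();
  }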

Differential Revision: https://reviews.llvm.org/D103756

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
    llvm/test/CodeGen/AArch64/vecreduce-bool.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 66fb6da8772f2..6d2827e85742d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -9601,7 +9601,8 @@ ConstantFPSDNode *llvm::isConstOrConstSplatFP(SDValue N,
 
 bool llvm::isNullOrNullSplat(SDValue N, bool AllowUndefs) {
   // TODO: may want to use peekThroughBitcast() here.
-  ConstantSDNode *C = isConstOrConstSplat(N, AllowUndefs);
+  ConstantSDNode *C =
+      isConstOrConstSplat(N, AllowUndefs, /*AllowTruncation=*/true);
   return C && C->isNullValue();
 }
 

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
index 1b5692c01332c..98d5bf43b9664 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
@@ -96,9 +96,8 @@ define i32 @reduce_and_v16(<16 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 define i32 @reduce_and_v32(<32 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-LABEL: reduce_and_v32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
-; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    uminv b0, v0.16b
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    tst w8, #0x1
@@ -191,9 +190,8 @@ define i32 @reduce_or_v16(<16 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 define i32 @reduce_or_v32(<32 x i8> %a0, i32 %a1, i32 %a2) nounwind {
 ; CHECK-LABEL: reduce_or_v32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmlt v1.16b, v1.16b, #0
-; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    cmlt v0.16b, v0.16b, #0
 ; CHECK-NEXT:    umaxv b0, v0.16b
 ; CHECK-NEXT:    fmov w8, s0
 ; CHECK-NEXT:    tst w8, #0x1

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
index edf741e83f4a6..3a28a99993af2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv32.ll
@@ -29,8 +29,7 @@ define <vscale x 1 x i8> @vdiv_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
@@ -93,8 +92,7 @@ define <vscale x 2 x i8> @vdiv_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
@@ -133,8 +131,7 @@ define <vscale x 4 x i8> @vdiv_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
@@ -173,8 +170,7 @@ define <vscale x 8 x i8> @vdiv_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
@@ -213,8 +209,7 @@ define <vscale x 16 x i8> @vdiv_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vrsub.vi v28, v8, 0
-; CHECK-NEXT:    vadd.vv v26, v26, v28
+; CHECK-NEXT:    vsub.vv v26, v26, v8
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 7
 ; CHECK-NEXT:    vadd.vv v8, v26, v28
@@ -253,8 +248,7 @@ define <vscale x 32 x i8> @vdiv_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vrsub.vi v8, v8, 0
-; CHECK-NEXT:    vadd.vv v28, v28, v8
+; CHECK-NEXT:    vsub.vv v28, v28, v8
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v8, v28, 7
 ; CHECK-NEXT:    vadd.vv v8, v28, v8
@@ -293,8 +287,7 @@ define <vscale x 64 x i8> @vdiv_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
-; CHECK-NEXT:    vrsub.vi v8, v8, 0
-; CHECK-NEXT:    vadd.vv v8, v16, v8
+; CHECK-NEXT:    vsub.vv v8, v16, v8
 ; CHECK-NEXT:    vsra.vi v8, v8, 2
 ; CHECK-NEXT:    vsrl.vi v16, v8, 7
 ; CHECK-NEXT:    vadd.vv v8, v8, v16

diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
index 8297a5ba15d0d..393c394f2cece 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-sdnode-rv64.ll
@@ -29,8 +29,7 @@ define <vscale x 1 x i8> @vdiv_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
@@ -69,8 +68,7 @@ define <vscale x 2 x i8> @vdiv_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
@@ -109,8 +107,7 @@ define <vscale x 4 x i8> @vdiv_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
@@ -149,8 +146,7 @@ define <vscale x 8 x i8> @vdiv_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
@@ -189,8 +185,7 @@ define <vscale x 16 x i8> @vdiv_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vrsub.vi v28, v8, 0
-; CHECK-NEXT:    vadd.vv v26, v26, v28
+; CHECK-NEXT:    vsub.vv v26, v26, v8
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 7
 ; CHECK-NEXT:    vadd.vv v8, v26, v28
@@ -229,8 +224,7 @@ define <vscale x 32 x i8> @vdiv_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vrsub.vi v8, v8, 0
-; CHECK-NEXT:    vadd.vv v28, v28, v8
+; CHECK-NEXT:    vsub.vv v28, v28, v8
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v8, v28, 7
 ; CHECK-NEXT:    vadd.vv v8, v28, v8
@@ -269,8 +263,7 @@ define <vscale x 64 x i8> @vdiv_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
-; CHECK-NEXT:    vrsub.vi v8, v8, 0
-; CHECK-NEXT:    vadd.vv v8, v16, v8
+; CHECK-NEXT:    vsub.vv v8, v16, v8
 ; CHECK-NEXT:    vsra.vi v8, v8, 2
 ; CHECK-NEXT:    vsrl.vi v16, v8, 7
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
@@ -544,8 +537,7 @@ define <vscale x 1 x i32> @vdiv_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    addiw a0, a0, -1171
 ; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 31
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
@@ -585,8 +577,7 @@ define <vscale x 2 x i32> @vdiv_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    addiw a0, a0, -1171
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 31
 ; CHECK-NEXT:    vadd.vv v8, v25, v26
@@ -626,8 +617,7 @@ define <vscale x 4 x i32> @vdiv_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    addiw a0, a0, -1171
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vrsub.vi v28, v8, 0
-; CHECK-NEXT:    vadd.vv v26, v26, v28
+; CHECK-NEXT:    vsub.vv v26, v26, v8
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 31
 ; CHECK-NEXT:    vadd.vv v8, v26, v28
@@ -667,8 +657,7 @@ define <vscale x 8 x i32> @vdiv_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    addiw a0, a0, -1171
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vrsub.vi v8, v8, 0
-; CHECK-NEXT:    vadd.vv v28, v28, v8
+; CHECK-NEXT:    vsub.vv v28, v28, v8
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v8, v28, 31
 ; CHECK-NEXT:    vadd.vv v8, v28, v8
@@ -708,8 +697,7 @@ define <vscale x 16 x i32> @vdiv_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    addiw a0, a0, -1171
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
-; CHECK-NEXT:    vrsub.vi v8, v8, 0
-; CHECK-NEXT:    vadd.vv v8, v16, v8
+; CHECK-NEXT:    vsub.vv v8, v16, v8
 ; CHECK-NEXT:    vsra.vi v8, v8, 2
 ; CHECK-NEXT:    vsrl.vi v16, v8, 31
 ; CHECK-NEXT:    vadd.vv v8, v8, v16

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
index ba37c0cf04c76..3a5a90566bf80 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv32.ll
@@ -29,8 +29,7 @@ define <vscale x 1 x i8> @vrem_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
@@ -72,8 +71,7 @@ define <vscale x 2 x i8> @vrem_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
@@ -115,8 +113,7 @@ define <vscale x 4 x i8> @vrem_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
@@ -158,8 +155,7 @@ define <vscale x 8 x i8> @vrem_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
@@ -201,8 +197,7 @@ define <vscale x 16 x i8> @vrem_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vrsub.vi v28, v8, 0
-; CHECK-NEXT:    vadd.vv v26, v26, v28
+; CHECK-NEXT:    vsub.vv v26, v26, v8
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 7
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
@@ -244,8 +239,7 @@ define <vscale x 32 x i8> @vrem_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vrsub.vi v12, v8, 0
-; CHECK-NEXT:    vadd.vv v28, v28, v12
+; CHECK-NEXT:    vsub.vv v28, v28, v8
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v12, v28, 7
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
@@ -287,8 +281,7 @@ define <vscale x 64 x i8> @vrem_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
-; CHECK-NEXT:    vrsub.vi v24, v8, 0
-; CHECK-NEXT:    vadd.vv v16, v16, v24
+; CHECK-NEXT:    vsub.vv v16, v16, v8
 ; CHECK-NEXT:    vsra.vi v16, v16, 2
 ; CHECK-NEXT:    vsrl.vi v24, v16, 7
 ; CHECK-NEXT:    vadd.vv v16, v16, v24

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
index b32684e3063e8..e5074ca5c7b7c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-sdnode-rv64.ll
@@ -29,8 +29,7 @@ define <vscale x 1 x i8> @vrem_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
@@ -72,8 +71,7 @@ define <vscale x 2 x i8> @vrem_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
@@ -115,8 +113,7 @@ define <vscale x 4 x i8> @vrem_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
@@ -158,8 +155,7 @@ define <vscale x 8 x i8> @vrem_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 7
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
@@ -201,8 +197,7 @@ define <vscale x 16 x i8> @vrem_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vrsub.vi v28, v8, 0
-; CHECK-NEXT:    vadd.vv v26, v26, v28
+; CHECK-NEXT:    vsub.vv v26, v26, v8
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 7
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
@@ -244,8 +239,7 @@ define <vscale x 32 x i8> @vrem_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vrsub.vi v12, v8, 0
-; CHECK-NEXT:    vadd.vv v28, v28, v12
+; CHECK-NEXT:    vsub.vv v28, v28, v8
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v12, v28, 7
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
@@ -287,8 +281,7 @@ define <vscale x 64 x i8> @vrem_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-NEXT:    addi a0, zero, 109
 ; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
-; CHECK-NEXT:    vrsub.vi v24, v8, 0
-; CHECK-NEXT:    vadd.vv v16, v16, v24
+; CHECK-NEXT:    vsub.vv v16, v16, v8
 ; CHECK-NEXT:    vsra.vi v16, v16, 2
 ; CHECK-NEXT:    vsrl.vi v24, v16, 7
 ; CHECK-NEXT:    vadd.vv v16, v16, v24
@@ -583,8 +576,7 @@ define <vscale x 1 x i32> @vrem_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-NEXT:    addiw a0, a0, -1171
 ; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 31
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
@@ -627,8 +619,7 @@ define <vscale x 2 x i32> @vrem_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-NEXT:    addiw a0, a0, -1171
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vmulh.vx v25, v8, a0
-; CHECK-NEXT:    vrsub.vi v26, v8, 0
-; CHECK-NEXT:    vadd.vv v25, v25, v26
+; CHECK-NEXT:    vsub.vv v25, v25, v8
 ; CHECK-NEXT:    vsra.vi v25, v25, 2
 ; CHECK-NEXT:    vsrl.vi v26, v25, 31
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
@@ -671,8 +662,7 @@ define <vscale x 4 x i32> @vrem_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-NEXT:    addiw a0, a0, -1171
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vmulh.vx v26, v8, a0
-; CHECK-NEXT:    vrsub.vi v28, v8, 0
-; CHECK-NEXT:    vadd.vv v26, v26, v28
+; CHECK-NEXT:    vsub.vv v26, v26, v8
 ; CHECK-NEXT:    vsra.vi v26, v26, 2
 ; CHECK-NEXT:    vsrl.vi v28, v26, 31
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
@@ -715,8 +705,7 @@ define <vscale x 8 x i32> @vrem_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-NEXT:    addiw a0, a0, -1171
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vmulh.vx v28, v8, a0
-; CHECK-NEXT:    vrsub.vi v12, v8, 0
-; CHECK-NEXT:    vadd.vv v28, v28, v12
+; CHECK-NEXT:    vsub.vv v28, v28, v8
 ; CHECK-NEXT:    vsra.vi v28, v28, 2
 ; CHECK-NEXT:    vsrl.vi v12, v28, 31
 ; CHECK-NEXT:    vadd.vv v28, v28, v12
@@ -759,8 +748,7 @@ define <vscale x 16 x i32> @vrem_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-NEXT:    addiw a0, a0, -1171
 ; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vmulh.vx v16, v8, a0
-; CHECK-NEXT:    vrsub.vi v24, v8, 0
-; CHECK-NEXT:    vadd.vv v16, v16, v24
+; CHECK-NEXT:    vsub.vv v16, v16, v8
 ; CHECK-NEXT:    vsra.vi v16, v16, 2
 ; CHECK-NEXT:    vsrl.vi v24, v16, 31
 ; CHECK-NEXT:    vadd.vv v16, v16, v24

