[llvm] [RISCV] Add DAG combine for (vmv_s_x_vl (undef) (vmv_x_s X)). (PR #90524)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 29 23:01:18 PDT 2024


https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/90524

From 09563a1f6a152647bd149b2d04edc576f4a02df2 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 29 Apr 2024 14:48:14 -0700
Subject: [PATCH 1/4] [RISCV] Add DAG combine for (vmv_s_x_vl (undef) (vmv_x_s
 X)).

We can use the original vector as long as the passthru is undef and the
type of X matches the result type of the vmv_s_x_vl: vmv_x_s reads
element 0 of its source, and with an undef passthru only element 0 of
the vmv_s_x_vl result is defined, so the source vector itself is a
valid result.
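
A sketch of the rewrite in the s-expression notation of the title
(illustrative, not compiler output; X is the source vector, vT the
shared vector type):

  (vmv_s_x_vl:vT (undef) (vmv_x_s X:vT) VL)  -->  X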
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp     |  5 +++++
 .../rvv/fixed-vector-i8-index-cornercase.ll     | 17 +++++------------
 2 files changed, 10 insertions(+), 12 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 68f4ec5ef49f31..65f2736d9e47c5 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16791,6 +16791,11 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     SDValue Scalar = N->getOperand(1);
     SDValue VL = N->getOperand(2);
 
+    if (Scalar.getOpcode() == RISCVISD::VMV_X_S &&
+        Scalar.getOperand(0).getValueType() == N->getValueType(0) &&
+        isa<ConstantSDNode>(N->getOperand(2)) && N->getOperand(0).isUndef())
+      return Scalar.getOperand(0);
+
     // Use M1 or smaller to avoid over constraining register allocation
     const MVT M1VT = getLMUL1VT(VT);
     if (M1VT.bitsLT(VT)) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
index 2874db6debd740..875f4f239028b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
@@ -26,17 +26,13 @@ define <512 x i8> @single_source(<512 x i8> %a) {
 ; CHECK-NEXT:    vmv.v.x v8, a1
 ; CHECK-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v17, v16, 5
-; CHECK-NEXT:    vmv.x.s a0, v17
-; CHECK-NEXT:    vmv.s.x v24, a0
+; CHECK-NEXT:    vslidedown.vi v24, v16, 5
 ; CHECK-NEXT:    li a0, 432
 ; CHECK-NEXT:    li a1, 431
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
 ; CHECK-NEXT:    vslideup.vx v8, v24, a1
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v16, v16, 4
-; CHECK-NEXT:    vmv.x.s a0, v16
-; CHECK-NEXT:    vmv.s.x v16, a0
 ; CHECK-NEXT:    li a0, 466
 ; CHECK-NEXT:    li a1, 465
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
@@ -109,20 +105,17 @@ define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) {
 ; CHECK-NEXT:    addi a1, sp, 512
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vse8.v v8, (a1)
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v0, v24, 5
 ; CHECK-NEXT:    vmv.x.s a1, v24
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vmv.v.x v8, a1
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v25, v24, 5
-; CHECK-NEXT:    vmv.x.s a1, v25
-; CHECK-NEXT:    vmv.s.x v0, a1
 ; CHECK-NEXT:    li a1, 432
 ; CHECK-NEXT:    li a2, 431
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
 ; CHECK-NEXT:    vslideup.vx v8, v0, a2
 ; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v24, v24, 4
-; CHECK-NEXT:    vmv.x.s a1, v24
-; CHECK-NEXT:    vmv.s.x v24, a1
 ; CHECK-NEXT:    li a1, 466
 ; CHECK-NEXT:    li a2, 465
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
@@ -130,9 +123,9 @@ define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) {
 ; CHECK-NEXT:    vslideup.vx v8, v24, a2
 ; CHECK-NEXT:    vmv.s.x v24, a1
 ; CHECK-NEXT:    li a1, 478
+; CHECK-NEXT:    li a2, 477
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
 ; CHECK-NEXT:    lbu a1, 1012(sp)
-; CHECK-NEXT:    li a2, 477
 ; CHECK-NEXT:    vslideup.vx v8, v24, a2
 ; CHECK-NEXT:    vmv.s.x v24, a1
 ; CHECK-NEXT:    li a1, 501

From 190d1a478e2678d4d258f6f16ebd4185c4089c1c Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 29 Apr 2024 21:29:43 -0700
Subject: [PATCH 2/4] fixup! Remove constant check.

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 65f2736d9e47c5..0fbbb2b2bd66a2 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16793,7 +16793,7 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
 
     if (Scalar.getOpcode() == RISCVISD::VMV_X_S &&
         Scalar.getOperand(0).getValueType() == N->getValueType(0) &&
-        isa<ConstantSDNode>(N->getOperand(2)) && N->getOperand(0).isUndef())
+        N->getOperand(0).isUndef())
       return Scalar.getOperand(0);
 
     // Use M1 or smaller to avoid over constraining register allocation

From 78a75f56e6624adc89b4b701ab6c3a617a9f8d90 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 29 Apr 2024 22:11:30 -0700
Subject: [PATCH 3/4] fixup! Use Passthru.isUndef()

---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 0fbbb2b2bd66a2..fe8edcf39681db 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16791,9 +16791,8 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     SDValue Scalar = N->getOperand(1);
     SDValue VL = N->getOperand(2);
 
-    if (Scalar.getOpcode() == RISCVISD::VMV_X_S &&
-        Scalar.getOperand(0).getValueType() == N->getValueType(0) &&
-        N->getOperand(0).isUndef())
+    if (Scalar.getOpcode() == RISCVISD::VMV_X_S && Passthru.isUndef() &&
+        Scalar.getOperand(0).getValueType() == N->getValueType(0))
       return Scalar.getOperand(0);
 
     // Use M1 or smaller to avoid over constraining register allocation

From c622f32db28a441c0129d4e65ec18b2ec253220a Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 29 Apr 2024 22:58:49 -0700
Subject: [PATCH 4/4] fixup! Rebase

---
 .../rvv/fixed-vectors-reduction-int-vp.ll     | 26 +++++++++----------
 .../CodeGen/RISCV/rvv/vreductions-int-vp.ll   |  7 ++---
 2 files changed, 14 insertions(+), 19 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
index b874a4477f5d17..02a989a9699606 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -802,27 +802,25 @@ define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1>
 ; CHECK-LABEL: vpreduce_xor_v64i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
+; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vslidedown.vi v24, v0, 4
-; CHECK-NEXT:    addi a2, a1, -32
-; CHECK-NEXT:    sltu a3, a1, a2
-; CHECK-NEXT:    addi a3, a3, -1
-; CHECK-NEXT:    li a4, 32
-; CHECK-NEXT:    and a2, a3, a2
-; CHECK-NEXT:    bltu a1, a4, .LBB49_2
+; CHECK-NEXT:    mv a2, a1
+; CHECK-NEXT:    bltu a1, a3, .LBB49_2
 ; CHECK-NEXT:  # %bb.1:
-; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    li a2, 32
 ; CHECK-NEXT:  .LBB49_2:
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vmv.s.x v25, a0
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vredxor.vs v25, v8, v25, v0.t
-; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, ma
-; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT:    vredxor.vs v25, v8, v25, v0.t
+; CHECK-NEXT:    addi a0, a1, -32
+; CHECK-NEXT:    sltu a1, a1, a0
+; CHECK-NEXT:    addi a1, a1, -1
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vredxor.vs v8, v16, v8, v0.t
-; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    vredxor.vs v25, v16, v25, v0.t
+; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = call i32 @llvm.vp.reduce.xor.v64i32(i32 %s, <64 x i32> %v, <64 x i1> %m, i32 %evl)
   ret i32 %r
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
index 95b64cb662a614..7bcf37b1af3c8f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
@@ -1115,13 +1115,10 @@ define signext i32 @vpreduce_umax_nxv32i32(i32 signext %s, <vscale x 32 x i32> %
 ; CHECK-NEXT:    vmv.s.x v25, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; CHECK-NEXT:    vredmaxu.vs v25, v8, v25, v0.t
-; CHECK-NEXT:    vmv.x.s a0, v25
-; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, ma
-; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    vredmaxu.vs v8, v16, v8, v0.t
-; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    vredmaxu.vs v25, v16, v25, v0.t
+; CHECK-NEXT:    vmv.x.s a0, v25
 ; CHECK-NEXT:    ret
   %r = call i32 @llvm.vp.reduce.umax.nxv32i32(i32 %s, <vscale x 32 x i32> %v, <vscale x 32 x i1> %m, i32 %evl)
   ret i32 %r


