[llvm] 2524146 - [RISCV] Add DAG combine for (vmv_s_x_vl (undef) (vmv_x_s X)). (#90524)
Author: Craig Topper
Date: 2024-04-29T23:35:30-07:00
New Revision: 2524146b256002bfc70b38008425291f5b3dd08c
URL: https://github.com/llvm/llvm-project/commit/2524146b256002bfc70b38008425291f5b3dd08c
DIFF: https://github.com/llvm/llvm-project/commit/2524146b256002bfc70b38008425291f5b3dd08c.diff
LOG: [RISCV] Add DAG combine for (vmv_s_x_vl (undef) (vmv_x_s X)). (#90524)
We can use the original vector X directly, as long as the type of X matches
the result type of the vmv_s_x_vl: the scalar being inserted was extracted
from element 0 of X, and with an undef passthru only element 0 of the
result is defined anyway.
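At the SelectionDAG level the rewrite is simply

  (vmv_s_x_vl undef, (vmv_x_s X), VL) -> X

In the generated code this deletes round trips through a scalar register,
e.g. this pair removed in fixed-vector-i8-index-cornercase.ll below:

  vmv.x.s a0, v16    # a0 = element 0 of v16
  vmv.s.x v16, a0    # v16[0] = a0, undef passthru

The second instruction only rewrites the value that element 0 of v16
already holds, so both instructions can be dropped and v16 used directly.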
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 68f4ec5ef49f31..fe8edcf39681db 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -16791,6 +16791,10 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
SDValue Scalar = N->getOperand(1);
SDValue VL = N->getOperand(2);
+ if (Scalar.getOpcode() == RISCVISD::VMV_X_S && Passthru.isUndef() &&
+ Scalar.getOperand(0).getValueType() == N->getValueType(0))
+ return Scalar.getOperand(0);
+
// Use M1 or smaller to avoid over constraining register allocation
const MVT M1VT = getLMUL1VT(VT);
if (M1VT.bitsLT(VT)) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
index 2874db6debd740..875f4f239028b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
@@ -26,17 +26,13 @@ define <512 x i8> @single_source(<512 x i8> %a) {
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a0
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v17, v16, 5
-; CHECK-NEXT: vmv.x.s a0, v17
-; CHECK-NEXT: vmv.s.x v24, a0
+; CHECK-NEXT: vslidedown.vi v24, v16, 5
; CHECK-NEXT: li a0, 432
; CHECK-NEXT: li a1, 431
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
; CHECK-NEXT: vslideup.vx v8, v24, a1
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v16, 4
-; CHECK-NEXT: vmv.x.s a0, v16
-; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: li a0, 466
; CHECK-NEXT: li a1, 465
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
@@ -109,20 +105,17 @@ define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) {
; CHECK-NEXT: addi a1, sp, 512
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vse8.v v8, (a1)
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v0, v24, 5
; CHECK-NEXT: vmv.x.s a1, v24
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv.v.x v8, a1
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v25, v24, 5
-; CHECK-NEXT: vmv.x.s a1, v25
-; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: li a1, 432
; CHECK-NEXT: li a2, 431
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
; CHECK-NEXT: vslideup.vx v8, v0, a2
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v24, 4
-; CHECK-NEXT: vmv.x.s a1, v24
-; CHECK-NEXT: vmv.s.x v24, a1
; CHECK-NEXT: li a1, 466
; CHECK-NEXT: li a2, 465
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
@@ -130,9 +123,9 @@ define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) {
; CHECK-NEXT: vslideup.vx v8, v24, a2
; CHECK-NEXT: vmv.s.x v24, a1
; CHECK-NEXT: li a1, 478
+; CHECK-NEXT: li a2, 477
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
; CHECK-NEXT: lbu a1, 1012(sp)
-; CHECK-NEXT: li a2, 477
; CHECK-NEXT: vslideup.vx v8, v24, a2
; CHECK-NEXT: vmv.s.x v24, a1
; CHECK-NEXT: li a1, 501
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
index b874a4477f5d17..02a989a9699606 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -802,27 +802,25 @@ define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1>
; CHECK-LABEL: vpreduce_xor_v64i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vslidedown.vi v24, v0, 4
-; CHECK-NEXT: addi a2, a1, -32
-; CHECK-NEXT: sltu a3, a1, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: li a4, 32
-; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: bltu a1, a4, .LBB49_2
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: bltu a1, a3, .LBB49_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: li a2, 32
; CHECK-NEXT: .LBB49_2:
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv.s.x v25, a0
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vredxor.vs v25, v8, v25, v0.t
-; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vredxor.vs v25, v8, v25, v0.t
+; CHECK-NEXT: addi a0, a1, -32
+; CHECK-NEXT: sltu a1, a1, a0
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vredxor.vs v8, v16, v8, v0.t
-; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: vredxor.vs v25, v16, v25, v0.t
+; CHECK-NEXT: vmv.x.s a0, v25
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.xor.v64i32(i32 %s, <64 x i32> %v, <64 x i1> %m, i32 %evl)
ret i32 %r
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
index 95b64cb662a614..7bcf37b1af3c8f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
@@ -1115,13 +1115,10 @@ define signext i32 @vpreduce_umax_nxv32i32(i32 signext %s, <vscale x 32 x i32> %
; CHECK-NEXT: vmv.s.x v25, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v25, v8, v25, v0.t
-; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: vsetivli zero, 1, e32, m8, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vredmaxu.vs v8, v16, v8, v0.t
-; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: vredmaxu.vs v25, v16, v25, v0.t
+; CHECK-NEXT: vmv.x.s a0, v25
; CHECK-NEXT: ret
%r = call i32 @llvm.vp.reduce.umax.nxv32i32(i32 %s, <vscale x 32 x i32> %v, <vscale x 32 x i1> %m, i32 %evl)
ret i32 %r