[llvm] a246eb6 - [RISCV] Mark (s/u)min_vl and (s/u)max_vl as commutable.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Fri Jul 8 09:59:53 PDT 2022
Author: Craig Topper
Date: 2022-07-08T09:59:42-07:00
New Revision: a246eb681424c59549a7cecd3eb9581253d28f04
URL: https://github.com/llvm/llvm-project/commit/a246eb681424c59549a7cecd3eb9581253d28f04
DIFF: https://github.com/llvm/llvm-project/commit/a246eb681424c59549a7cecd3eb9581253d28f04.diff
LOG: [RISCV] Mark (s/u)min_vl and (s/u)max_vl as commutable.
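[Editorial note, not part of the original commit message: with SDNPCommutative set, SelectionDAG may swap the operands of these min/max VL nodes during instruction selection, so a splatted scalar appearing as the *first* operand can still match the .vx patterns, which expect the scalar on the right. A minimal IR sketch of the kind of case this helps, modeled on the smin_xv tests below (the function name is illustrative, not the exact test body):

    declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)

    define void @smin_scalar_first(<4 x i32>* %x, i32 %y) {
      %a = load <4 x i32>, <4 x i32>* %x
      ; splat the scalar %y and pass it as the FIRST operand of smin
      %head = insertelement <4 x i32> poison, i32 %y, i32 0
      %splat = shufflevector <4 x i32> %head, <4 x i32> poison, <4 x i32> zeroinitializer
      %b = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %splat, <4 x i32> %a)
      store <4 x i32> %b, <4 x i32>* %x
      ret void
    }

Before this change, the splat was materialized with vmv.v.x and fed to vmin.vv; with the node marked commutative, isel can select vmin.vx directly, as the updated CHECK lines in the test diff show.]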
Added:
Modified:
llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 081f61617d59..d2a246ad6286 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -76,10 +76,10 @@ def riscv_urem_vl : SDNode<"RISCVISD::UREM_VL", SDT_RISCVIntBinOp_VL>;
def riscv_shl_vl : SDNode<"RISCVISD::SHL_VL", SDT_RISCVIntBinOp_VL>;
def riscv_sra_vl : SDNode<"RISCVISD::SRA_VL", SDT_RISCVIntBinOp_VL>;
def riscv_srl_vl : SDNode<"RISCVISD::SRL_VL", SDT_RISCVIntBinOp_VL>;
-def riscv_smin_vl : SDNode<"RISCVISD::SMIN_VL", SDT_RISCVIntBinOp_VL>;
-def riscv_smax_vl : SDNode<"RISCVISD::SMAX_VL", SDT_RISCVIntBinOp_VL>;
-def riscv_umin_vl : SDNode<"RISCVISD::UMIN_VL", SDT_RISCVIntBinOp_VL>;
-def riscv_umax_vl : SDNode<"RISCVISD::UMAX_VL", SDT_RISCVIntBinOp_VL>;
+def riscv_smin_vl : SDNode<"RISCVISD::SMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+def riscv_smax_vl : SDNode<"RISCVISD::SMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+def riscv_umin_vl : SDNode<"RISCVISD::UMIN_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+def riscv_umax_vl : SDNode<"RISCVISD::UMAX_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL>;
def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL>;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 85eddccad620..203c56af131b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -1371,8 +1371,7 @@ define void @smin_xv_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vmin.vv v8, v9, v8
+; CHECK-NEXT: vmin.vx v8, v8, a1
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i8>, <16 x i8>* %x
@@ -1388,8 +1387,7 @@ define void @smin_xv_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vmin.vv v8, v9, v8
+; CHECK-NEXT: vmin.vx v8, v8, a1
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i16>, <8 x i16>* %x
@@ -1405,8 +1403,7 @@ define void @smin_xv_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vmin.vv v8, v9, v8
+; CHECK-NEXT: vmin.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x
@@ -1541,8 +1538,7 @@ define void @smax_xv_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vmax.vv v8, v9, v8
+; CHECK-NEXT: vmax.vx v8, v8, a1
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i8>, <16 x i8>* %x
@@ -1558,8 +1554,7 @@ define void @smax_xv_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vmax.vv v8, v9, v8
+; CHECK-NEXT: vmax.vx v8, v8, a1
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i16>, <8 x i16>* %x
@@ -1575,8 +1570,7 @@ define void @smax_xv_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vmax.vv v8, v9, v8
+; CHECK-NEXT: vmax.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x
@@ -1711,8 +1705,7 @@ define void @umin_xv_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vminu.vv v8, v9, v8
+; CHECK-NEXT: vminu.vx v8, v8, a1
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i8>, <16 x i8>* %x
@@ -1728,8 +1721,7 @@ define void @umin_xv_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vminu.vv v8, v9, v8
+; CHECK-NEXT: vminu.vx v8, v8, a1
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i16>, <8 x i16>* %x
@@ -1745,8 +1737,7 @@ define void @umin_xv_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vminu.vv v8, v9, v8
+; CHECK-NEXT: vminu.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x
@@ -1881,8 +1872,7 @@ define void @umax_xv_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vmaxu.vv v8, v9, v8
+; CHECK-NEXT: vmaxu.vx v8, v8, a1
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i8>, <16 x i8>* %x
@@ -1898,8 +1888,7 @@ define void @umax_xv_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vmaxu.vv v8, v9, v8
+; CHECK-NEXT: vmaxu.vx v8, v8, a1
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i16>, <8 x i16>* %x
@@ -1915,8 +1904,7 @@ define void @umax_xv_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vmaxu.vv v8, v9, v8
+; CHECK-NEXT: vmaxu.vx v8, v8, a1
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x