[llvm] 2c79801 - [RISCV] Add more ineg+setcc isel patterns to avoid creating neg+xori+slti(u).
Craig Topper via llvm-commits
llvm-commits@lists.llvm.org
Thu Aug 11 14:25:01 PDT 2022
Author: Craig Topper
Date: 2022-08-11T14:24:09-07:00
New Revision: 2c79801a0e572e1f25a249596d963ad178cfcda5
URL: https://github.com/llvm/llvm-project/commit/2c79801a0e572e1f25a249596d963ad178cfcda5
DIFF: https://github.com/llvm/llvm-project/commit/2c79801a0e572e1f25a249596d963ad178cfcda5.diff
LOG: [RISCV] Add more ineg+setcc isel patterns to avoid creating neg+xori+slti(u).
Including patterns to select addiw if only the lower 32 bits are used.
I'm not excited about adding this many patterns. I'm looking at whether
we can create the xori during lowering and move the ineg patterns to
DAGCombiner.
Added:
Modified:
llvm/lib/Target/RISCV/RISCVInstrInfo.td
llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 8ca8435f31f99..6b0d790f913db 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -1284,10 +1284,38 @@ def : Pat<(setugt GPR:$rs1, simm12_minus1_nonzero:$imm),
// If negating a pattern that requires an XORI above, we can fold the XORI with
// the NEG. The XORI is equivalent to 1-X and negating gives X-1.
-def : Pat<(ineg (setuge GPR:$rs1, GPR:$rs2)), (ADDI (SLTU GPR:$rs1, GPR:$rs2), -1)>;
-def : Pat<(ineg (setule GPR:$rs1, GPR:$rs2)), (ADDI (SLTU GPR:$rs2, GPR:$rs1), -1)>;
-def : Pat<(ineg (setge GPR:$rs1, GPR:$rs2)), (ADDI (SLT GPR:$rs1, GPR:$rs2), -1)>;
-def : Pat<(ineg (setle GPR:$rs1, GPR:$rs2)), (ADDI (SLT GPR:$rs2, GPR:$rs1), -1)>;
+def : Pat<(ineg (setuge GPR:$rs1, GPR:$rs2)),
+ (ADDI (SLTU GPR:$rs1, GPR:$rs2), -1)>;
+def : Pat<(ineg (setule GPR:$rs1, GPR:$rs2)),
+ (ADDI (SLTU GPR:$rs2, GPR:$rs1), -1)>;
+def : Pat<(ineg (setge GPR:$rs1, GPR:$rs2)),
+ (ADDI (SLT GPR:$rs1, GPR:$rs2), -1)>;
+def : Pat<(ineg (setle GPR:$rs1, GPR:$rs2)),
+ (ADDI (SLT GPR:$rs2, GPR:$rs1), -1)>;
+def : Pat<(ineg (setgt GPR:$rs1, simm12_minus1_nonzero:$imm)),
+ (ADDI (SLTI GPR:$rs1, (ImmPlus1 simm12_minus1_nonzero:$imm)), -1)>;
+def : Pat<(ineg (setugt GPR:$rs1, simm12_minus1_nonzero:$imm)),
+ (ADDI (SLTIU GPR:$rs1, (ImmPlus1 simm12_minus1_nonzero:$imm)), -1)>;
+
+def ineg_allwusers : PatFrag<(ops node:$src),
+ (ineg node:$src), [{
+ return hasAllWUsers(Node);
+}]>;
+
+let Predicates = [IsRV64] in {
+def : Pat<(ineg_allwusers (setuge GPR:$rs1, GPR:$rs2)),
+ (ADDIW (SLTU GPR:$rs1, GPR:$rs2), -1)>;
+def : Pat<(ineg_allwusers (setule GPR:$rs1, GPR:$rs2)),
+ (ADDIW (SLTU GPR:$rs2, GPR:$rs1), -1)>;
+def : Pat<(ineg_allwusers (setge GPR:$rs1, GPR:$rs2)),
+ (ADDIW (SLT GPR:$rs1, GPR:$rs2), -1)>;
+def : Pat<(ineg_allwusers (setle GPR:$rs1, GPR:$rs2)),
+ (ADDIW (SLT GPR:$rs2, GPR:$rs1), -1)>;
+def : Pat<(ineg_allwusers (setgt GPR:$rs1, simm12_minus1_nonzero:$imm)),
+ (ADDIW (SLTI GPR:$rs1, (ImmPlus1 simm12_minus1_nonzero:$imm)), -1)>;
+def : Pat<(ineg_allwusers (setugt GPR:$rs1, simm12_minus1_nonzero:$imm)),
+ (ADDIW (SLTIU GPR:$rs1, (ImmPlus1 simm12_minus1_nonzero:$imm)), -1)>;
+}
def IntCCtoRISCVCC : SDNodeXForm<riscv_selectcc, [{
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
diff --git a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
index 6208c85a8ecab..fac578b72508a 100644
--- a/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/urem-seteq-illegal-types.ll
@@ -343,9 +343,7 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
; RV32-NEXT: slli a0, a0, 21
; RV32-NEXT: srli a0, a0, 22
; RV32-NEXT: or a0, a0, a1
-; RV32-NEXT: andi a0, a0, 2047
-; RV32-NEXT: sltiu a0, a0, 342
-; RV32-NEXT: xori s3, a0, 1
+; RV32-NEXT: andi s3, a0, 2047
; RV32-NEXT: li a1, 819
; RV32-NEXT: mv a0, s1
; RV32-NEXT: call __mulsi3@plt
@@ -358,10 +356,10 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
; RV32-NEXT: call __mulsi3@plt
; RV32-NEXT: addi a0, a0, -1463
; RV32-NEXT: andi a0, a0, 2047
+; RV32-NEXT: sltiu a1, s3, 342
+; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: sltiu a0, a0, 293
-; RV32-NEXT: xori a0, a0, 1
-; RV32-NEXT: neg a1, s3
-; RV32-NEXT: neg a0, a0
+; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: neg a2, s1
; RV32-NEXT: slli a2, a2, 21
; RV32-NEXT: srli a2, a2, 31
@@ -403,9 +401,7 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
; RV64-NEXT: slli a0, a0, 53
; RV64-NEXT: srli a0, a0, 54
; RV64-NEXT: or a0, a0, a1
-; RV64-NEXT: andi a0, a0, 2047
-; RV64-NEXT: sltiu a0, a0, 342
-; RV64-NEXT: xori s3, a0, 1
+; RV64-NEXT: andi s3, a0, 2047
; RV64-NEXT: li a1, 819
; RV64-NEXT: mv a0, s2
; RV64-NEXT: call __muldi3@plt
@@ -418,10 +414,10 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
; RV64-NEXT: call __muldi3@plt
; RV64-NEXT: addiw a0, a0, -1463
; RV64-NEXT: andi a0, a0, 2047
+; RV64-NEXT: sltiu a1, s3, 342
+; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: sltiu a0, a0, 293
-; RV64-NEXT: xori a0, a0, 1
-; RV64-NEXT: negw a1, s3
-; RV64-NEXT: negw a0, a0
+; RV64-NEXT: addiw a0, a0, -1
; RV64-NEXT: andi a1, a1, 2047
; RV64-NEXT: andi a0, a0, 2047
; RV64-NEXT: slli a0, a0, 11
@@ -456,8 +452,6 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
; RV32M-NEXT: srli a2, a2, 22
; RV32M-NEXT: or a2, a2, a4
; RV32M-NEXT: andi a2, a2, 2047
-; RV32M-NEXT: sltiu a2, a2, 342
-; RV32M-NEXT: xori a2, a2, 1
; RV32M-NEXT: li a4, 819
; RV32M-NEXT: mul a1, a1, a4
; RV32M-NEXT: addi a1, a1, -1638
@@ -468,10 +462,10 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
; RV32M-NEXT: mul a3, a3, a4
; RV32M-NEXT: addi a3, a3, -1463
; RV32M-NEXT: andi a3, a3, 2047
+; RV32M-NEXT: sltiu a2, a2, 342
+; RV32M-NEXT: addi a2, a2, -1
; RV32M-NEXT: sltiu a3, a3, 293
-; RV32M-NEXT: xori a3, a3, 1
-; RV32M-NEXT: neg a2, a2
-; RV32M-NEXT: neg a3, a3
+; RV32M-NEXT: addi a3, a3, -1
; RV32M-NEXT: neg a4, a1
; RV32M-NEXT: slli a4, a4, 21
; RV32M-NEXT: srli a4, a4, 31
@@ -501,8 +495,6 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
; RV64M-NEXT: srli a1, a1, 54
; RV64M-NEXT: or a1, a1, a4
; RV64M-NEXT: andi a1, a1, 2047
-; RV64M-NEXT: sltiu a1, a1, 342
-; RV64M-NEXT: xori a1, a1, 1
; RV64M-NEXT: li a4, 819
; RV64M-NEXT: mulw a3, a3, a4
; RV64M-NEXT: addiw a3, a3, -1638
@@ -513,10 +505,10 @@ define void @test_urem_vec(<3 x i11>* %X) nounwind {
; RV64M-NEXT: mulw a2, a2, a4
; RV64M-NEXT: addiw a2, a2, -1463
; RV64M-NEXT: andi a2, a2, 2047
+; RV64M-NEXT: sltiu a1, a1, 342
+; RV64M-NEXT: addiw a1, a1, -1
; RV64M-NEXT: sltiu a2, a2, 293
-; RV64M-NEXT: xori a2, a2, 1
-; RV64M-NEXT: negw a1, a1
-; RV64M-NEXT: negw a2, a2
+; RV64M-NEXT: addiw a2, a2, -1
; RV64M-NEXT: andi a1, a1, 2047
; RV64M-NEXT: andi a2, a2, 2047
; RV64M-NEXT: slli a2, a2, 11
More information about the llvm-commits
mailing list