[llvm] 53ce22e - Recommit "[RISCV] Use setcc's original SDLoc when inverting it in performSUBCombine."

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Aug 16 16:00:10 PDT 2022


Author: Craig Topper
Date: 2022-08-16T15:51:07-07:00
New Revision: 53ce22e42971e2ad1acc561496bdf043d3ab0cfa

URL: https://github.com/llvm/llvm-project/commit/53ce22e42971e2ad1acc561496bdf043d3ab0cfa
DIFF: https://github.com/llvm/llvm-project/commit/53ce22e42971e2ad1acc561496bdf043d3ab0cfa.diff

LOG: Recommit "[RISCV] Use setcc's original SDLoc when inverting it in performSUBCombine."

This time using N1 instead of N0, since N1 points to the original
setcc. This now affects scheduling as I expected: SDLoc also carries
the IROrder used for source-order scheduling, so reusing N1's
location changes the generated instruction order in the tests below.

Original commit message:
We change seteq<->setne, but that doesn't change the semantics of
the setcc, so we should keep the original debug location. This is
consistent with visitXor in the generic DAGCombiner.
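
For context, here is a minimal sketch of the fold this change sits in,
reconstructed around the lines visible in the diff below. Only the
lines that appear in the hunk (the isSignedIntN check, the setcc
inversion, and the three node builders) come from the actual source;
the surrounding guards and operand setup are assumptions for
illustration, assuming the usual SelectionDAG headers:

    // (sub C, (setcc x, y, cc)) -> (add (setcc x, y, !cc), C-1).
    // A setcc yields 0 or 1, so C - s == (1 - s) + (C - 1) == !s + (C - 1).
    static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
      SDValue N0 = N->getOperand(0); // assumed: the constant C
      SDValue N1 = N->getOperand(1); // assumed: the original setcc
      EVT VT = N->getValueType(0);
      auto *C = dyn_cast<ConstantSDNode>(N0);
      if (!C || N1.getOpcode() != ISD::SETCC || !N1.hasOneUse())
        return SDValue();
      ISD::CondCode CCVal = cast<CondCodeSDNode>(N1.getOperand(2))->get();
      EVT SetCCOpVT = N1.getOperand(0).getValueType();
      APInt ImmValMinus1 = C->getAPIntValue() - 1;
      // ADDI takes a 12-bit signed immediate; a larger C-1 would need
      // extra constant materialization and may not be profitable.
      if (!ImmValMinus1.isSignedIntN(12))
        return SDValue();
      CCVal = ISD::getSetCCInverse(CCVal, SetCCOpVT);
      // Use SDLoc(N1), not SDLoc(N): the inverted setcc is the same
      // compare, so it keeps the original debug location (and IROrder),
      // matching visitXor in the generic DAGCombiner.
      SDValue NewN0 = DAG.getSetCC(SDLoc(N1), VT, N1.getOperand(0),
                                   N1.getOperand(1), CCVal);
      SDValue NewN1 = DAG.getConstant(ImmValMinus1, SDLoc(N), VT);
      return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewN0, NewN1);
    }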

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/get-setcc-result-type.ll
    llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 937491f99212..013d0ff5ffe1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8298,7 +8298,7 @@ static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
       // and may increase the number of constants we need.
       if (ImmValMinus1.isSignedIntN(12)) {
         CCVal = ISD::getSetCCInverse(CCVal, SetCCOpVT);
-        SDValue NewN0 = DAG.getSetCC(SDLoc(N), VT, N1.getOperand(0),
+        SDValue NewN0 = DAG.getSetCC(SDLoc(N1), VT, N1.getOperand(0),
                                      N1.getOperand(1), CCVal);
         SDValue NewN1 = DAG.getConstant(ImmValMinus1, SDLoc(N), VT);
         return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewN0, NewN1);

diff --git a/llvm/test/CodeGen/RISCV/get-setcc-result-type.ll b/llvm/test/CodeGen/RISCV/get-setcc-result-type.ll
index f1ad9b3127db..c94f77245bf0 100644
--- a/llvm/test/CodeGen/RISCV/get-setcc-result-type.ll
+++ b/llvm/test/CodeGen/RISCV/get-setcc-result-type.ll
@@ -5,22 +5,22 @@
 define void @getSetCCResultType(<4 x i32>* %p, <4 x i32>* %q) nounwind {
 ; RV32I-LABEL: getSetCCResultType:
 ; RV32I:       # %bb.0: # %entry
-; RV32I-NEXT:    lw a1, 0(a0)
-; RV32I-NEXT:    lw a2, 12(a0)
+; RV32I-NEXT:    lw a1, 12(a0)
+; RV32I-NEXT:    lw a2, 8(a0)
 ; RV32I-NEXT:    lw a3, 4(a0)
-; RV32I-NEXT:    lw a4, 8(a0)
+; RV32I-NEXT:    lw a4, 0(a0)
 ; RV32I-NEXT:    snez a1, a1
-; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    snez a2, a2
 ; RV32I-NEXT:    snez a3, a3
-; RV32I-NEXT:    addi a3, a3, -1
 ; RV32I-NEXT:    snez a4, a4
 ; RV32I-NEXT:    addi a4, a4, -1
-; RV32I-NEXT:    snez a2, a2
+; RV32I-NEXT:    addi a3, a3, -1
 ; RV32I-NEXT:    addi a2, a2, -1
-; RV32I-NEXT:    sw a2, 12(a0)
-; RV32I-NEXT:    sw a4, 8(a0)
+; RV32I-NEXT:    addi a1, a1, -1
+; RV32I-NEXT:    sw a1, 12(a0)
+; RV32I-NEXT:    sw a2, 8(a0)
 ; RV32I-NEXT:    sw a3, 4(a0)
-; RV32I-NEXT:    sw a1, 0(a0)
+; RV32I-NEXT:    sw a4, 0(a0)
 ; RV32I-NEXT:    ret
 entry:
   %0 = load <4 x i32>, <4 x i32>* %p, align 16

diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 42c72e0ae32a..58a3db745bb1 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -347,13 +347,13 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32-NEXT:    call __moddi3@plt
 ; RV32-NEXT:    xori a2, s2, 2
 ; RV32-NEXT:    or a2, a2, s3
+; RV32-NEXT:    seqz a2, a2
 ; RV32-NEXT:    xori a3, s5, 1
 ; RV32-NEXT:    or a3, a3, s6
+; RV32-NEXT:    seqz a3, a3
 ; RV32-NEXT:    or a0, a0, a1
 ; RV32-NEXT:    snez a0, a0
-; RV32-NEXT:    seqz a1, a3
-; RV32-NEXT:    addi a1, a1, -1
-; RV32-NEXT:    seqz a2, a2
+; RV32-NEXT:    addi a1, a3, -1
 ; RV32-NEXT:    addi a2, a2, -1
 ; RV32-NEXT:    neg a3, a0
 ; RV32-NEXT:    sw a3, 0(s0)
@@ -389,24 +389,24 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
 ; RV64-NEXT:    mv s0, a0
-; RV64-NEXT:    lwu a0, 8(a0)
-; RV64-NEXT:    ld a1, 0(s0)
-; RV64-NEXT:    slli a2, a0, 31
-; RV64-NEXT:    srli a3, a1, 33
-; RV64-NEXT:    lb a4, 12(s0)
-; RV64-NEXT:    or a2, a3, a2
-; RV64-NEXT:    slli a2, a2, 31
-; RV64-NEXT:    srai s1, a2, 31
-; RV64-NEXT:    slli a2, a4, 32
-; RV64-NEXT:    or a0, a0, a2
+; RV64-NEXT:    lb a0, 12(a0)
+; RV64-NEXT:    lwu a1, 8(s0)
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    ld a2, 0(s0)
 ; RV64-NEXT:    slli a0, a0, 29
+; RV64-NEXT:    srai s1, a0, 31
+; RV64-NEXT:    slli a0, a1, 31
+; RV64-NEXT:    srli a1, a2, 33
+; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    slli a0, a0, 31
 ; RV64-NEXT:    srai a0, a0, 31
-; RV64-NEXT:    slli a1, a1, 31
+; RV64-NEXT:    slli a1, a2, 31
 ; RV64-NEXT:    srai s2, a1, 31
-; RV64-NEXT:    li a1, -5
+; RV64-NEXT:    li a1, 7
 ; RV64-NEXT:    call __moddi3@plt
 ; RV64-NEXT:    mv s3, a0
-; RV64-NEXT:    li a1, 7
+; RV64-NEXT:    li a1, -5
 ; RV64-NEXT:    mv a0, s1
 ; RV64-NEXT:    call __moddi3@plt
 ; RV64-NEXT:    mv s1, a0
@@ -421,24 +421,24 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV64-NEXT:    srli a0, a0, 1
 ; RV64-NEXT:    or a0, a0, a2
 ; RV64-NEXT:    sltu a0, a1, a0
-; RV64-NEXT:    neg a0, a0
-; RV64-NEXT:    addi a1, s1, -1
+; RV64-NEXT:    addi a1, s1, -2
 ; RV64-NEXT:    seqz a1, a1
-; RV64-NEXT:    addi a1, a1, -1
-; RV64-NEXT:    addi a2, s3, -2
+; RV64-NEXT:    addi a2, s3, -1
 ; RV64-NEXT:    seqz a2, a2
+; RV64-NEXT:    neg a0, a0
 ; RV64-NEXT:    addi a2, a2, -1
-; RV64-NEXT:    slli a3, a2, 29
+; RV64-NEXT:    addi a1, a1, -1
+; RV64-NEXT:    slli a3, a1, 29
 ; RV64-NEXT:    srli a3, a3, 61
 ; RV64-NEXT:    sb a3, 12(s0)
-; RV64-NEXT:    slli a2, a2, 2
-; RV64-NEXT:    slli a3, a1, 31
+; RV64-NEXT:    slli a1, a1, 2
+; RV64-NEXT:    slli a3, a2, 31
 ; RV64-NEXT:    srli a3, a3, 62
-; RV64-NEXT:    or a2, a3, a2
-; RV64-NEXT:    sw a2, 8(s0)
+; RV64-NEXT:    or a1, a3, a1
+; RV64-NEXT:    sw a1, 8(s0)
 ; RV64-NEXT:    slli a0, a0, 31
 ; RV64-NEXT:    srli a0, a0, 31
-; RV64-NEXT:    slli a1, a1, 33
+; RV64-NEXT:    slli a1, a2, 33
 ; RV64-NEXT:    or a0, a0, a1
 ; RV64-NEXT:    sd a0, 0(s0)
 ; RV64-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
@@ -498,13 +498,13 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ; RV32M-NEXT:    call __moddi3@plt
 ; RV32M-NEXT:    xori a2, s2, 2
 ; RV32M-NEXT:    or a2, a2, s3
+; RV32M-NEXT:    seqz a2, a2
 ; RV32M-NEXT:    xori a3, s5, 1
 ; RV32M-NEXT:    or a3, a3, s6
+; RV32M-NEXT:    seqz a3, a3
 ; RV32M-NEXT:    or a0, a0, a1
 ; RV32M-NEXT:    snez a0, a0
-; RV32M-NEXT:    seqz a1, a3
-; RV32M-NEXT:    addi a1, a1, -1
-; RV32M-NEXT:    seqz a2, a2
+; RV32M-NEXT:    addi a1, a3, -1
 ; RV32M-NEXT:    addi a2, a2, -1
 ; RV32M-NEXT:    neg a3, a0
 ; RV32M-NEXT:    sw a3, 0(s0)
@@ -533,65 +533,65 @@ define void @test_srem_vec(<3 x i33>* %X) nounwind {
 ;
 ; RV64M-LABEL: test_srem_vec:
 ; RV64M:       # %bb.0:
-; RV64M-NEXT:    lwu a1, 8(a0)
-; RV64M-NEXT:    ld a2, 0(a0)
-; RV64M-NEXT:    lb a3, 12(a0)
-; RV64M-NEXT:    slli a4, a1, 31
-; RV64M-NEXT:    srli a5, a2, 33
-; RV64M-NEXT:    or a4, a5, a4
-; RV64M-NEXT:    slli a3, a3, 32
-; RV64M-NEXT:    lui a5, %hi(.LCPI3_0)
-; RV64M-NEXT:    ld a5, %lo(.LCPI3_0)(a5)
-; RV64M-NEXT:    or a1, a1, a3
+; RV64M-NEXT:    lb a1, 12(a0)
+; RV64M-NEXT:    lwu a2, 8(a0)
+; RV64M-NEXT:    slli a1, a1, 32
+; RV64M-NEXT:    or a1, a2, a1
+; RV64M-NEXT:    ld a3, 0(a0)
 ; RV64M-NEXT:    slli a1, a1, 29
 ; RV64M-NEXT:    srai a1, a1, 31
-; RV64M-NEXT:    mulh a3, a1, a5
-; RV64M-NEXT:    srli a5, a3, 63
-; RV64M-NEXT:    srai a3, a3, 1
-; RV64M-NEXT:    add a3, a3, a5
-; RV64M-NEXT:    slli a5, a3, 2
-; RV64M-NEXT:    add a3, a5, a3
+; RV64M-NEXT:    slli a2, a2, 31
+; RV64M-NEXT:    srli a4, a3, 33
+; RV64M-NEXT:    lui a5, %hi(.LCPI3_0)
+; RV64M-NEXT:    ld a5, %lo(.LCPI3_0)(a5)
+; RV64M-NEXT:    or a2, a4, a2
+; RV64M-NEXT:    slli a2, a2, 31
+; RV64M-NEXT:    srai a2, a2, 31
+; RV64M-NEXT:    mulh a4, a2, a5
+; RV64M-NEXT:    srli a5, a4, 63
+; RV64M-NEXT:    srai a4, a4, 1
+; RV64M-NEXT:    add a4, a4, a5
+; RV64M-NEXT:    slli a5, a4, 3
+; RV64M-NEXT:    sub a4, a4, a5
 ; RV64M-NEXT:    lui a5, %hi(.LCPI3_1)
 ; RV64M-NEXT:    ld a5, %lo(.LCPI3_1)(a5)
-; RV64M-NEXT:    slli a4, a4, 31
-; RV64M-NEXT:    srai a4, a4, 31
-; RV64M-NEXT:    add a1, a1, a3
-; RV64M-NEXT:    mulh a3, a4, a5
-; RV64M-NEXT:    srli a5, a3, 63
-; RV64M-NEXT:    srai a3, a3, 1
-; RV64M-NEXT:    add a3, a3, a5
-; RV64M-NEXT:    slli a5, a3, 3
-; RV64M-NEXT:    sub a3, a3, a5
-; RV64M-NEXT:    add a3, a4, a3
+; RV64M-NEXT:    slli a3, a3, 31
+; RV64M-NEXT:    srai a3, a3, 31
+; RV64M-NEXT:    add a2, a2, a4
+; RV64M-NEXT:    mulh a4, a1, a5
+; RV64M-NEXT:    srli a5, a4, 63
+; RV64M-NEXT:    srai a4, a4, 1
+; RV64M-NEXT:    add a4, a4, a5
+; RV64M-NEXT:    slli a5, a4, 2
+; RV64M-NEXT:    add a4, a5, a4
+; RV64M-NEXT:    add a1, a1, a4
+; RV64M-NEXT:    addi a1, a1, -2
+; RV64M-NEXT:    seqz a1, a1
 ; RV64M-NEXT:    lui a4, %hi(.LCPI3_2)
 ; RV64M-NEXT:    ld a4, %lo(.LCPI3_2)(a4)
 ; RV64M-NEXT:    lui a5, %hi(.LCPI3_3)
 ; RV64M-NEXT:    ld a5, %lo(.LCPI3_3)(a5)
-; RV64M-NEXT:    slli a2, a2, 31
-; RV64M-NEXT:    srai a2, a2, 31
-; RV64M-NEXT:    mul a2, a2, a4
-; RV64M-NEXT:    add a2, a2, a5
-; RV64M-NEXT:    slli a4, a2, 63
-; RV64M-NEXT:    srli a2, a2, 1
-; RV64M-NEXT:    or a2, a2, a4
-; RV64M-NEXT:    sltu a2, a5, a2
-; RV64M-NEXT:    addi a3, a3, -1
-; RV64M-NEXT:    seqz a3, a3
-; RV64M-NEXT:    addi a3, a3, -1
-; RV64M-NEXT:    addi a1, a1, -2
-; RV64M-NEXT:    seqz a1, a1
+; RV64M-NEXT:    addi a2, a2, -1
+; RV64M-NEXT:    seqz a2, a2
+; RV64M-NEXT:    mul a3, a3, a4
+; RV64M-NEXT:    add a3, a3, a5
+; RV64M-NEXT:    slli a4, a3, 63
+; RV64M-NEXT:    srli a3, a3, 1
+; RV64M-NEXT:    or a3, a3, a4
+; RV64M-NEXT:    sltu a3, a5, a3
+; RV64M-NEXT:    addi a2, a2, -1
 ; RV64M-NEXT:    addi a1, a1, -1
-; RV64M-NEXT:    neg a2, a2
+; RV64M-NEXT:    neg a3, a3
 ; RV64M-NEXT:    slli a4, a1, 29
 ; RV64M-NEXT:    srli a4, a4, 61
 ; RV64M-NEXT:    sb a4, 12(a0)
-; RV64M-NEXT:    slli a4, a3, 33
-; RV64M-NEXT:    slli a2, a2, 31
-; RV64M-NEXT:    srli a2, a2, 31
-; RV64M-NEXT:    or a2, a2, a4
-; RV64M-NEXT:    sd a2, 0(a0)
+; RV64M-NEXT:    slli a4, a2, 33
+; RV64M-NEXT:    slli a3, a3, 31
+; RV64M-NEXT:    srli a3, a3, 31
+; RV64M-NEXT:    or a3, a3, a4
+; RV64M-NEXT:    sd a3, 0(a0)
 ; RV64M-NEXT:    slli a1, a1, 2
-; RV64M-NEXT:    slli a2, a3, 31
+; RV64M-NEXT:    slli a2, a2, 31
 ; RV64M-NEXT:    srli a2, a2, 62
 ; RV64M-NEXT:    or a1, a2, a1
 ; RV64M-NEXT:    sw a1, 8(a0)
