[llvm-branch-commits] [llvm] 141e45b - [RISCV] Optimize Branch Comparisons

Sam Elliott via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Fri Jan 15 03:33:04 PST 2021


Author: Sam Elliott
Date: 2021-01-15T11:28:19Z
New Revision: 141e45b99ca09235b985504e8108dbb3cf210fbd

URL: https://github.com/llvm/llvm-project/commit/141e45b99ca09235b985504e8108dbb3cf210fbd
DIFF: https://github.com/llvm/llvm-project/commit/141e45b99ca09235b985504e8108dbb3cf210fbd.diff

LOG: [RISCV] Optimize Branch Comparisons

I noticed in D94450 that there are quite a few places where we generate
the sequence:
```
  xN <- comparison ...
  xN <- xor xN, 1
  bnez xN, symbol
```

Since we know the XOR will only be used by the BRCOND, which only looks at the
lowest bit, we can remove the XOR and instead invert the branch condition in
these cases.
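
With the pattern added below, the branch consumes the comparison result
directly and the XOR disappears (a sketch of the intended lowering, using the
same placeholder names as above):
```
  xN <- comparison ...
  beqz xN, symbol
```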

This case mostly comes up in floating-point tests, where extra logic often
combines the results of multiple SETCCs rather than feeding a single
(BRCOND (SETCC ...) ...).
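
As a concrete illustration, an unordered-not-equal compare is one source of
this shape: on RISC-V it lowers to a `feq` followed by an `xori`, which the
new pattern folds into an inverted branch (a minimal IR sketch matching the
br_fcmp_une tests below; the function and label names are illustrative):
```
define void @br_fcmp_une_sketch(double %a, double %b) nounwind {
  %cmp = fcmp une double %a, %b
  br i1 %cmp, label %if.then, label %if.else
if.then:
  ret void
if.else:
  ret void
}
```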

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D94535

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfo.td
    llvm/test/CodeGen/RISCV/double-br-fcmp.ll
    llvm/test/CodeGen/RISCV/float-br-fcmp.ll
    llvm/test/CodeGen/RISCV/half-br-fcmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 4aee8ae39cc2..6b0967e12736 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -966,9 +966,13 @@ def : BccSwapPat<setle, BGE>;
 def : BccSwapPat<setugt, BLTU>;
 def : BccSwapPat<setule, BGEU>;
 
-// An extra pattern is needed for a brcond without a setcc (i.e. where the
+// Extra patterns are needed for a brcond without a setcc (i.e. where the
 // condition was calculated elsewhere).
 def : Pat<(brcond GPR:$cond, bb:$imm12), (BNE GPR:$cond, X0, bb:$imm12)>;
+// In this pattern, the `(xor $cond, 1)` functions like (boolean) `not`, as the
+// `brcond` only uses the lowest bit.
+def : Pat<(brcond (XLenVT (xor GPR:$cond, 1)), bb:$imm12),
+          (BEQ GPR:$cond, X0, bb:$imm12)>;
 
 let isBarrier = 1, isBranch = 1, isTerminator = 1 in
 def PseudoBR : Pseudo<(outs), (ins simm21_lsb0_jal:$imm20), [(br bb:$imm20)]>,

diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
index 94744a22364d..6336d7b12ef3 100644
--- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
@@ -100,8 +100,7 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft1, 0(sp)
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    beqz a0, .LBB2_2
+; RV32IFD-NEXT:    bnez a0, .LBB2_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -116,8 +115,7 @@ define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    beqz a0, .LBB2_2
+; RV64IFD-NEXT:    bnez a0, .LBB2_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -460,8 +458,7 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft1, 0(sp)
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB10_2
+; RV32IFD-NEXT:    beqz a0, .LBB10_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -476,8 +473,7 @@ define void @br_fcmp_ugt(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    fle.d a0, ft1, ft0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB10_2
+; RV64IFD-NEXT:    beqz a0, .LBB10_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -505,8 +501,7 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft1, 0(sp)
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB11_2
+; RV32IFD-NEXT:    beqz a0, .LBB11_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -521,8 +516,7 @@ define void @br_fcmp_uge(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    flt.d a0, ft1, ft0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB11_2
+; RV64IFD-NEXT:    beqz a0, .LBB11_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -550,8 +544,7 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    sw a3, 4(sp)
 ; RV32IFD-NEXT:    fld ft1, 0(sp)
 ; RV32IFD-NEXT:    fle.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB12_2
+; RV32IFD-NEXT:    beqz a0, .LBB12_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -566,8 +559,7 @@ define void @br_fcmp_ult(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    fle.d a0, ft1, ft0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB12_2
+; RV64IFD-NEXT:    beqz a0, .LBB12_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -595,8 +587,7 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    sw a3, 4(sp)
 ; RV32IFD-NEXT:    fld ft1, 0(sp)
 ; RV32IFD-NEXT:    flt.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB13_2
+; RV32IFD-NEXT:    beqz a0, .LBB13_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -611,8 +602,7 @@ define void @br_fcmp_ule(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    fmv.d.x ft0, a0
 ; RV64IFD-NEXT:    fmv.d.x ft1, a1
 ; RV64IFD-NEXT:    flt.d a0, ft1, ft0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB13_2
+; RV64IFD-NEXT:    beqz a0, .LBB13_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16
@@ -640,8 +630,7 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
 ; RV32IFD-NEXT:    sw a1, 4(sp)
 ; RV32IFD-NEXT:    fld ft1, 0(sp)
 ; RV32IFD-NEXT:    feq.d a0, ft1, ft0
-; RV32IFD-NEXT:    xori a0, a0, 1
-; RV32IFD-NEXT:    bnez a0, .LBB14_2
+; RV32IFD-NEXT:    beqz a0, .LBB14_2
 ; RV32IFD-NEXT:  # %bb.1: # %if.else
 ; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 16
@@ -656,8 +645,7 @@ define void @br_fcmp_une(double %a, double %b) nounwind {
 ; RV64IFD-NEXT:    fmv.d.x ft0, a1
 ; RV64IFD-NEXT:    fmv.d.x ft1, a0
 ; RV64IFD-NEXT:    feq.d a0, ft1, ft0
-; RV64IFD-NEXT:    xori a0, a0, 1
-; RV64IFD-NEXT:    bnez a0, .LBB14_2
+; RV64IFD-NEXT:    beqz a0, .LBB14_2
 ; RV64IFD-NEXT:  # %bb.1: # %if.else
 ; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IFD-NEXT:    addi sp, sp, 16

diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
index 3ef15ccb9337..a941ebc24e28 100644
--- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
@@ -93,8 +93,7 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    feq.s a0, ft1, ft0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    beqz a0, .LBB2_2
+; RV32IF-NEXT:    bnez a0, .LBB2_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -109,8 +108,7 @@ define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    feq.s a0, ft1, ft0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    beqz a0, .LBB2_2
+; RV64IF-NEXT:    bnez a0, .LBB2_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -421,8 +419,7 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    fle.s a0, ft1, ft0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB10_2
+; RV32IF-NEXT:    beqz a0, .LBB10_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -437,8 +434,7 @@ define void @br_fcmp_ugt(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    fle.s a0, ft1, ft0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB10_2
+; RV64IF-NEXT:    beqz a0, .LBB10_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -462,8 +458,7 @@ define void @br_fcmp_uge(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    flt.s a0, ft1, ft0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB11_2
+; RV32IF-NEXT:    beqz a0, .LBB11_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -478,8 +473,7 @@ define void @br_fcmp_uge(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    flt.s a0, ft1, ft0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB11_2
+; RV64IF-NEXT:    beqz a0, .LBB11_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -503,8 +497,7 @@ define void @br_fcmp_ult(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    fle.s a0, ft1, ft0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB12_2
+; RV32IF-NEXT:    beqz a0, .LBB12_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -519,8 +512,7 @@ define void @br_fcmp_ult(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    fle.s a0, ft1, ft0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB12_2
+; RV64IF-NEXT:    beqz a0, .LBB12_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -544,8 +536,7 @@ define void @br_fcmp_ule(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    fmv.w.x ft0, a0
 ; RV32IF-NEXT:    fmv.w.x ft1, a1
 ; RV32IF-NEXT:    flt.s a0, ft1, ft0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB13_2
+; RV32IF-NEXT:    beqz a0, .LBB13_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -560,8 +551,7 @@ define void @br_fcmp_ule(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    fmv.w.x ft0, a0
 ; RV64IF-NEXT:    fmv.w.x ft1, a1
 ; RV64IF-NEXT:    flt.s a0, ft1, ft0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB13_2
+; RV64IF-NEXT:    beqz a0, .LBB13_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16
@@ -585,8 +575,7 @@ define void @br_fcmp_une(float %a, float %b) nounwind {
 ; RV32IF-NEXT:    fmv.w.x ft0, a1
 ; RV32IF-NEXT:    fmv.w.x ft1, a0
 ; RV32IF-NEXT:    feq.s a0, ft1, ft0
-; RV32IF-NEXT:    xori a0, a0, 1
-; RV32IF-NEXT:    bnez a0, .LBB14_2
+; RV32IF-NEXT:    beqz a0, .LBB14_2
 ; RV32IF-NEXT:  # %bb.1: # %if.else
 ; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 16
@@ -601,8 +590,7 @@ define void @br_fcmp_une(float %a, float %b) nounwind {
 ; RV64IF-NEXT:    fmv.w.x ft0, a1
 ; RV64IF-NEXT:    fmv.w.x ft1, a0
 ; RV64IF-NEXT:    feq.s a0, ft1, ft0
-; RV64IF-NEXT:    xori a0, a0, 1
-; RV64IF-NEXT:    bnez a0, .LBB14_2
+; RV64IF-NEXT:    beqz a0, .LBB14_2
 ; RV64IF-NEXT:  # %bb.1: # %if.else
 ; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IF-NEXT:    addi sp, sp, 16

diff --git a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
index a79476f0b1ac..93f35a5b9dbe 100644
--- a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
@@ -87,8 +87,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    feq.h a0, fa0, fa1
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    beqz a0, .LBB2_2
+; RV32IZFH-NEXT:    bnez a0, .LBB2_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -101,8 +100,7 @@ define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    feq.h a0, fa0, fa1
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    beqz a0, .LBB2_2
+; RV64IZFH-NEXT:    bnez a0, .LBB2_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -383,8 +381,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fle.h a0, fa0, fa1
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB10_2
+; RV32IZFH-NEXT:    beqz a0, .LBB10_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -397,8 +394,7 @@ define void @br_fcmp_ugt(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fle.h a0, fa0, fa1
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB10_2
+; RV64IZFH-NEXT:    beqz a0, .LBB10_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -420,8 +416,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    flt.h a0, fa0, fa1
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB11_2
+; RV32IZFH-NEXT:    beqz a0, .LBB11_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -434,8 +429,7 @@ define void @br_fcmp_uge(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    flt.h a0, fa0, fa1
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB11_2
+; RV64IZFH-NEXT:    beqz a0, .LBB11_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -457,8 +451,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    fle.h a0, fa1, fa0
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB12_2
+; RV32IZFH-NEXT:    beqz a0, .LBB12_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -471,8 +464,7 @@ define void @br_fcmp_ult(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    fle.h a0, fa1, fa0
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB12_2
+; RV64IZFH-NEXT:    beqz a0, .LBB12_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -494,8 +486,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    flt.h a0, fa1, fa0
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB13_2
+; RV32IZFH-NEXT:    beqz a0, .LBB13_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -508,8 +499,7 @@ define void @br_fcmp_ule(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    flt.h a0, fa1, fa0
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB13_2
+; RV64IZFH-NEXT:    beqz a0, .LBB13_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
@@ -531,8 +521,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
 ; RV32IZFH-NEXT:    addi sp, sp, -16
 ; RV32IZFH-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
 ; RV32IZFH-NEXT:    feq.h a0, fa0, fa1
-; RV32IZFH-NEXT:    xori a0, a0, 1
-; RV32IZFH-NEXT:    bnez a0, .LBB14_2
+; RV32IZFH-NEXT:    beqz a0, .LBB14_2
 ; RV32IZFH-NEXT:  # %bb.1: # %if.else
 ; RV32IZFH-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32IZFH-NEXT:    addi sp, sp, 16
@@ -545,8 +534,7 @@ define void @br_fcmp_une(half %a, half %b) nounwind {
 ; RV64IZFH-NEXT:    addi sp, sp, -16
 ; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; RV64IZFH-NEXT:    feq.h a0, fa0, fa1
-; RV64IZFH-NEXT:    xori a0, a0, 1
-; RV64IZFH-NEXT:    bnez a0, .LBB14_2
+; RV64IZFH-NEXT:    beqz a0, .LBB14_2
 ; RV64IZFH-NEXT:  # %bb.1: # %if.else
 ; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64IZFH-NEXT:    addi sp, sp, 16
