[llvm] 2c18570 - [RISCV] Remove setJumpIsExpensive(). (#74647)

via llvm-commits llvm-commits@lists.llvm.org
Wed Dec 13 09:37:29 PST 2023


Author: Craig Topper
Date: 2023-12-13T09:37:25-08:00
New Revision: 2c185709bca195f3d5e062819bba5dd828330548

URL: https://github.com/llvm/llvm-project/commit/2c185709bca195f3d5e062819bba5dd828330548
DIFF: https://github.com/llvm/llvm-project/commit/2c185709bca195f3d5e062819bba5dd828330548.diff

LOG: [RISCV] Remove setJumpIsExpensive(). (#74647)

Middle-end optimizations can speculate away the short-circuit behavior
of C/C++ && and ||, using i1 and/or or logical select instructions and
a single branch.

SelectionDAGBuilder can turn i1 and/or/select back into multiple
branches, but this is disabled when jumps are considered expensive.
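
As an illustration, here is a minimal, hypothetical IR sketch (not
taken from this patch; the function and value names are invented):

; Folded form the middle end can produce for "if (a < 0 && b == 7) work();":
; one i1 'and' and a single branch, with the short circuit speculated away.
define void @folded(i32 signext %a, i32 signext %b) {
  %c1 = icmp slt i32 %a, 0
  %c2 = icmp eq i32 %b, 7
  %c = and i1 %c1, %c2
  br i1 %c, label %then, label %end
then:
  call void @work()
  br label %end
end:
  ret void
}
declare void @work()
; When jumps are not marked expensive, SelectionDAGBuilder may split the
; 'and' back into two conditional branches, recovering the short-circuit
; control flow at the machine level.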

RISC-V can use slt(u)(i) to evaluate a condition into any GPR, which
makes us better off than targets that use a flag register. RISC-V also
has a single-instruction compare-and-branch. So it's not clear from a
code-size perspective that using compare+and/or is better.
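
A rough sketch of both shapes; the RV32I sequences in the comments are
illustrative assumptions, not output from this patch:

define i1 @cond_in_gpr(i32 signext %a, i32 signext %b) {
  ; Materializes the condition directly into a GPR, no flag register
  ; needed; roughly: slt a0, a0, a1
  %c = icmp slt i32 %a, %b
  ret i1 %c
}

define void @cond_branch(i32 signext %a, i32 signext %b) {
  ; Branching on the same condition is a single compare-and-branch,
  ; roughly: bge a0, a1, .LBB_end
  %c = icmp slt i32 %a, %b
  br i1 %c, label %taken, label %end
taken:
  tail call void @hook()
  ret void
end:
  ret void
}
declare void @hook()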

If the full condition depends on multiple loads, using logic delays the
branch resolution until all of the loads have resolved, even if there
is a cheap condition that makes the loads unnecessary.
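
A hypothetical sketch of that case (names invented for illustration):

; Folded form: the branch cannot resolve until both loads complete,
; even when %x alone already decides the outcome.
define void @folded_loads(i32 signext %x, ptr %p, ptr %q) {
  %cheap = icmp ne i32 %x, 0
  %a = load i32, ptr %p
  %b = load i32, ptr %q
  %eq = icmp eq i32 %a, %b
  %c = and i1 %cheap, %eq
  br i1 %c, label %then, label %end
then:
  call void @slowpath()
  br label %end
end:
  ret void
}
; Branchy form: if %x is zero, the first branch resolves immediately
; and the loads never execute.
define void @branchy_loads(i32 signext %x, ptr %p, ptr %q) {
  %cheap = icmp ne i32 %x, 0
  br i1 %cheap, label %loads, label %end
loads:
  %a = load i32, ptr %p
  %b = load i32, ptr %q
  %eq = icmp eq i32 %a, %b
  br i1 %eq, label %then, label %end
then:
  call void @slowpath()
  br label %end
end:
  ret void
}
declare void @slowpath()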

PowerPC and Lanai are the only CPU targets that use setJumpIsExpensive.
NVPTX and AMDGPU also use it, but they are GPU targets. PowerPC appears
to have a MachineIR pass that turns AND/OR of CR bits into multiple
branches. I don't know anything about Lanai or its reason for using
setJumpIsExpensive.

I think the decision to use logic vs branches is much more nuanced than
this big hammer. So I propose to make RISC-V match other CPU targets.

Anyone who wants the old behavior can still pass -mllvm
-jump-is-expensive=true.

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/double-previous-failure.ll
    llvm/test/CodeGen/RISCV/select-and.ll
    llvm/test/CodeGen/RISCV/select-or.ll
    llvm/test/CodeGen/RISCV/setcc-logic.ll
    llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 0b682b0cbb3380..c0462dc4b0bccf 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1363,9 +1363,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
   setPrefFunctionAlignment(Subtarget.getPrefFunctionAlignment());
   setPrefLoopAlignment(Subtarget.getPrefLoopAlignment());
 
-  // Jumps are expensive, compared to logic
-  setJumpIsExpensive();
-
   setTargetDAGCombine({ISD::INTRINSIC_VOID, ISD::INTRINSIC_W_CHAIN,
                        ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::AND,
                        ISD::OR, ISD::XOR, ISD::SETCC, ISD::SELECT});

diff --git a/llvm/test/CodeGen/RISCV/double-previous-failure.ll b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
index d9d3442043a90a..aec27b58e1fc49 100644
--- a/llvm/test/CodeGen/RISCV/double-previous-failure.ll
+++ b/llvm/test/CodeGen/RISCV/double-previous-failure.ll
@@ -31,16 +31,17 @@ define i32 @main() nounwind {
 ; RV32IFD-NEXT:    fld fa5, 0(sp)
 ; RV32IFD-NEXT:    lui a0, %hi(.LCPI1_0)
 ; RV32IFD-NEXT:    fld fa4, %lo(.LCPI1_0)(a0)
-; RV32IFD-NEXT:    lui a0, %hi(.LCPI1_1)
-; RV32IFD-NEXT:    fld fa3, %lo(.LCPI1_1)(a0)
 ; RV32IFD-NEXT:    flt.d a0, fa5, fa4
-; RV32IFD-NEXT:    flt.d a1, fa3, fa5
-; RV32IFD-NEXT:    or a0, a0, a1
-; RV32IFD-NEXT:    beqz a0, .LBB1_2
-; RV32IFD-NEXT:  # %bb.1: # %if.then
-; RV32IFD-NEXT:    call abort@plt
-; RV32IFD-NEXT:  .LBB1_2: # %if.end
+; RV32IFD-NEXT:    bnez a0, .LBB1_3
+; RV32IFD-NEXT:  # %bb.1: # %entry
+; RV32IFD-NEXT:    lui a0, %hi(.LCPI1_1)
+; RV32IFD-NEXT:    fld fa4, %lo(.LCPI1_1)(a0)
+; RV32IFD-NEXT:    flt.d a0, fa4, fa5
+; RV32IFD-NEXT:    bnez a0, .LBB1_3
+; RV32IFD-NEXT:  # %bb.2: # %if.end
 ; RV32IFD-NEXT:    call exit@plt
+; RV32IFD-NEXT:  .LBB1_3: # %if.then
+; RV32IFD-NEXT:    call abort@plt
 ;
 ; RV32IZFINXZDINX-LABEL: main:
 ; RV32IZFINXZDINX:       # %bb.0: # %entry
@@ -56,17 +57,18 @@ define i32 @main() nounwind {
 ; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI1_0)
 ; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI1_0+4)(a2)
 ; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI1_0)(a2)
-; RV32IZFINXZDINX-NEXT:    lui a4, %hi(.LCPI1_1)
-; RV32IZFINXZDINX-NEXT:    lw a5, %lo(.LCPI1_1+4)(a4)
-; RV32IZFINXZDINX-NEXT:    lw a4, %lo(.LCPI1_1)(a4)
 ; RV32IZFINXZDINX-NEXT:    flt.d a2, a0, a2
-; RV32IZFINXZDINX-NEXT:    flt.d a0, a4, a0
-; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
-; RV32IZFINXZDINX-NEXT:    beqz a0, .LBB1_2
-; RV32IZFINXZDINX-NEXT:  # %bb.1: # %if.then
-; RV32IZFINXZDINX-NEXT:    call abort@plt
-; RV32IZFINXZDINX-NEXT:  .LBB1_2: # %if.end
+; RV32IZFINXZDINX-NEXT:    bnez a2, .LBB1_3
+; RV32IZFINXZDINX-NEXT:  # %bb.1: # %entry
+; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI1_1)
+; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI1_1+4)(a2)
+; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI1_1)(a2)
+; RV32IZFINXZDINX-NEXT:    flt.d a0, a2, a0
+; RV32IZFINXZDINX-NEXT:    bnez a0, .LBB1_3
+; RV32IZFINXZDINX-NEXT:  # %bb.2: # %if.end
 ; RV32IZFINXZDINX-NEXT:    call exit@plt
+; RV32IZFINXZDINX-NEXT:  .LBB1_3: # %if.then
+; RV32IZFINXZDINX-NEXT:    call abort@plt
 entry:
   %call = call double @test(double 2.000000e+00)
   %cmp = fcmp olt double %call, 2.400000e-01

diff --git a/llvm/test/CodeGen/RISCV/select-and.ll b/llvm/test/CodeGen/RISCV/select-and.ll
index 51f411bd96548c..5ba489043f0b44 100644
--- a/llvm/test/CodeGen/RISCV/select-and.ll
+++ b/llvm/test/CodeGen/RISCV/select-and.ll
@@ -40,14 +40,15 @@ define signext i32 @if_of_and(i1 zeroext %a, i1 zeroext %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    and a0, a0, a1
-; RV32I-NEXT:    beqz a0, .LBB1_2
-; RV32I-NEXT:  # %bb.1: # %if.then
+; RV32I-NEXT:    beqz a0, .LBB1_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    beqz a1, .LBB1_3
+; RV32I-NEXT:  # %bb.2: # %if.then
 ; RV32I-NEXT:    call both@plt
-; RV32I-NEXT:    j .LBB1_3
-; RV32I-NEXT:  .LBB1_2: # %if.else
+; RV32I-NEXT:    j .LBB1_4
+; RV32I-NEXT:  .LBB1_3: # %if.else
 ; RV32I-NEXT:    call neither@plt
-; RV32I-NEXT:  .LBB1_3: # %if.end
+; RV32I-NEXT:  .LBB1_4: # %if.end
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -56,14 +57,15 @@ define signext i32 @if_of_and(i1 zeroext %a, i1 zeroext %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    and a0, a0, a1
-; RV64I-NEXT:    beqz a0, .LBB1_2
-; RV64I-NEXT:  # %bb.1: # %if.then
+; RV64I-NEXT:    beqz a0, .LBB1_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    beqz a1, .LBB1_3
+; RV64I-NEXT:  # %bb.2: # %if.then
 ; RV64I-NEXT:    call both@plt
-; RV64I-NEXT:    j .LBB1_3
-; RV64I-NEXT:  .LBB1_2: # %if.else
+; RV64I-NEXT:    j .LBB1_4
+; RV64I-NEXT:  .LBB1_3: # %if.else
 ; RV64I-NEXT:    call neither@plt
-; RV64I-NEXT:  .LBB1_3: # %if.end
+; RV64I-NEXT:  .LBB1_4: # %if.end
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/select-or.ll b/llvm/test/CodeGen/RISCV/select-or.ll
index 9ba284c611a6b3..d378bb4dd563de 100644
--- a/llvm/test/CodeGen/RISCV/select-or.ll
+++ b/llvm/test/CodeGen/RISCV/select-or.ll
@@ -40,14 +40,15 @@ define signext i32 @if_of_or(i1 zeroext %a, i1 zeroext %b) nounwind {
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi sp, sp, -16
 ; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    beqz a0, .LBB1_2
-; RV32I-NEXT:  # %bb.1: # %if.then
-; RV32I-NEXT:    call either@plt
-; RV32I-NEXT:    j .LBB1_3
-; RV32I-NEXT:  .LBB1_2: # %if.else
+; RV32I-NEXT:    bnez a0, .LBB1_3
+; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bnez a1, .LBB1_3
+; RV32I-NEXT:  # %bb.2: # %if.else
 ; RV32I-NEXT:    call neither@plt
-; RV32I-NEXT:  .LBB1_3: # %if.end
+; RV32I-NEXT:    j .LBB1_4
+; RV32I-NEXT:  .LBB1_3: # %if.then
+; RV32I-NEXT:    call either@plt
+; RV32I-NEXT:  .LBB1_4: # %if.end
 ; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32I-NEXT:    addi sp, sp, 16
 ; RV32I-NEXT:    ret
@@ -56,14 +57,15 @@ define signext i32 @if_of_or(i1 zeroext %a, i1 zeroext %b) nounwind {
 ; RV64I:       # %bb.0:
 ; RV64I-NEXT:    addi sp, sp, -16
 ; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    beqz a0, .LBB1_2
-; RV64I-NEXT:  # %bb.1: # %if.then
-; RV64I-NEXT:    call either@plt
-; RV64I-NEXT:    j .LBB1_3
-; RV64I-NEXT:  .LBB1_2: # %if.else
+; RV64I-NEXT:    bnez a0, .LBB1_3
+; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bnez a1, .LBB1_3
+; RV64I-NEXT:  # %bb.2: # %if.else
 ; RV64I-NEXT:    call neither@plt
-; RV64I-NEXT:  .LBB1_3: # %if.end
+; RV64I-NEXT:    j .LBB1_4
+; RV64I-NEXT:  .LBB1_3: # %if.then
+; RV64I-NEXT:    call either@plt
+; RV64I-NEXT:  .LBB1_4: # %if.end
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/setcc-logic.ll b/llvm/test/CodeGen/RISCV/setcc-logic.ll
index 6a6c1bcd8dec13..14e76d1ff17288 100644
--- a/llvm/test/CodeGen/RISCV/setcc-logic.ll
+++ b/llvm/test/CodeGen/RISCV/setcc-logic.ll
@@ -298,26 +298,22 @@ declare void @bar(...)
 define void @and_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_sge_eq:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slt a0, a0, a1
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    snez a1, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB13_2
+; RV32I-NEXT:    blt a0, a1, .LBB13_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bne a2, a3, .LBB13_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB13_2:
+; RV32I-NEXT:  .LBB13_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_sge_eq:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slt a0, a0, a1
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    snez a1, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB13_2
+; RV64I-NEXT:    blt a0, a1, .LBB13_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bne a2, a3, .LBB13_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB13_2:
+; RV64I-NEXT:  .LBB13_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp sge i32 %0, %1
   %6 = icmp eq i32 %2, %3
@@ -335,26 +331,22 @@ define void @and_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_sle_eq:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    snez a1, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB14_2
+; RV32I-NEXT:    blt a1, a0, .LBB14_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bne a2, a3, .LBB14_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB14_2:
+; RV32I-NEXT:  .LBB14_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_sle_eq:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    snez a1, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB14_2
+; RV64I-NEXT:    blt a1, a0, .LBB14_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bne a2, a3, .LBB14_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB14_2:
+; RV64I-NEXT:  .LBB14_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp sle i32 %0, %1
   %6 = icmp eq i32 %2, %3
@@ -372,26 +364,22 @@ define void @and_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_uge_eq:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sltu a0, a0, a1
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    snez a1, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB15_2
+; RV32I-NEXT:    bltu a0, a1, .LBB15_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bne a2, a3, .LBB15_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB15_2:
+; RV32I-NEXT:  .LBB15_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_uge_eq:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sltu a0, a0, a1
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    snez a1, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB15_2
+; RV64I-NEXT:    bltu a0, a1, .LBB15_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bne a2, a3, .LBB15_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB15_2:
+; RV64I-NEXT:  .LBB15_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp uge i32 %0, %1
   %6 = icmp eq i32 %2, %3
@@ -409,26 +397,22 @@ define void @and_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_ule_eq:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sltu a0, a1, a0
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    snez a1, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB16_2
+; RV32I-NEXT:    bltu a1, a0, .LBB16_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bne a2, a3, .LBB16_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB16_2:
+; RV32I-NEXT:  .LBB16_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_ule_eq:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sltu a0, a1, a0
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    snez a1, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB16_2
+; RV64I-NEXT:    bltu a1, a0, .LBB16_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bne a2, a3, .LBB16_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB16_2:
+; RV64I-NEXT:  .LBB16_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp ule i32 %0, %1
   %6 = icmp eq i32 %2, %3
@@ -446,26 +430,22 @@ define void @and_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_sge_ne:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slt a0, a0, a1
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    seqz a1, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB17_2
+; RV32I-NEXT:    blt a0, a1, .LBB17_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    beq a2, a3, .LBB17_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB17_2:
+; RV32I-NEXT:  .LBB17_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_sge_ne:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slt a0, a0, a1
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    seqz a1, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB17_2
+; RV64I-NEXT:    blt a0, a1, .LBB17_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    beq a2, a3, .LBB17_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB17_2:
+; RV64I-NEXT:  .LBB17_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp sge i32 %0, %1
   %6 = icmp ne i32 %2, %3
@@ -483,26 +463,22 @@ define void @and_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_sle_ne:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    seqz a1, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB18_2
+; RV32I-NEXT:    blt a1, a0, .LBB18_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    beq a2, a3, .LBB18_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB18_2:
+; RV32I-NEXT:  .LBB18_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_sle_ne:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    seqz a1, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB18_2
+; RV64I-NEXT:    blt a1, a0, .LBB18_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    beq a2, a3, .LBB18_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB18_2:
+; RV64I-NEXT:  .LBB18_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp sle i32 %0, %1
   %6 = icmp ne i32 %2, %3
@@ -520,26 +496,22 @@ define void @and_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_uge_ne:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sltu a0, a0, a1
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    seqz a1, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB19_2
+; RV32I-NEXT:    bltu a0, a1, .LBB19_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    beq a2, a3, .LBB19_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB19_2:
+; RV32I-NEXT:  .LBB19_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_uge_ne:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sltu a0, a0, a1
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    seqz a1, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB19_2
+; RV64I-NEXT:    bltu a0, a1, .LBB19_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    beq a2, a3, .LBB19_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB19_2:
+; RV64I-NEXT:  .LBB19_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp uge i32 %0, %1
   %6 = icmp ne i32 %2, %3
@@ -557,26 +529,22 @@ define void @and_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_ule_ne:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sltu a0, a1, a0
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    seqz a1, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB20_2
+; RV32I-NEXT:    bltu a1, a0, .LBB20_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    beq a2, a3, .LBB20_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB20_2:
+; RV32I-NEXT:  .LBB20_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_ule_ne:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sltu a0, a1, a0
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    seqz a1, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB20_2
+; RV64I-NEXT:    bltu a1, a0, .LBB20_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    beq a2, a3, .LBB20_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB20_2:
+; RV64I-NEXT:  .LBB20_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp ule i32 %0, %1
   %6 = icmp ne i32 %2, %3
@@ -594,27 +562,23 @@ define void @and_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @or_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: or_sge_eq:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slt a0, a0, a1
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    snez a1, a2
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB21_2
+; RV32I-NEXT:    bge a0, a1, .LBB21_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB21_2:
+; RV32I-NEXT:    beq a2, a3, .LBB21_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:  .LBB21_3:
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_sge_eq:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slt a0, a0, a1
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    snez a1, a2
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB21_2
+; RV64I-NEXT:    bge a0, a1, .LBB21_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB21_2:
+; RV64I-NEXT:    beq a2, a3, .LBB21_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:  .LBB21_3:
+; RV64I-NEXT:    ret
   %5 = icmp sge i32 %0, %1
   %6 = icmp eq i32 %2, %3
   %7 = or i1 %5, %6
@@ -631,27 +595,23 @@ define void @or_sge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 define void @or_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: or_sle_eq:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    snez a1, a2
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB22_2
+; RV32I-NEXT:    bge a1, a0, .LBB22_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB22_2:
+; RV32I-NEXT:    beq a2, a3, .LBB22_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:  .LBB22_3:
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_sle_eq:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    snez a1, a2
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB22_2
+; RV64I-NEXT:    bge a1, a0, .LBB22_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB22_2:
+; RV64I-NEXT:    beq a2, a3, .LBB22_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:  .LBB22_3:
+; RV64I-NEXT:    ret
   %5 = icmp sle i32 %0, %1
   %6 = icmp eq i32 %2, %3
   %7 = or i1 %5, %6
@@ -668,27 +628,23 @@ define void @or_sle_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 define void @or_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: or_uge_eq:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sltu a0, a0, a1
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    snez a1, a2
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB23_2
+; RV32I-NEXT:    bgeu a0, a1, .LBB23_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB23_2:
+; RV32I-NEXT:    beq a2, a3, .LBB23_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:  .LBB23_3:
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_uge_eq:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sltu a0, a0, a1
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    snez a1, a2
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB23_2
+; RV64I-NEXT:    bgeu a0, a1, .LBB23_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB23_2:
+; RV64I-NEXT:    beq a2, a3, .LBB23_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:  .LBB23_3:
+; RV64I-NEXT:    ret
   %5 = icmp uge i32 %0, %1
   %6 = icmp eq i32 %2, %3
   %7 = or i1 %5, %6
@@ -705,27 +661,23 @@ define void @or_uge_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 define void @or_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: or_ule_eq:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sltu a0, a1, a0
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    snez a1, a2
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB24_2
+; RV32I-NEXT:    bgeu a1, a0, .LBB24_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB24_2:
+; RV32I-NEXT:    beq a2, a3, .LBB24_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:  .LBB24_3:
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_ule_eq:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sltu a0, a1, a0
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    snez a1, a2
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB24_2
+; RV64I-NEXT:    bgeu a1, a0, .LBB24_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB24_2:
+; RV64I-NEXT:    beq a2, a3, .LBB24_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:  .LBB24_3:
+; RV64I-NEXT:    ret
   %5 = icmp ule i32 %0, %1
   %6 = icmp eq i32 %2, %3
   %7 = or i1 %5, %6
@@ -742,27 +694,23 @@ define void @or_ule_eq(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 define void @or_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: or_sge_ne:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slt a0, a0, a1
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    seqz a1, a2
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB25_2
+; RV32I-NEXT:    bge a0, a1, .LBB25_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB25_2:
+; RV32I-NEXT:    bne a2, a3, .LBB25_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:  .LBB25_3:
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_sge_ne:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slt a0, a0, a1
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    seqz a1, a2
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB25_2
+; RV64I-NEXT:    bge a0, a1, .LBB25_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB25_2:
+; RV64I-NEXT:    bne a2, a3, .LBB25_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:  .LBB25_3:
+; RV64I-NEXT:    ret
   %5 = icmp sge i32 %0, %1
   %6 = icmp ne i32 %2, %3
   %7 = or i1 %5, %6
@@ -779,27 +727,23 @@ define void @or_sge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 define void @or_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: or_sle_ne:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    seqz a1, a2
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB26_2
+; RV32I-NEXT:    bge a1, a0, .LBB26_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB26_2:
+; RV32I-NEXT:    bne a2, a3, .LBB26_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:  .LBB26_3:
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_sle_ne:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    seqz a1, a2
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB26_2
+; RV64I-NEXT:    bge a1, a0, .LBB26_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB26_2:
+; RV64I-NEXT:    bne a2, a3, .LBB26_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:  .LBB26_3:
+; RV64I-NEXT:    ret
   %5 = icmp sle i32 %0, %1
   %6 = icmp ne i32 %2, %3
   %7 = or i1 %5, %6
@@ -816,27 +760,23 @@ define void @or_sle_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 define void @or_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: or_uge_ne:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sltu a0, a0, a1
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    seqz a1, a2
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB27_2
+; RV32I-NEXT:    bgeu a0, a1, .LBB27_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB27_2:
+; RV32I-NEXT:    bne a2, a3, .LBB27_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:  .LBB27_3:
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_uge_ne:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sltu a0, a0, a1
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    seqz a1, a2
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB27_2
+; RV64I-NEXT:    bgeu a0, a1, .LBB27_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB27_2:
+; RV64I-NEXT:    bne a2, a3, .LBB27_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:  .LBB27_3:
+; RV64I-NEXT:    ret
   %5 = icmp uge i32 %0, %1
   %6 = icmp ne i32 %2, %3
   %7 = or i1 %5, %6
@@ -853,27 +793,23 @@ define void @or_uge_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 define void @or_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: or_ule_ne:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sltu a0, a1, a0
-; RV32I-NEXT:    xor a2, a2, a3
-; RV32I-NEXT:    seqz a1, a2
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB28_2
+; RV32I-NEXT:    bgeu a1, a0, .LBB28_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB28_2:
+; RV32I-NEXT:    bne a2, a3, .LBB28_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:  .LBB28_3:
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_ule_ne:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sltu a0, a1, a0
-; RV64I-NEXT:    xor a2, a2, a3
-; RV64I-NEXT:    seqz a1, a2
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB28_2
+; RV64I-NEXT:    bgeu a1, a0, .LBB28_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB28_2:
+; RV64I-NEXT:    bne a2, a3, .LBB28_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:  .LBB28_3:
+; RV64I-NEXT:    ret
   %5 = icmp ule i32 %0, %1
   %6 = icmp ne i32 %2, %3
   %7 = or i1 %5, %6
@@ -890,26 +826,22 @@ define void @or_ule_ne(i32 signext %0, i32 signext %1, i32 signext %2, i32 signe
 define void @and_eq_sge(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_eq_sge:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    snez a0, a0
-; RV32I-NEXT:    slt a1, a2, a3
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    bnez a0, .LBB29_2
+; RV32I-NEXT:    bne a0, a1, .LBB29_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    blt a2, a3, .LBB29_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB29_2:
+; RV32I-NEXT:  .LBB29_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_eq_sge:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    slt a1, a2, a3
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    bnez a0, .LBB29_2
+; RV64I-NEXT:    bne a0, a1, .LBB29_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    blt a2, a3, .LBB29_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB29_2:
+; RV64I-NEXT:  .LBB29_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp eq  i32 %0, %1
   %6 = icmp sge i32 %2, %3
@@ -927,26 +859,22 @@ define void @and_eq_sge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_eq_sle(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_eq_sle:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    snez a0, a0
-; RV32I-NEXT:    slt a1, a3, a2
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    bnez a0, .LBB30_2
+; RV32I-NEXT:    bne a0, a1, .LBB30_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    blt a3, a2, .LBB30_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB30_2:
+; RV32I-NEXT:  .LBB30_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_eq_sle:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    slt a1, a3, a2
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    bnez a0, .LBB30_2
+; RV64I-NEXT:    bne a0, a1, .LBB30_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    blt a3, a2, .LBB30_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB30_2:
+; RV64I-NEXT:  .LBB30_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp eq  i32 %0, %1
   %6 = icmp sle i32 %2, %3
@@ -964,26 +892,22 @@ define void @and_eq_sle(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_eq_uge(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_eq_uge:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    snez a0, a0
-; RV32I-NEXT:    sltu a1, a2, a3
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    bnez a0, .LBB31_2
+; RV32I-NEXT:    bne a0, a1, .LBB31_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bltu a2, a3, .LBB31_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB31_2:
+; RV32I-NEXT:  .LBB31_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_eq_uge:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    sltu a1, a2, a3
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    bnez a0, .LBB31_2
+; RV64I-NEXT:    bne a0, a1, .LBB31_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bltu a2, a3, .LBB31_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB31_2:
+; RV64I-NEXT:  .LBB31_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp eq  i32 %0, %1
   %6 = icmp uge i32 %2, %3
@@ -1001,26 +925,22 @@ define void @and_eq_uge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_eq_ule(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_eq_ule:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    snez a0, a0
-; RV32I-NEXT:    sltu a1, a3, a2
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    bnez a0, .LBB32_2
+; RV32I-NEXT:    bne a0, a1, .LBB32_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bltu a3, a2, .LBB32_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB32_2:
+; RV32I-NEXT:  .LBB32_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_eq_ule:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    snez a0, a0
-; RV64I-NEXT:    sltu a1, a3, a2
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    bnez a0, .LBB32_2
+; RV64I-NEXT:    bne a0, a1, .LBB32_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bltu a3, a2, .LBB32_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB32_2:
+; RV64I-NEXT:  .LBB32_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp eq  i32 %0, %1
   %6 = icmp ule i32 %2, %3
@@ -1038,26 +958,22 @@ define void @and_eq_ule(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_ne_sge(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_ne_sge:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    seqz a0, a0
-; RV32I-NEXT:    slt a1, a2, a3
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    bnez a0, .LBB33_2
+; RV32I-NEXT:    beq a0, a1, .LBB33_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    blt a2, a3, .LBB33_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB33_2:
+; RV32I-NEXT:  .LBB33_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_ne_sge:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    seqz a0, a0
-; RV64I-NEXT:    slt a1, a2, a3
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    bnez a0, .LBB33_2
+; RV64I-NEXT:    beq a0, a1, .LBB33_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    blt a2, a3, .LBB33_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB33_2:
+; RV64I-NEXT:  .LBB33_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp ne  i32 %0, %1
   %6 = icmp sge i32 %2, %3
@@ -1075,26 +991,22 @@ define void @and_ne_sge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_ne_sle(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_ne_sle:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    seqz a0, a0
-; RV32I-NEXT:    slt a1, a3, a2
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    bnez a0, .LBB34_2
+; RV32I-NEXT:    beq a0, a1, .LBB34_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    blt a3, a2, .LBB34_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB34_2:
+; RV32I-NEXT:  .LBB34_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_ne_sle:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    seqz a0, a0
-; RV64I-NEXT:    slt a1, a3, a2
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    bnez a0, .LBB34_2
+; RV64I-NEXT:    beq a0, a1, .LBB34_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    blt a3, a2, .LBB34_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB34_2:
+; RV64I-NEXT:  .LBB34_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp ne  i32 %0, %1
   %6 = icmp sle i32 %2, %3
@@ -1112,26 +1024,22 @@ define void @and_ne_sle(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_ne_uge(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_ne_uge:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    seqz a0, a0
-; RV32I-NEXT:    sltu a1, a2, a3
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    bnez a0, .LBB35_2
+; RV32I-NEXT:    beq a0, a1, .LBB35_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bltu a2, a3, .LBB35_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB35_2:
+; RV32I-NEXT:  .LBB35_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_ne_uge:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    seqz a0, a0
-; RV64I-NEXT:    sltu a1, a2, a3
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    bnez a0, .LBB35_2
+; RV64I-NEXT:    beq a0, a1, .LBB35_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bltu a2, a3, .LBB35_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB35_2:
+; RV64I-NEXT:  .LBB35_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp ne  i32 %0, %1
   %6 = icmp uge i32 %2, %3
@@ -1149,26 +1057,22 @@ define void @and_ne_uge(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_ne_ule(i32 signext %0, i32 signext %1, i32 signext %2, i32 signext %3) {
 ; RV32I-LABEL: and_ne_ule:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    xor a0, a0, a1
-; RV32I-NEXT:    seqz a0, a0
-; RV32I-NEXT:    sltu a1, a3, a2
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    bnez a0, .LBB36_2
+; RV32I-NEXT:    beq a0, a1, .LBB36_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bltu a3, a2, .LBB36_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB36_2:
+; RV32I-NEXT:  .LBB36_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_ne_ule:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    xor a0, a0, a1
-; RV64I-NEXT:    seqz a0, a0
-; RV64I-NEXT:    sltu a1, a3, a2
-; RV64I-NEXT:    or a0, a0, a1
-; RV64I-NEXT:    bnez a0, .LBB36_2
+; RV64I-NEXT:    beq a0, a1, .LBB36_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bltu a3, a2, .LBB36_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB36_2:
+; RV64I-NEXT:  .LBB36_3:
 ; RV64I-NEXT:    tail bar@plt
   %5 = icmp ne  i32 %0, %1
   %6 = icmp ule i32 %2, %3
@@ -1186,24 +1090,22 @@ define void @and_ne_ule(i32 signext %0, i32 signext %1, i32 signext %2, i32 sign
 define void @and_sge_gt0(i32 signext %0, i32 signext %1, i32 signext %2) {
 ; RV32I-LABEL: and_sge_gt0:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slt a0, a0, a1
-; RV32I-NEXT:    slti a1, a2, 1
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB37_2
+; RV32I-NEXT:    blt a0, a1, .LBB37_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    blez a2, .LBB37_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB37_2:
+; RV32I-NEXT:  .LBB37_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_sge_gt0:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slt a0, a0, a1
-; RV64I-NEXT:    slti a1, a2, 1
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB37_2
+; RV64I-NEXT:    blt a0, a1, .LBB37_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    blez a2, .LBB37_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB37_2:
+; RV64I-NEXT:  .LBB37_3:
 ; RV64I-NEXT:    tail bar@plt
   %4 = icmp sge i32 %0, %1
   %5 = icmp sgt i32 %2, 0
@@ -1221,24 +1123,22 @@ define void @and_sge_gt0(i32 signext %0, i32 signext %1, i32 signext %2) {
 define void @and_sle_lt1(i32 signext %0, i32 signext %1, i32 signext %2) {
 ; RV32I-LABEL: and_sle_lt1:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    slt a0, a1, a0
-; RV32I-NEXT:    sgtz a1, a2
-; RV32I-NEXT:    or a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB38_2
+; RV32I-NEXT:    blt a1, a0, .LBB38_3
 ; RV32I-NEXT:  # %bb.1:
+; RV32I-NEXT:    bgtz a2, .LBB38_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB38_2:
+; RV32I-NEXT:  .LBB38_3:
 ; RV32I-NEXT:    tail bar@plt
 ;
 ; RV64I-LABEL: and_sle_lt1:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    slt a0, a1, a0
-; RV64I-NEXT:    sgtz a1, a2
-; RV64I-NEXT:    or a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB38_2
+; RV64I-NEXT:    blt a1, a0, .LBB38_3
 ; RV64I-NEXT:  # %bb.1:
+; RV64I-NEXT:    bgtz a2, .LBB38_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB38_2:
+; RV64I-NEXT:  .LBB38_3:
 ; RV64I-NEXT:    tail bar@plt
   %4 = icmp sle i32 %0, %1
   %5 = icmp slt i32 %2, 1
@@ -1256,25 +1156,23 @@ define void @and_sle_lt1(i32 signext %0, i32 signext %1, i32 signext %2) {
 define void @or_uge_gt0(i32 signext %0, i32 signext %1, i32 signext %2) {
 ; RV32I-LABEL: or_uge_gt0:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sltu a0, a0, a1
-; RV32I-NEXT:    slti a1, a2, 1
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB39_2
+; RV32I-NEXT:    bgeu a0, a1, .LBB39_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB39_2:
+; RV32I-NEXT:    bgtz a2, .LBB39_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:  .LBB39_3:
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_uge_gt0:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sltu a0, a0, a1
-; RV64I-NEXT:    slti a1, a2, 1
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB39_2
+; RV64I-NEXT:    bgeu a0, a1, .LBB39_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB39_2:
+; RV64I-NEXT:    bgtz a2, .LBB39_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:  .LBB39_3:
+; RV64I-NEXT:    ret
   %4 = icmp uge i32 %0, %1
   %5 = icmp sgt i32 %2, 0
   %6 = or i1 %4, %5
@@ -1291,25 +1189,23 @@ define void @or_uge_gt0(i32 signext %0, i32 signext %1, i32 signext %2) {
 define void @or_ule_lt1(i32 signext %0, i32 signext %1, i32 signext %2) {
 ; RV32I-LABEL: or_ule_lt1:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    sltu a0, a1, a0
-; RV32I-NEXT:    sgtz a1, a2
-; RV32I-NEXT:    and a0, a1, a0
-; RV32I-NEXT:    bnez a0, .LBB40_2
+; RV32I-NEXT:    bgeu a1, a0, .LBB40_3
 ; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB40_2:
+; RV32I-NEXT:    blez a2, .LBB40_3
+; RV32I-NEXT:  # %bb.2:
 ; RV32I-NEXT:    tail bar@plt
+; RV32I-NEXT:  .LBB40_3:
+; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: or_ule_lt1:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    sltu a0, a1, a0
-; RV64I-NEXT:    sgtz a1, a2
-; RV64I-NEXT:    and a0, a1, a0
-; RV64I-NEXT:    bnez a0, .LBB40_2
+; RV64I-NEXT:    bgeu a1, a0, .LBB40_3
 ; RV64I-NEXT:  # %bb.1:
-; RV64I-NEXT:    ret
-; RV64I-NEXT:  .LBB40_2:
+; RV64I-NEXT:    blez a2, .LBB40_3
+; RV64I-NEXT:  # %bb.2:
 ; RV64I-NEXT:    tail bar@plt
+; RV64I-NEXT:  .LBB40_3:
+; RV64I-NEXT:    ret
   %4 = icmp ule i32 %0, %1
   %5 = icmp slt i32 %2, 1
   %6 = or i1 %4, %5

diff --git a/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll b/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
index 07b921e3d90f33..771a72f8d55f0b 100644
--- a/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
+++ b/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
@@ -10,16 +10,19 @@ define dso_local i32 @test_zext_i8() nounwind {
 ; RV32I-LABEL: test_zext_i8:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a0, %hi(bytes)
-; RV32I-NEXT:    addi a1, a0, %lo(bytes)
-; RV32I-NEXT:    lbu a0, %lo(bytes)(a0)
-; RV32I-NEXT:    lbu a1, 1(a1)
-; RV32I-NEXT:    xori a0, a0, 136
-; RV32I-NEXT:    xori a1, a1, 7
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    beqz a0, .LBB0_2
-; RV32I-NEXT:  # %bb.1: # %if.then
+; RV32I-NEXT:    lbu a1, %lo(bytes)(a0)
+; RV32I-NEXT:    li a2, 136
+; RV32I-NEXT:    bne a1, a2, .LBB0_3
+; RV32I-NEXT:  # %bb.1: # %entry
+; RV32I-NEXT:    addi a0, a0, %lo(bytes)
+; RV32I-NEXT:    lbu a0, 1(a0)
+; RV32I-NEXT:    li a1, 7
+; RV32I-NEXT:    bne a0, a1, .LBB0_3
+; RV32I-NEXT:  # %bb.2: # %if.end
+; RV32I-NEXT:    li a0, 0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB0_3: # %if.then
 ; RV32I-NEXT:    li a0, 1
-; RV32I-NEXT:  .LBB0_2: # %if.end
 ; RV32I-NEXT:    ret
 entry:
   %0 = load i8, ptr @bytes, align 1
@@ -42,18 +45,20 @@ define dso_local i32 @test_zext_i16() nounwind {
 ; RV32I-LABEL: test_zext_i16:
 ; RV32I:       # %bb.0: # %entry
 ; RV32I-NEXT:    lui a0, %hi(shorts)
-; RV32I-NEXT:    addi a1, a0, %lo(shorts)
-; RV32I-NEXT:    lhu a0, %lo(shorts)(a0)
-; RV32I-NEXT:    lhu a1, 2(a1)
+; RV32I-NEXT:    lhu a1, %lo(shorts)(a0)
 ; RV32I-NEXT:    lui a2, 16
 ; RV32I-NEXT:    addi a2, a2, -120
-; RV32I-NEXT:    xor a0, a0, a2
-; RV32I-NEXT:    xori a1, a1, 7
-; RV32I-NEXT:    or a0, a0, a1
-; RV32I-NEXT:    beqz a0, .LBB1_2
-; RV32I-NEXT:  # %bb.1: # %if.then
+; RV32I-NEXT:    bne a1, a2, .LBB1_3
+; RV32I-NEXT:  # %bb.1: # %entry
+; RV32I-NEXT:    addi a0, a0, %lo(shorts)
+; RV32I-NEXT:    lhu a0, 2(a0)
+; RV32I-NEXT:    li a1, 7
+; RV32I-NEXT:    bne a0, a1, .LBB1_3
+; RV32I-NEXT:  # %bb.2: # %if.end
+; RV32I-NEXT:    li a0, 0
+; RV32I-NEXT:    ret
+; RV32I-NEXT:  .LBB1_3: # %if.then
 ; RV32I-NEXT:    li a0, 1
-; RV32I-NEXT:  .LBB1_2: # %if.end
 ; RV32I-NEXT:    ret
 entry:
   %0 = load i16, ptr @shorts, align 2


        

