[llvm] 17e2df6 - [RISCV] Removed the XLenVT requirement for performSELECTCombine.

Mikhail Gudim via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jul 12 13:29:48 PDT 2023


Author: Mikhail Gudim
Date: 2023-07-12T16:29:09-04:00
New Revision: 17e2df6695d32d140dc3846caca1c8686b18a911

URL: https://github.com/llvm/llvm-project/commit/17e2df6695d32d140dc3846caca1c8686b18a911
DIFF: https://github.com/llvm/llvm-project/commit/17e2df6695d32d140dc3846caca1c8686b18a911.diff

LOG: [RISCV] Removed the XLenVT requirement for performSELECTCombine.

performSELECTCombine previously bailed out unless the select produced a
value of XLenVT, so tryFoldSelectIntoOp never fired for types such as i64
on RV32 or i32 on RV64. With the check removed, those selects can be
folded into their arithmetic operand as well, as the updated tests show.

Reviewed By: Craig Topper

Differential Revision: https://reviews.llvm.org/D153044
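
For illustration (a hand-reduced example; the function name is mine, but
condops.ll's @add1 below exercises this shape, and full IR bodies are
truncated in this diff): with the XLenVT restriction gone, an i64 select
on RV32 can now reach tryFoldSelectIntoOp.

    ; Hypothetical reduction of the @add1 shape.
    define i64 @fold_i64_add(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
      %add = add i64 %rs1, %rs2
      ; With Zicond on RV32 this now lowers as rs1 + (rc ? rs2 : 0), one
      ; czero.eqz per register half, instead of computing both arms and
      ; selecting between them with four czero instructions and two ors.
      %sel = select i1 %rc, i64 %add, i64 %rs1
      ret i64 %sel
    }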

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/condops.ll
    llvm/test/CodeGen/RISCV/select.ll
    llvm/test/CodeGen/RISCV/sextw-removal.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9a79426d23ac49..e98e37d9347065 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -12135,10 +12135,6 @@ static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
   if (Subtarget.hasShortForwardBranchOpt())
     return SDValue();
 
-  // Only support XLenVT.
-  if (N->getValueType(0) != Subtarget.getXLenVT())
-    return SDValue();
-
   SDValue TrueVal = N->getOperand(1);
   SDValue FalseVal = N->getOperand(2);
   if (SDValue V = tryFoldSelectIntoOp(N, DAG, TrueVal, FalseVal, /*Swapped*/false))
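
The rewrite itself lives in tryFoldSelectIntoOp, which this patch does not
modify: judging from the test changes, it folds a select whose other arm
equals one operand of the binop into the binop's identity element, roughly
select %c, (add %x, %y), %x --> add %x, (select %c, %y, 0), and the inner
select then becomes a single czero.eqz/czero.nez (Zicond) or
vt.maskc/vt.maskcn (XVentanaCondOps). A hand-written reduction of the
@select_add_1 shape from select.ll below:

    define i32 @fold_i32_add(i1 zeroext %cond, i32 %a, i32 %b) {
      %c = add i32 %a, %b
      ; On RV64 Zicond this is now czero.eqz a0, a1, a0 followed by
      ; addw a0, a2, a0, i.e. b + (cond ? a : 0).
      %res = select i1 %cond, i32 %c, i32 %b
      ret i32 %res
    }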

diff --git a/llvm/test/CodeGen/RISCV/condops.ll b/llvm/test/CodeGen/RISCV/condops.ll
index a88d3f70ddbe3d..1daa04a8ac372b 100644
--- a/llvm/test/CodeGen/RISCV/condops.ll
+++ b/llvm/test/CodeGen/RISCV/condops.ll
@@ -69,17 +69,12 @@ define i64 @add1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ;
 ; RV32ZICOND-LABEL: add1:
 ; RV32ZICOND:       # %bb.0:
-; RV32ZICOND-NEXT:    add a4, a2, a4
-; RV32ZICOND-NEXT:    add a3, a1, a3
-; RV32ZICOND-NEXT:    sltu a5, a3, a1
-; RV32ZICOND-NEXT:    add a4, a4, a5
-; RV32ZICOND-NEXT:    czero.nez a1, a1, a0
-; RV32ZICOND-NEXT:    czero.eqz a3, a3, a0
-; RV32ZICOND-NEXT:    or a3, a3, a1
-; RV32ZICOND-NEXT:    czero.eqz a1, a4, a0
-; RV32ZICOND-NEXT:    czero.nez a0, a2, a0
-; RV32ZICOND-NEXT:    or a1, a1, a0
-; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    czero.eqz a4, a4, a0
+; RV32ZICOND-NEXT:    add a2, a2, a4
+; RV32ZICOND-NEXT:    czero.eqz a0, a3, a0
+; RV32ZICOND-NEXT:    add a0, a1, a0
+; RV32ZICOND-NEXT:    sltu a1, a0, a1
+; RV32ZICOND-NEXT:    add a1, a2, a1
 ; RV32ZICOND-NEXT:    ret
 ;
 ; RV64ZICOND-LABEL: add1:
@@ -107,17 +102,12 @@ define i64 @add2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ;
 ; RV32ZICOND-LABEL: add2:
 ; RV32ZICOND:       # %bb.0:
-; RV32ZICOND-NEXT:    add a2, a2, a4
-; RV32ZICOND-NEXT:    add a5, a1, a3
-; RV32ZICOND-NEXT:    sltu a1, a5, a1
+; RV32ZICOND-NEXT:    czero.eqz a2, a2, a0
+; RV32ZICOND-NEXT:    add a2, a4, a2
+; RV32ZICOND-NEXT:    czero.eqz a0, a1, a0
+; RV32ZICOND-NEXT:    add a0, a3, a0
+; RV32ZICOND-NEXT:    sltu a1, a0, a3
 ; RV32ZICOND-NEXT:    add a1, a2, a1
-; RV32ZICOND-NEXT:    czero.nez a2, a3, a0
-; RV32ZICOND-NEXT:    czero.eqz a3, a5, a0
-; RV32ZICOND-NEXT:    or a2, a3, a2
-; RV32ZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV32ZICOND-NEXT:    czero.nez a0, a4, a0
-; RV32ZICOND-NEXT:    or a1, a1, a0
-; RV32ZICOND-NEXT:    mv a0, a2
 ; RV32ZICOND-NEXT:    ret
 ;
 ; RV64ZICOND-LABEL: add2:
@@ -145,17 +135,12 @@ define i64 @add3(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ;
 ; RV32ZICOND-LABEL: add3:
 ; RV32ZICOND:       # %bb.0:
-; RV32ZICOND-NEXT:    add a4, a2, a4
-; RV32ZICOND-NEXT:    add a3, a1, a3
-; RV32ZICOND-NEXT:    sltu a5, a3, a1
-; RV32ZICOND-NEXT:    add a4, a4, a5
-; RV32ZICOND-NEXT:    czero.eqz a1, a1, a0
-; RV32ZICOND-NEXT:    czero.nez a3, a3, a0
-; RV32ZICOND-NEXT:    or a3, a1, a3
-; RV32ZICOND-NEXT:    czero.nez a1, a4, a0
-; RV32ZICOND-NEXT:    czero.eqz a0, a2, a0
-; RV32ZICOND-NEXT:    or a1, a0, a1
-; RV32ZICOND-NEXT:    mv a0, a3
+; RV32ZICOND-NEXT:    czero.nez a4, a4, a0
+; RV32ZICOND-NEXT:    add a2, a2, a4
+; RV32ZICOND-NEXT:    czero.nez a0, a3, a0
+; RV32ZICOND-NEXT:    add a0, a1, a0
+; RV32ZICOND-NEXT:    sltu a1, a0, a1
+; RV32ZICOND-NEXT:    add a1, a2, a1
 ; RV32ZICOND-NEXT:    ret
 ;
 ; RV64ZICOND-LABEL: add3:
@@ -183,17 +168,12 @@ define i64 @add4(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ;
 ; RV32ZICOND-LABEL: add4:
 ; RV32ZICOND:       # %bb.0:
-; RV32ZICOND-NEXT:    add a2, a2, a4
-; RV32ZICOND-NEXT:    add a5, a1, a3
-; RV32ZICOND-NEXT:    sltu a1, a5, a1
+; RV32ZICOND-NEXT:    czero.nez a2, a2, a0
+; RV32ZICOND-NEXT:    add a2, a4, a2
+; RV32ZICOND-NEXT:    czero.nez a0, a1, a0
+; RV32ZICOND-NEXT:    add a0, a3, a0
+; RV32ZICOND-NEXT:    sltu a1, a0, a3
 ; RV32ZICOND-NEXT:    add a1, a2, a1
-; RV32ZICOND-NEXT:    czero.eqz a2, a3, a0
-; RV32ZICOND-NEXT:    czero.nez a3, a5, a0
-; RV32ZICOND-NEXT:    or a2, a2, a3
-; RV32ZICOND-NEXT:    czero.nez a1, a1, a0
-; RV32ZICOND-NEXT:    czero.eqz a0, a4, a0
-; RV32ZICOND-NEXT:    or a1, a0, a1
-; RV32ZICOND-NEXT:    mv a0, a2
 ; RV32ZICOND-NEXT:    ret
 ;
 ; RV64ZICOND-LABEL: add4:
@@ -221,14 +201,12 @@ define i64 @sub1(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ;
 ; RV32ZICOND-LABEL: sub1:
 ; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.eqz a3, a3, a0
 ; RV32ZICOND-NEXT:    sltu a5, a1, a3
-; RV32ZICOND-NEXT:    sub a4, a2, a4
-; RV32ZICOND-NEXT:    sub a4, a4, a5
-; RV32ZICOND-NEXT:    czero.eqz a4, a4, a0
-; RV32ZICOND-NEXT:    czero.nez a2, a2, a0
-; RV32ZICOND-NEXT:    or a2, a4, a2
-; RV32ZICOND-NEXT:    czero.eqz a0, a3, a0
-; RV32ZICOND-NEXT:    sub a0, a1, a0
+; RV32ZICOND-NEXT:    czero.eqz a0, a4, a0
+; RV32ZICOND-NEXT:    sub a2, a2, a0
+; RV32ZICOND-NEXT:    sub a2, a2, a5
+; RV32ZICOND-NEXT:    sub a0, a1, a3
 ; RV32ZICOND-NEXT:    mv a1, a2
 ; RV32ZICOND-NEXT:    ret
 ;
@@ -257,14 +235,12 @@ define i64 @sub2(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
 ;
 ; RV32ZICOND-LABEL: sub2:
 ; RV32ZICOND:       # %bb.0:
+; RV32ZICOND-NEXT:    czero.nez a3, a3, a0
 ; RV32ZICOND-NEXT:    sltu a5, a1, a3
-; RV32ZICOND-NEXT:    sub a4, a2, a4
-; RV32ZICOND-NEXT:    sub a4, a4, a5
-; RV32ZICOND-NEXT:    czero.nez a4, a4, a0
-; RV32ZICOND-NEXT:    czero.eqz a2, a2, a0
-; RV32ZICOND-NEXT:    or a2, a2, a4
-; RV32ZICOND-NEXT:    czero.nez a0, a3, a0
-; RV32ZICOND-NEXT:    sub a0, a1, a0
+; RV32ZICOND-NEXT:    czero.nez a0, a4, a0
+; RV32ZICOND-NEXT:    sub a2, a2, a0
+; RV32ZICOND-NEXT:    sub a2, a2, a5
+; RV32ZICOND-NEXT:    sub a0, a1, a3
 ; RV32ZICOND-NEXT:    mv a1, a2
 ; RV32ZICOND-NEXT:    ret
 ;
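Subtraction is handled the same way even though it is not commutative,
because the folded value is the subtrahend: roughly
select %c, (sub %x, %y), %x --> sub %x, (select %c, %y, 0). A hand-reduced
i64 version of the @sub1 shape above (names are mine):

    define i64 @fold_i64_sub(i1 zeroext %rc, i64 %rs1, i64 %rs2) {
      %sub = sub i64 %rs1, %rs2
      ; Now lowered on RV32 Zicond as rs1 - (rc ? rs2 : 0); note that the
      ; borrow (sltu) is computed from the already-masked low half of rs2.
      %sel = select i1 %rc, i64 %sub, i64 %rs1
      ret i64 %sel
    }
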

diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
index 1bf96ee8edb593..94b95b352e284e 100644
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -589,19 +589,15 @@ define i32 @select_add_1(i1 zeroext %cond, i32 %a, i32 %b) {
 ;
 ; RV64IM-LABEL: select_add_1:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    beqz a0, .LBB16_2
-; RV64IM-NEXT:  # %bb.1:
-; RV64IM-NEXT:    addw a2, a1, a2
-; RV64IM-NEXT:  .LBB16_2: # %entry
-; RV64IM-NEXT:    mv a0, a2
+; RV64IM-NEXT:    negw a0, a0
+; RV64IM-NEXT:    and a0, a0, a1
+; RV64IM-NEXT:    addw a0, a2, a0
 ; RV64IM-NEXT:    ret
 ;
 ; RV64IMXVTCONDOPS-LABEL: select_add_1:
 ; RV64IMXVTCONDOPS:       # %bb.0: # %entry
-; RV64IMXVTCONDOPS-NEXT:    addw a1, a1, a2
-; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a2, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskc a0, a1, a0
-; RV64IMXVTCONDOPS-NEXT:    or a0, a0, a2
+; RV64IMXVTCONDOPS-NEXT:    addw a0, a2, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_add_1:
@@ -612,10 +608,8 @@ define i32 @select_add_1(i1 zeroext %cond, i32 %a, i32 %b) {
 ;
 ; RV64IMZICOND-LABEL: select_add_1:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    addw a1, a1, a2
-; RV64IMZICOND-NEXT:    czero.nez a2, a2, a0
 ; RV64IMZICOND-NEXT:    czero.eqz a0, a1, a0
-; RV64IMZICOND-NEXT:    or a0, a0, a2
+; RV64IMZICOND-NEXT:    addw a0, a2, a0
 ; RV64IMZICOND-NEXT:    ret
 entry:
   %c = add i32 %a, %b
@@ -633,19 +627,15 @@ define i32 @select_add_2(i1 zeroext %cond, i32 %a, i32 %b) {
 ;
 ; RV64IM-LABEL: select_add_2:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    bnez a0, .LBB17_2
-; RV64IM-NEXT:  # %bb.1: # %entry
-; RV64IM-NEXT:    addw a1, a1, a2
-; RV64IM-NEXT:  .LBB17_2: # %entry
-; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    addiw a0, a0, -1
+; RV64IM-NEXT:    and a0, a0, a2
+; RV64IM-NEXT:    addw a0, a1, a0
 ; RV64IM-NEXT:    ret
 ;
 ; RV64IMXVTCONDOPS-LABEL: select_add_2:
 ; RV64IMXVTCONDOPS:       # %bb.0: # %entry
-; RV64IMXVTCONDOPS-NEXT:    addw a2, a1, a2
-; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    addw a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_add_2:
@@ -656,10 +646,8 @@ define i32 @select_add_2(i1 zeroext %cond, i32 %a, i32 %b) {
 ;
 ; RV64IMZICOND-LABEL: select_add_2:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    addw a2, a1, a2
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
 ; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
-; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    addw a0, a1, a0
 ; RV64IMZICOND-NEXT:    ret
 entry:
   %c = add i32 %a, %b
@@ -677,19 +665,16 @@ define i32 @select_add_3(i1 zeroext %cond, i32 %a) {
 ;
 ; RV64IM-LABEL: select_add_3:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    bnez a0, .LBB18_2
-; RV64IM-NEXT:  # %bb.1: # %entry
-; RV64IM-NEXT:    addiw a1, a1, 42
-; RV64IM-NEXT:  .LBB18_2: # %entry
-; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    addiw a0, a0, -1
+; RV64IM-NEXT:    andi a0, a0, 42
+; RV64IM-NEXT:    addw a0, a1, a0
 ; RV64IM-NEXT:    ret
 ;
 ; RV64IMXVTCONDOPS-LABEL: select_add_3:
 ; RV64IMXVTCONDOPS:       # %bb.0: # %entry
-; RV64IMXVTCONDOPS-NEXT:    addiw a2, a1, 42
-; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    li a2, 42
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    addw a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_add_3:
@@ -701,10 +686,9 @@ define i32 @select_add_3(i1 zeroext %cond, i32 %a) {
 ;
 ; RV64IMZICOND-LABEL: select_add_3:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    addiw a2, a1, 42
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    li a2, 42
 ; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
-; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    addw a0, a1, a0
 ; RV64IMZICOND-NEXT:    ret
 entry:
   %c = add i32 %a, 42
@@ -770,19 +754,15 @@ define i32 @select_sub_2(i1 zeroext %cond, i32 %a, i32 %b) {
 ;
 ; RV64IM-LABEL: select_sub_2:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    bnez a0, .LBB20_2
-; RV64IM-NEXT:  # %bb.1: # %entry
-; RV64IM-NEXT:    subw a1, a1, a2
-; RV64IM-NEXT:  .LBB20_2: # %entry
-; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    addiw a0, a0, -1
+; RV64IM-NEXT:    and a0, a0, a2
+; RV64IM-NEXT:    subw a0, a1, a0
 ; RV64IM-NEXT:    ret
 ;
 ; RV64IMXVTCONDOPS-LABEL: select_sub_2:
 ; RV64IMXVTCONDOPS:       # %bb.0: # %entry
-; RV64IMXVTCONDOPS-NEXT:    subw a2, a1, a2
-; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    subw a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_sub_2:
@@ -793,10 +773,8 @@ define i32 @select_sub_2(i1 zeroext %cond, i32 %a, i32 %b) {
 ;
 ; RV64IMZICOND-LABEL: select_sub_2:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    subw a2, a1, a2
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
 ; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
-; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    subw a0, a1, a0
 ; RV64IMZICOND-NEXT:    ret
 entry:
   %c = sub i32 %a, %b
@@ -814,19 +792,16 @@ define i32 @select_sub_3(i1 zeroext %cond, i32 %a) {
 ;
 ; RV64IM-LABEL: select_sub_3:
 ; RV64IM:       # %bb.0: # %entry
-; RV64IM-NEXT:    bnez a0, .LBB21_2
-; RV64IM-NEXT:  # %bb.1: # %entry
-; RV64IM-NEXT:    addiw a1, a1, -42
-; RV64IM-NEXT:  .LBB21_2: # %entry
-; RV64IM-NEXT:    mv a0, a1
+; RV64IM-NEXT:    addiw a0, a0, -1
+; RV64IM-NEXT:    andi a0, a0, 42
+; RV64IM-NEXT:    subw a0, a1, a0
 ; RV64IM-NEXT:    ret
 ;
 ; RV64IMXVTCONDOPS-LABEL: select_sub_3:
 ; RV64IMXVTCONDOPS:       # %bb.0: # %entry
-; RV64IMXVTCONDOPS-NEXT:    addiw a2, a1, -42
-; RV64IMXVTCONDOPS-NEXT:    vt.maskc a1, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    li a2, 42
 ; RV64IMXVTCONDOPS-NEXT:    vt.maskcn a0, a2, a0
-; RV64IMXVTCONDOPS-NEXT:    or a0, a1, a0
+; RV64IMXVTCONDOPS-NEXT:    subw a0, a1, a0
 ; RV64IMXVTCONDOPS-NEXT:    ret
 ;
 ; RV32IMZICOND-LABEL: select_sub_3:
@@ -838,10 +813,9 @@ define i32 @select_sub_3(i1 zeroext %cond, i32 %a) {
 ;
 ; RV64IMZICOND-LABEL: select_sub_3:
 ; RV64IMZICOND:       # %bb.0: # %entry
-; RV64IMZICOND-NEXT:    addiw a2, a1, -42
-; RV64IMZICOND-NEXT:    czero.eqz a1, a1, a0
+; RV64IMZICOND-NEXT:    li a2, 42
 ; RV64IMZICOND-NEXT:    czero.nez a0, a2, a0
-; RV64IMZICOND-NEXT:    or a0, a1, a0
+; RV64IMZICOND-NEXT:    subw a0, a1, a0
 ; RV64IMZICOND-NEXT:    ret
 entry:
   %c = sub i32 %a, 42
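
When the folded operand is a constant, it is now materialized with li and
masked, rather than pre-computing the add/sub and selecting. Reconstructed
IR for @select_sub_3 (the entry block appears in the context lines above;
the select orientation is inferred from the CHECK lines):

    define i32 @select_sub_3(i1 zeroext %cond, i32 %a) {
    entry:
      %c = sub i32 %a, 42
      ; RV64 Zicond: li a2, 42; czero.nez a0, a2, a0; subw a0, a1, a0,
      ; i.e. a - (cond ? 0 : 42).
      %res = select i1 %cond, i32 %a, i32 %c
      ret i32 %res
    }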

diff --git a/llvm/test/CodeGen/RISCV/sextw-removal.ll b/llvm/test/CodeGen/RISCV/sextw-removal.ll
index 4ebff15bee2557..1bd583e7b03671 100644
--- a/llvm/test/CodeGen/RISCV/sextw-removal.ll
+++ b/llvm/test/CodeGen/RISCV/sextw-removal.ll
@@ -1040,37 +1040,37 @@ define signext i32 @bug(i32 signext %x) {
 ; CHECK-NEXT:    li a1, 32
 ; CHECK-NEXT:    j .LBB18_4
 ; CHECK-NEXT:  .LBB18_3:
-; CHECK-NEXT:    slliw a0, a0, 16
+; CHECK-NEXT:    slli a0, a0, 16
 ; CHECK-NEXT:    li a1, 16
 ; CHECK-NEXT:  .LBB18_4: # %if.end
 ; CHECK-NEXT:    srliw a3, a0, 24
 ; CHECK-NEXT:    snez a2, a3
 ; CHECK-NEXT:    bnez a3, .LBB18_6
 ; CHECK-NEXT:  # %bb.5:
-; CHECK-NEXT:    slliw a0, a0, 8
+; CHECK-NEXT:    slli a0, a0, 8
 ; CHECK-NEXT:  .LBB18_6: # %if.end
-; CHECK-NEXT:    addiw a2, a2, -1
+; CHECK-NEXT:    addi a2, a2, -1
 ; CHECK-NEXT:    andi a2, a2, -8
 ; CHECK-NEXT:    add a1, a1, a2
 ; CHECK-NEXT:    srliw a3, a0, 28
 ; CHECK-NEXT:    snez a2, a3
 ; CHECK-NEXT:    bnez a3, .LBB18_8
 ; CHECK-NEXT:  # %bb.7:
-; CHECK-NEXT:    slliw a0, a0, 4
+; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:  .LBB18_8: # %if.end
-; CHECK-NEXT:    addiw a2, a2, -1
+; CHECK-NEXT:    addi a2, a2, -1
 ; CHECK-NEXT:    andi a2, a2, -4
 ; CHECK-NEXT:    add a1, a1, a2
 ; CHECK-NEXT:    srliw a3, a0, 30
 ; CHECK-NEXT:    snez a2, a3
 ; CHECK-NEXT:    bnez a3, .LBB18_10
 ; CHECK-NEXT:  # %bb.9:
-; CHECK-NEXT:    slliw a0, a0, 2
+; CHECK-NEXT:    slli a0, a0, 2
 ; CHECK-NEXT:  .LBB18_10: # %if.end
-; CHECK-NEXT:    addiw a2, a2, -1
+; CHECK-NEXT:    addi a2, a2, -1
 ; CHECK-NEXT:    andi a2, a2, -2
+; CHECK-NEXT:    sraiw a0, a0, 31
 ; CHECK-NEXT:    not a0, a0
-; CHECK-NEXT:    srli a0, a0, 31
 ; CHECK-NEXT:    add a0, a2, a0
 ; CHECK-NEXT:    addw a0, a1, a0
 ; CHECK-NEXT:  .LBB18_11: # %cleanup
@@ -1095,7 +1095,7 @@ define signext i32 @bug(i32 signext %x) {
 ; NOREMOVAL-NEXT:  # %bb.5:
 ; NOREMOVAL-NEXT:    slli a0, a0, 8
 ; NOREMOVAL-NEXT:  .LBB18_6: # %if.end
-; NOREMOVAL-NEXT:    addiw a2, a2, -1
+; NOREMOVAL-NEXT:    addi a2, a2, -1
 ; NOREMOVAL-NEXT:    andi a2, a2, -8
 ; NOREMOVAL-NEXT:    add a1, a1, a2
 ; NOREMOVAL-NEXT:    srliw a3, a0, 28
@@ -1104,7 +1104,7 @@ define signext i32 @bug(i32 signext %x) {
 ; NOREMOVAL-NEXT:  # %bb.7:
 ; NOREMOVAL-NEXT:    slli a0, a0, 4
 ; NOREMOVAL-NEXT:  .LBB18_8: # %if.end
-; NOREMOVAL-NEXT:    addiw a2, a2, -1
+; NOREMOVAL-NEXT:    addi a2, a2, -1
 ; NOREMOVAL-NEXT:    andi a2, a2, -4
 ; NOREMOVAL-NEXT:    add a1, a1, a2
 ; NOREMOVAL-NEXT:    srliw a3, a0, 30
@@ -1113,14 +1113,14 @@ define signext i32 @bug(i32 signext %x) {
 ; NOREMOVAL-NEXT:  # %bb.9:
 ; NOREMOVAL-NEXT:    slli a0, a0, 2
 ; NOREMOVAL-NEXT:  .LBB18_10: # %if.end
-; NOREMOVAL-NEXT:    sext.w a0, a0
-; NOREMOVAL-NEXT:    addiw a2, a2, -1
+; NOREMOVAL-NEXT:    addi a2, a2, -1
 ; NOREMOVAL-NEXT:    andi a2, a2, -2
+; NOREMOVAL-NEXT:    sraiw a0, a0, 31
 ; NOREMOVAL-NEXT:    not a0, a0
-; NOREMOVAL-NEXT:    srli a0, a0, 31
 ; NOREMOVAL-NEXT:    add a0, a2, a0
-; NOREMOVAL-NEXT:    addw a0, a1, a0
+; NOREMOVAL-NEXT:    add a0, a1, a0
 ; NOREMOVAL-NEXT:  .LBB18_11: # %cleanup
+; NOREMOVAL-NEXT:    sext.w a0, a0
 ; NOREMOVAL-NEXT:    ret
 entry:
   %tobool.not = icmp eq i32 %x, 0
