[llvm] [RISCV] Enable the TypePromotion pass from AArch64/ARM. (PR #81574)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 12 21:57:18 PST 2024


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/81574

This pass looks for unsigned icmps that have illegal types and tries
to widen the use/def graph to improve the placement of the zero
extends that type legalization would need to insert.
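
For intuition, here is a hand-written sketch (illustrative, not the pass's
literal output) of the kind of rewrite this enables on riscv64, where i8 is
an illegal type:

```llvm
; Before: the i8 compare forces type legalization to zero extend
; %add at its use.
define i1 @cmp_narrow(i8 zeroext %a, i8 zeroext %b) {
  %add = add i8 %a, %b
  %cmp = icmp ult i8 %add, 100
  ret i1 %cmp
}

; After promotion: the arithmetic runs at register width and a single
; mask is placed only where overflow could change the unsigned compare.
define i1 @cmp_promoted(i8 zeroext %a, i8 zeroext %b) {
  %a.w = zext i8 %a to i64
  %b.w = zext i8 %b to i64
  %add = add i64 %a.w, %b.w
  %masked = and i64 %add, 255
  %cmp = icmp ult i64 %masked, 100
  ret i1 %cmp
}
```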
    
I've explicitly disabled it for i32 by adding a check for
isSExtCheaperThanZExt to the pass. On RV64, i32 values are normally kept
sign extended, so rewriting them to use zero extends would add
instructions rather than remove them.
    
The generated code isn't perfect in the lit tests, but my data shows a net
dynamic instruction count improvement on SPEC 2017 for both base and
Zba+Zbb+Zbs.

SPEC 2017 dynamic instruction counts for rv64gc_zba_zbb_zbs:

| Metric | Before | After | After/Before |
|--------|--------|-------|--------------|
| 500.perlbench_r | 3032622956407 | 3032363566062 | 99.991% |
| 502.gcc_r | 1231223971793 | 1226644351851 | 99.628% |
| 505.mcf_r | 696461864012 | 696461368989 | 100.000% |
| 520.omnetpp_r | 1142136357540 | 1142234473240 | 100.009% |
| 523.xalancbmk_r | 1157112062701 | 1148472443956 | 99.253% |
| 525.x264_r | 3970723342408 | 3970671717801 | 99.999% |
| 531.deepsjeng_r | 1833000391081 | 1833000391091 | 100.000% |
| 541.leela_r | 2059859108461 | 2059858783398 | 100.000% |
| 557.xz_r | 1806198505643 | 1806198505653 | 100.000% |
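
For anyone wanting to confirm where the pass lands in the codegen pipeline,
the O3-pipeline.ll update below checks exactly this; a minimal standalone
check (a sketch, using the same flags as that test) could look like:

```llvm
; Hypothetical sketch mirroring llvm/test/CodeGen/RISCV/O3-pipeline.ll:
; RUN: llc -mtriple=riscv64 -O3 -debug-pass=Structure < %s -o /dev/null 2>&1 \
; RUN:   | FileCheck %s
; CHECK: Type Promotion
; CHECK-NEXT: CodeGen Prepare
```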

From 12d37cfe402dbad90cc89822ab815adfbd8288bd Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 12 Feb 2024 21:17:53 -0800
Subject: [PATCH 1/2] [RISCV] Copy typepromotion-overflow.ll from AArch64. NFC

---
 .../CodeGen/RISCV/typepromotion-overflow.ll   | 388 ++++++++++++++++++
 1 file changed, 388 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/typepromotion-overflow.ll

diff --git a/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
new file mode 100644
index 00000000000000..fad9e6c0756b36
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
@@ -0,0 +1,388 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+m %s -o - | FileCheck %s
+
+define zeroext i16 @overflow_add(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: overflow_add:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    ori a0, a0, 1
+; CHECK-NEXT:    slli a0, a0, 48
+; CHECK-NEXT:    srli a1, a0, 48
+; CHECK-NEXT:    li a2, 1024
+; CHECK-NEXT:    li a0, 2
+; CHECK-NEXT:    bltu a2, a1, .LBB0_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 5
+; CHECK-NEXT:  .LBB0_2:
+; CHECK-NEXT:    ret
+  %add = add i16 %b, %a
+  %or = or i16 %add, 1
+  %cmp = icmp ugt i16 %or, 1024
+  %res = select i1 %cmp, i16 2, i16 5
+  ret i16 %res
+}
+
+define zeroext i16 @overflow_sub(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: overflow_sub:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    subw a0, a0, a1
+; CHECK-NEXT:    ori a0, a0, 1
+; CHECK-NEXT:    slli a0, a0, 48
+; CHECK-NEXT:    srli a1, a0, 48
+; CHECK-NEXT:    li a2, 1024
+; CHECK-NEXT:    li a0, 2
+; CHECK-NEXT:    bltu a2, a1, .LBB1_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 5
+; CHECK-NEXT:  .LBB1_2:
+; CHECK-NEXT:    ret
+  %add = sub i16 %a, %b
+  %or = or i16 %add, 1
+  %cmp = icmp ugt i16 %or, 1024
+  %res = select i1 %cmp, i16 2, i16 5
+  ret i16 %res
+}
+
+define zeroext i16 @overflow_mul(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: overflow_mul:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    mul a0, a1, a0
+; CHECK-NEXT:    ori a0, a0, 1
+; CHECK-NEXT:    slli a0, a0, 48
+; CHECK-NEXT:    srli a1, a0, 48
+; CHECK-NEXT:    li a2, 1024
+; CHECK-NEXT:    li a0, 2
+; CHECK-NEXT:    bltu a2, a1, .LBB2_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 5
+; CHECK-NEXT:  .LBB2_2:
+; CHECK-NEXT:    ret
+  %add = mul i16 %b, %a
+  %or = or i16 %add, 1
+  %cmp = icmp ugt i16 %or, 1024
+  %res = select i1 %cmp, i16 2, i16 5
+  ret i16 %res
+}
+
+define zeroext i16 @overflow_shl(i16 zeroext %a, i16 zeroext %b) {
+; CHECK-LABEL: overflow_shl:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    sll a0, a0, a1
+; CHECK-NEXT:    ori a0, a0, 1
+; CHECK-NEXT:    slli a0, a0, 48
+; CHECK-NEXT:    srli a1, a0, 48
+; CHECK-NEXT:    li a2, 1024
+; CHECK-NEXT:    li a0, 2
+; CHECK-NEXT:    bltu a2, a1, .LBB3_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 5
+; CHECK-NEXT:  .LBB3_2:
+; CHECK-NEXT:    ret
+  %add = shl i16 %a, %b
+  %or = or i16 %add, 1
+  %cmp = icmp ugt i16 %or, 1024
+  %res = select i1 %cmp, i16 2, i16 5
+  ret i16 %res
+}
+
+define i32 @overflow_add_no_consts(i8 zeroext %a, i8 zeroext %b, i8 zeroext %limit) {
+; CHECK-LABEL: overflow_add_no_consts:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    andi a1, a0, 255
+; CHECK-NEXT:    li a0, 8
+; CHECK-NEXT:    bltu a2, a1, .LBB4_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB4_2:
+; CHECK-NEXT:    ret
+  %add = add i8 %b, %a
+  %cmp = icmp ugt i8 %add, %limit
+  %res = select i1 %cmp, i32 8, i32 16
+  ret i32 %res
+}
+
+define i32 @overflow_add_const_limit(i8 zeroext %a, i8 zeroext %b) {
+; CHECK-LABEL: overflow_add_const_limit:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    andi a1, a0, 255
+; CHECK-NEXT:    li a2, 128
+; CHECK-NEXT:    li a0, 8
+; CHECK-NEXT:    bltu a2, a1, .LBB5_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB5_2:
+; CHECK-NEXT:    ret
+  %add = add i8 %b, %a
+  %cmp = icmp ugt i8 %add, -128
+  %res = select i1 %cmp, i32 8, i32 16
+  ret i32 %res
+}
+
+define i32 @overflow_add_positive_const_limit(i8 zeroext %a) {
+; CHECK-LABEL: overflow_add_positive_const_limit:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    slli a0, a0, 56
+; CHECK-NEXT:    srai a1, a0, 56
+; CHECK-NEXT:    li a2, -1
+; CHECK-NEXT:    li a0, 8
+; CHECK-NEXT:    blt a1, a2, .LBB6_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB6_2:
+; CHECK-NEXT:    ret
+  %cmp = icmp slt i8 %a, -1
+  %res = select i1 %cmp, i32 8, i32 16
+  ret i32 %res
+}
+
+define i32 @unsafe_add_underflow(i8 zeroext %a) {
+; CHECK-LABEL: unsafe_add_underflow:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    mv a1, a0
+; CHECK-NEXT:    li a2, 1
+; CHECK-NEXT:    li a0, 8
+; CHECK-NEXT:    beq a1, a2, .LBB7_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB7_2:
+; CHECK-NEXT:    ret
+  %cmp = icmp eq i8 %a, 1
+  %res = select i1 %cmp, i32 8, i32 16
+  ret i32 %res
+}
+
+define i32 @safe_add_underflow(i8 zeroext %a) {
+; CHECK-LABEL: safe_add_underflow:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    mv a1, a0
+; CHECK-NEXT:    li a0, 8
+; CHECK-NEXT:    beqz a1, .LBB8_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB8_2:
+; CHECK-NEXT:    ret
+  %cmp = icmp eq i8 %a, 0
+  %res = select i1 %cmp, i32 8, i32 16
+  ret i32 %res
+}
+
+define i32 @safe_add_underflow_neg(i8 zeroext %a) {
+; CHECK-LABEL: safe_add_underflow_neg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, a0, -2
+; CHECK-NEXT:    andi a1, a0, 255
+; CHECK-NEXT:    li a2, 251
+; CHECK-NEXT:    li a0, 8
+; CHECK-NEXT:    bltu a1, a2, .LBB9_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB9_2:
+; CHECK-NEXT:    ret
+  %add = add i8 %a, -2
+  %cmp = icmp ult i8 %add, -5
+  %res = select i1 %cmp, i32 8, i32 16
+  ret i32 %res
+}
+
+define i32 @overflow_sub_negative_const_limit(i8 zeroext %a) {
+; CHECK-LABEL: overflow_sub_negative_const_limit:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    slli a0, a0, 56
+; CHECK-NEXT:    srai a1, a0, 56
+; CHECK-NEXT:    li a2, -1
+; CHECK-NEXT:    li a0, 8
+; CHECK-NEXT:    blt a1, a2, .LBB10_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB10_2:
+; CHECK-NEXT:    ret
+  %cmp = icmp slt i8 %a, -1
+  %res = select i1 %cmp, i32 8, i32 16
+  ret i32 %res
+}
+
+; This is valid so long as the icmp immediate is sext.
+define i32 @sext_sub_underflow(i8 zeroext %a) {
+; CHECK-LABEL: sext_sub_underflow:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, a0, -6
+; CHECK-NEXT:    andi a1, a0, 255
+; CHECK-NEXT:    li a2, 250
+; CHECK-NEXT:    li a0, 8
+; CHECK-NEXT:    bltu a2, a1, .LBB11_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB11_2:
+; CHECK-NEXT:    ret
+  %sub = add i8 %a, -6
+  %cmp = icmp ugt i8 %sub, -6
+  %res = select i1 %cmp, i32 8, i32 16
+  ret i32 %res
+}
+
+define i32 @safe_sub_underflow(i8 zeroext %a) {
+; CHECK-LABEL: safe_sub_underflow:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    mv a1, a0
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:    beqz a1, .LBB12_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 8
+; CHECK-NEXT:  .LBB12_2:
+; CHECK-NEXT:    ret
+  %cmp.not = icmp eq i8 %a, 0
+  %res = select i1 %cmp.not, i32 16, i32 8
+  ret i32 %res
+}
+
+define i32 @safe_sub_underflow_neg(i8 zeroext %a) {
+; CHECK-LABEL: safe_sub_underflow_neg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, a0, -4
+; CHECK-NEXT:    andi a1, a0, 255
+; CHECK-NEXT:    li a2, 250
+; CHECK-NEXT:    li a0, 8
+; CHECK-NEXT:    bltu a2, a1, .LBB13_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB13_2:
+; CHECK-NEXT:    ret
+  %sub = add i8 %a, -4
+  %cmp = icmp ugt i8 %sub, -6
+  %res = select i1 %cmp, i32 8, i32 16
+  ret i32 %res
+}
+
+; This is valid so long as the icmp immediate is sext.
+define i32 @sext_sub_underflow_neg(i8 zeroext %a) {
+; CHECK-LABEL: sext_sub_underflow_neg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, a0, -4
+; CHECK-NEXT:    andi a1, a0, 255
+; CHECK-NEXT:    li a2, 253
+; CHECK-NEXT:    li a0, 8
+; CHECK-NEXT:    bltu a1, a2, .LBB14_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:  .LBB14_2:
+; CHECK-NEXT:    ret
+  %sub = add i8 %a, -4
+  %cmp = icmp ult i8 %sub, -3
+  %res = select i1 %cmp, i32 8, i32 16
+  ret i32 %res
+}
+
+define i32 @safe_sub_imm_var(ptr nocapture readonly %b) local_unnamed_addr #1 {
+; CHECK-LABEL: safe_sub_imm_var:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li a0, 0
+; CHECK-NEXT:    ret
+entry:
+  ret i32 0
+}
+
+define i32 @safe_sub_var_imm(ptr nocapture readonly %b) local_unnamed_addr #1 {
+; CHECK-LABEL: safe_sub_var_imm:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lbu a0, 0(a0)
+; CHECK-NEXT:    addi a0, a0, 8
+; CHECK-NEXT:    andi a0, a0, 255
+; CHECK-NEXT:    sltiu a0, a0, 253
+; CHECK-NEXT:    xori a0, a0, 1
+; CHECK-NEXT:    ret
+entry:
+  %0 = load i8, ptr %b, align 1
+  %sub = add nsw i8 %0, 8
+  %cmp = icmp ugt i8 %sub, -4
+  %conv4 = zext i1 %cmp to i32
+  ret i32 %conv4
+}
+
+define i32 @safe_add_imm_var(ptr nocapture readnone %b) {
+; CHECK-LABEL: safe_add_imm_var:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    ret
+entry:
+  ret i32 1
+}
+
+define i32 @safe_add_var_imm(ptr nocapture readnone %b) {
+; CHECK-LABEL: safe_add_var_imm:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    ret
+entry:
+  ret i32 1
+}
+
+define i8 @convert_add_order(i8 zeroext %arg) {
+; CHECK-LABEL: convert_add_order:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    ori a1, a0, 1
+; CHECK-NEXT:    sltiu a2, a1, 50
+; CHECK-NEXT:    addi a1, a1, -40
+; CHECK-NEXT:    andi a1, a1, 255
+; CHECK-NEXT:    sltiu a1, a1, 20
+; CHECK-NEXT:    li a3, 2
+; CHECK-NEXT:    sub a3, a3, a1
+; CHECK-NEXT:    addi a2, a2, -1
+; CHECK-NEXT:    or a2, a2, a3
+; CHECK-NEXT:    and a0, a2, a0
+; CHECK-NEXT:    ret
+  %shl = or i8 %arg, 1
+  %cmp.0 = icmp ult i8 %shl, 50
+  %sub = add nsw i8 %shl, -40
+  %cmp.1 = icmp ult i8 %sub, 20
+  %mask.sel.v = select i1 %cmp.1, i8 1, i8 2
+  %mask.sel = select i1 %cmp.0, i8 %mask.sel.v, i8 -1
+  %res = and i8 %mask.sel, %arg
+  ret i8 %res
+}
+
+define i8 @underflow_if_sub(i32 %arg, i8 zeroext %arg1) {
+; CHECK-LABEL: underflow_if_sub:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    sext.w a2, a0
+; CHECK-NEXT:    sgtz a2, a2
+; CHECK-NEXT:    and a0, a2, a0
+; CHECK-NEXT:    addi a0, a0, -11
+; CHECK-NEXT:    andi a2, a0, 247
+; CHECK-NEXT:    bltu a2, a1, .LBB20_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 100
+; CHECK-NEXT:  .LBB20_2:
+; CHECK-NEXT:    ret
+  %cmp = icmp sgt i32 %arg, 0
+  %conv = zext i1 %cmp to i32
+  %and = and i32 %conv, %arg
+  %trunc = trunc i32 %and to i8
+  %conv1 = add nuw nsw i8 %trunc, -11
+  %cmp.1 = icmp ult i8 %conv1, %arg1
+  %res = select i1 %cmp.1, i8 %conv1, i8 100
+  ret i8 %res
+}
+
+define i8 @underflow_if_sub_signext(i32 %arg, i8 signext %arg1) {
+; CHECK-LABEL: underflow_if_sub_signext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    sext.w a2, a0
+; CHECK-NEXT:    sgtz a2, a2
+; CHECK-NEXT:    and a0, a2, a0
+; CHECK-NEXT:    addi a0, a0, -11
+; CHECK-NEXT:    bltu a0, a1, .LBB21_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a0, 100
+; CHECK-NEXT:  .LBB21_2:
+; CHECK-NEXT:    ret
+  %cmp = icmp sgt i32 %arg, 0
+  %conv = zext i1 %cmp to i32
+  %and = and i32 %conv, %arg
+  %trunc = trunc i32 %and to i8
+  %conv1 = add nuw nsw i8 %trunc, -11
+  %cmp.1 = icmp ult i8 %conv1, %arg1
+  %res = select i1 %cmp.1, i8 %conv1, i8 100
+  ret i8 %res
+}

From e47c3e32a5a39b8212edc3703aa4dce1f84a7be8 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Mon, 12 Feb 2024 21:43:35 -0800
Subject: [PATCH 2/2] [RISCV] Enable the TypePromotion pass from AArch64/ARM.

This pass looks for unsigned icmps that have illegal types and tries
to widen the use/def graph to improve the placement of the zero
extends that type legalization would need to insert.

I've explicitly disabled it for i32 by adding a check for
isSExtCheaperThanZExt to the pass.

The generated code isn't perfect, but my data shows a net
dynamic instruction count improvement on spec2017 for both base and
Zba+Zbb+Zbs.
---
 llvm/lib/CodeGen/TypePromotion.cpp            |   2 +
 llvm/lib/Target/RISCV/RISCVTargetMachine.cpp  |   7 ++
 llvm/test/CodeGen/RISCV/O3-pipeline.ll        |   1 +
 .../RISCV/lack-of-signed-truncation-check.ll  |  92 +++++++++++-----
 llvm/test/CodeGen/RISCV/signbit-test.ll       |  30 ++++-
 .../CodeGen/RISCV/signed-truncation-check.ll  | 104 ++++++++++++------
 .../CodeGen/RISCV/typepromotion-overflow.ll   |  41 ++++---
 7 files changed, 190 insertions(+), 87 deletions(-)

diff --git a/llvm/lib/CodeGen/TypePromotion.cpp b/llvm/lib/CodeGen/TypePromotion.cpp
index 053caf518bd1f7..7a3bc6c2043f4c 100644
--- a/llvm/lib/CodeGen/TypePromotion.cpp
+++ b/llvm/lib/CodeGen/TypePromotion.cpp
@@ -937,6 +937,8 @@ bool TypePromotionImpl::run(Function &F, const TargetMachine *TM,
       return 0;
 
     EVT PromotedVT = TLI->getTypeToTransformTo(*Ctx, SrcVT);
+    if (TLI->isSExtCheaperThanZExt(SrcVT, PromotedVT))
+      return 0;
     if (RegisterBitWidth < PromotedVT.getFixedSizeInBits()) {
       LLVM_DEBUG(dbgs() << "IR Promotion: Couldn't find target register "
                         << "for promoted type\n");
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 4c3da3ad311168..adef40e19cba4a 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -366,6 +366,7 @@ class RISCVPassConfig : public TargetPassConfig {
 
   void addIRPasses() override;
   bool addPreISel() override;
+  void addCodeGenPrepare() override;
   bool addInstSelector() override;
   bool addIRTranslator() override;
   void addPreLegalizeMachineIR() override;
@@ -452,6 +453,12 @@ bool RISCVPassConfig::addPreISel() {
   return false;
 }
 
+void RISCVPassConfig::addCodeGenPrepare() {
+  if (getOptLevel() != CodeGenOptLevel::None)
+    addPass(createTypePromotionLegacyPass());
+  TargetPassConfig::addCodeGenPrepare();
+}
+
 bool RISCVPassConfig::addInstSelector() {
   addPass(createRISCVISelDag(getRISCVTargetMachine(), getOptLevel()));
 
diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
index e7db8ef9d5aff3..364c1e430b9156 100644
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -68,6 +68,7 @@
 ; CHECK-NEXT:       Expand reduction intrinsics
 ; CHECK-NEXT:       Natural Loop Information
 ; CHECK-NEXT:       TLS Variable Hoist
+; CHECK-NEXT:       Type Promotion
 ; CHECK-NEXT:       CodeGen Prepare
 ; CHECK-NEXT:       Dominator Tree Construction
 ; CHECK-NEXT:       Exception handling preparation
diff --git a/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll b/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
index 9e7f2e9525d3b4..6e3a50542939f1 100644
--- a/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
+++ b/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
@@ -254,21 +254,39 @@ define i1 @shifts_necmp_i64_i8(i64 %x) nounwind {
 ; ---------------------------------------------------------------------------- ;
 
 define i1 @add_ultcmp_i16_i8(i16 %x) nounwind {
-; RV32-LABEL: add_ultcmp_i16_i8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, a0, -128
-; RV32-NEXT:    slli a0, a0, 16
-; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:    sltiu a0, a0, 255
-; RV32-NEXT:    ret
+; RV32I-LABEL: add_ultcmp_i16_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    addi a0, a0, -128
+; RV32I-NEXT:    srli a0, a0, 8
+; RV32I-NEXT:    sltiu a0, a0, 255
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: add_ultcmp_i16_i8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, a0, -128
-; RV64-NEXT:    slli a0, a0, 48
-; RV64-NEXT:    srli a0, a0, 56
-; RV64-NEXT:    sltiu a0, a0, 255
-; RV64-NEXT:    ret
+; RV64I-LABEL: add_ultcmp_i16_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    addi a0, a0, -128
+; RV64I-NEXT:    srli a0, a0, 8
+; RV64I-NEXT:    sltiu a0, a0, 255
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: add_ultcmp_i16_i8:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    zext.h a0, a0
+; RV32ZBB-NEXT:    addi a0, a0, -128
+; RV32ZBB-NEXT:    srli a0, a0, 8
+; RV32ZBB-NEXT:    sltiu a0, a0, 255
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: add_ultcmp_i16_i8:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    zext.h a0, a0
+; RV64ZBB-NEXT:    addi a0, a0, -128
+; RV64ZBB-NEXT:    srli a0, a0, 8
+; RV64ZBB-NEXT:    sltiu a0, a0, 255
+; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
   %tmp1 = icmp ult i16 %tmp0, -256 ; ~0U << 8
   ret i1 %tmp1
@@ -421,21 +439,39 @@ define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
 
 ; Slightly more canonical variant
 define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
-; RV32-LABEL: add_ulecmp_i16_i8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, a0, -128
-; RV32-NEXT:    slli a0, a0, 16
-; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:    sltiu a0, a0, 255
-; RV32-NEXT:    ret
+; RV32I-LABEL: add_ulecmp_i16_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    addi a0, a0, -128
+; RV32I-NEXT:    srli a0, a0, 8
+; RV32I-NEXT:    sltiu a0, a0, 255
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: add_ulecmp_i16_i8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, a0, -128
-; RV64-NEXT:    slli a0, a0, 48
-; RV64-NEXT:    srli a0, a0, 56
-; RV64-NEXT:    sltiu a0, a0, 255
-; RV64-NEXT:    ret
+; RV64I-LABEL: add_ulecmp_i16_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    addi a0, a0, -128
+; RV64I-NEXT:    srli a0, a0, 8
+; RV64I-NEXT:    sltiu a0, a0, 255
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: add_ulecmp_i16_i8:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    zext.h a0, a0
+; RV32ZBB-NEXT:    addi a0, a0, -128
+; RV32ZBB-NEXT:    srli a0, a0, 8
+; RV32ZBB-NEXT:    sltiu a0, a0, 255
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: add_ulecmp_i16_i8:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    zext.h a0, a0
+; RV64ZBB-NEXT:    addi a0, a0, -128
+; RV64ZBB-NEXT:    srli a0, a0, 8
+; RV64ZBB-NEXT:    sltiu a0, a0, 255
+; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
   %tmp1 = icmp ule i16 %tmp0, -257 ; ~0U << 8 - 1
   ret i1 %tmp1
diff --git a/llvm/test/CodeGen/RISCV/signbit-test.ll b/llvm/test/CodeGen/RISCV/signbit-test.ll
index 69a9026d9af9e2..4e10fae06d8860 100644
--- a/llvm/test/CodeGen/RISCV/signbit-test.ll
+++ b/llvm/test/CodeGen/RISCV/signbit-test.ll
@@ -303,7 +303,10 @@ define i16 @test_clear_mask_i16_i8(i16 %x) nounwind {
 ; RV32-NEXT:    bnez a1, .LBB10_2
 ; RV32-NEXT:  # %bb.1: # %t
 ; RV32-NEXT:    li a0, 42
-; RV32-NEXT:  .LBB10_2: # %f
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB10_2:
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: test_clear_mask_i16_i8:
@@ -312,7 +315,10 @@ define i16 @test_clear_mask_i16_i8(i16 %x) nounwind {
 ; RV64-NEXT:    bnez a1, .LBB10_2
 ; RV64-NEXT:  # %bb.1: # %t
 ; RV64-NEXT:    li a0, 42
-; RV64-NEXT:  .LBB10_2: # %f
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB10_2:
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    ret
 entry:
   %a = and i16 %x, 128
@@ -332,7 +338,10 @@ define i16 @test_set_mask_i16_i8(i16 %x) nounwind {
 ; RV32-NEXT:    beqz a1, .LBB11_2
 ; RV32-NEXT:  # %bb.1: # %t
 ; RV32-NEXT:    li a0, 42
-; RV32-NEXT:  .LBB11_2: # %f
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB11_2:
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: test_set_mask_i16_i8:
@@ -341,7 +350,10 @@ define i16 @test_set_mask_i16_i8(i16 %x) nounwind {
 ; RV64-NEXT:    beqz a1, .LBB11_2
 ; RV64-NEXT:  # %bb.1: # %t
 ; RV64-NEXT:    li a0, 42
-; RV64-NEXT:  .LBB11_2: # %f
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB11_2:
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    ret
 entry:
   %a = and i16 %x, 128
@@ -361,7 +373,10 @@ define i16 @test_set_mask_i16_i7(i16 %x) nounwind {
 ; RV32-NEXT:    beqz a1, .LBB12_2
 ; RV32-NEXT:  # %bb.1: # %t
 ; RV32-NEXT:    li a0, 42
-; RV32-NEXT:  .LBB12_2: # %f
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB12_2:
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: test_set_mask_i16_i7:
@@ -370,7 +385,10 @@ define i16 @test_set_mask_i16_i7(i16 %x) nounwind {
 ; RV64-NEXT:    beqz a1, .LBB12_2
 ; RV64-NEXT:  # %bb.1: # %t
 ; RV64-NEXT:    li a0, 42
-; RV64-NEXT:  .LBB12_2: # %f
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB12_2:
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
 ; RV64-NEXT:    ret
 entry:
   %a = and i16 %x, 64
diff --git a/llvm/test/CodeGen/RISCV/signed-truncation-check.ll b/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
index 0860853ae9c0af..de36bcdb910609 100644
--- a/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
+++ b/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
@@ -254,23 +254,43 @@ define i1 @shifts_eqcmp_i64_i8(i64 %x) nounwind {
 ; ---------------------------------------------------------------------------- ;
 
 define i1 @add_ugecmp_i16_i8(i16 %x) nounwind {
-; RV32-LABEL: add_ugecmp_i16_i8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, a0, -128
-; RV32-NEXT:    slli a0, a0, 16
-; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:    sltiu a0, a0, 255
-; RV32-NEXT:    xori a0, a0, 1
-; RV32-NEXT:    ret
+; RV32I-LABEL: add_ugecmp_i16_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    addi a0, a0, -128
+; RV32I-NEXT:    srli a0, a0, 8
+; RV32I-NEXT:    sltiu a0, a0, 255
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: add_ugecmp_i16_i8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, a0, -128
-; RV64-NEXT:    slli a0, a0, 48
-; RV64-NEXT:    srli a0, a0, 56
-; RV64-NEXT:    sltiu a0, a0, 255
-; RV64-NEXT:    xori a0, a0, 1
-; RV64-NEXT:    ret
+; RV64I-LABEL: add_ugecmp_i16_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    addi a0, a0, -128
+; RV64I-NEXT:    srli a0, a0, 8
+; RV64I-NEXT:    sltiu a0, a0, 255
+; RV64I-NEXT:    xori a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: add_ugecmp_i16_i8:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    zext.h a0, a0
+; RV32ZBB-NEXT:    addi a0, a0, -128
+; RV32ZBB-NEXT:    srli a0, a0, 8
+; RV32ZBB-NEXT:    sltiu a0, a0, 255
+; RV32ZBB-NEXT:    xori a0, a0, 1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: add_ugecmp_i16_i8:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    zext.h a0, a0
+; RV64ZBB-NEXT:    addi a0, a0, -128
+; RV64ZBB-NEXT:    srli a0, a0, 8
+; RV64ZBB-NEXT:    sltiu a0, a0, 255
+; RV64ZBB-NEXT:    xori a0, a0, 1
+; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
   %tmp1 = icmp uge i16 %tmp0, -256 ; ~0U << 8
   ret i1 %tmp1
@@ -471,23 +491,43 @@ define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
 
 ; Slightly more canonical variant
 define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
-; RV32-LABEL: add_ugtcmp_i16_i8:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, a0, -128
-; RV32-NEXT:    slli a0, a0, 16
-; RV32-NEXT:    srli a0, a0, 24
-; RV32-NEXT:    sltiu a0, a0, 255
-; RV32-NEXT:    xori a0, a0, 1
-; RV32-NEXT:    ret
+; RV32I-LABEL: add_ugtcmp_i16_i8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    addi a0, a0, -128
+; RV32I-NEXT:    srli a0, a0, 8
+; RV32I-NEXT:    sltiu a0, a0, 255
+; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    ret
 ;
-; RV64-LABEL: add_ugtcmp_i16_i8:
-; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, a0, -128
-; RV64-NEXT:    slli a0, a0, 48
-; RV64-NEXT:    srli a0, a0, 56
-; RV64-NEXT:    sltiu a0, a0, 255
-; RV64-NEXT:    xori a0, a0, 1
-; RV64-NEXT:    ret
+; RV64I-LABEL: add_ugtcmp_i16_i8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    addi a0, a0, -128
+; RV64I-NEXT:    srli a0, a0, 8
+; RV64I-NEXT:    sltiu a0, a0, 255
+; RV64I-NEXT:    xori a0, a0, 1
+; RV64I-NEXT:    ret
+;
+; RV32ZBB-LABEL: add_ugtcmp_i16_i8:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    zext.h a0, a0
+; RV32ZBB-NEXT:    addi a0, a0, -128
+; RV32ZBB-NEXT:    srli a0, a0, 8
+; RV32ZBB-NEXT:    sltiu a0, a0, 255
+; RV32ZBB-NEXT:    xori a0, a0, 1
+; RV32ZBB-NEXT:    ret
+;
+; RV64ZBB-LABEL: add_ugtcmp_i16_i8:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    zext.h a0, a0
+; RV64ZBB-NEXT:    addi a0, a0, -128
+; RV64ZBB-NEXT:    srli a0, a0, 8
+; RV64ZBB-NEXT:    sltiu a0, a0, 255
+; RV64ZBB-NEXT:    xori a0, a0, 1
+; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, -128 ; ~0U << (8-1)
   %tmp1 = icmp ugt i16 %tmp0, -257 ; ~0U << 8 - 1
   ret i1 %tmp1
diff --git a/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
index fad9e6c0756b36..3740dc675949fa 100644
--- a/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
+++ b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
@@ -171,8 +171,7 @@ define i32 @safe_add_underflow(i8 zeroext %a) {
 define i32 @safe_add_underflow_neg(i8 zeroext %a) {
 ; CHECK-LABEL: safe_add_underflow_neg:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -2
-; CHECK-NEXT:    andi a1, a0, 255
+; CHECK-NEXT:    addi a1, a0, -2
 ; CHECK-NEXT:    li a2, 251
 ; CHECK-NEXT:    li a0, 8
 ; CHECK-NEXT:    bltu a1, a2, .LBB9_2
@@ -207,9 +206,8 @@ define i32 @overflow_sub_negative_const_limit(i8 zeroext %a) {
 define i32 @sext_sub_underflow(i8 zeroext %a) {
 ; CHECK-LABEL: sext_sub_underflow:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -6
-; CHECK-NEXT:    andi a1, a0, 255
-; CHECK-NEXT:    li a2, 250
+; CHECK-NEXT:    addi a1, a0, -6
+; CHECK-NEXT:    li a2, -6
 ; CHECK-NEXT:    li a0, 8
 ; CHECK-NEXT:    bltu a2, a1, .LBB11_2
 ; CHECK-NEXT:  # %bb.1:
@@ -240,8 +238,7 @@ define i32 @safe_sub_underflow(i8 zeroext %a) {
 define i32 @safe_sub_underflow_neg(i8 zeroext %a) {
 ; CHECK-LABEL: safe_sub_underflow_neg:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    andi a1, a0, 255
+; CHECK-NEXT:    addi a1, a0, -4
 ; CHECK-NEXT:    li a2, 250
 ; CHECK-NEXT:    li a0, 8
 ; CHECK-NEXT:    bltu a2, a1, .LBB13_2
@@ -259,9 +256,8 @@ define i32 @safe_sub_underflow_neg(i8 zeroext %a) {
 define i32 @sext_sub_underflow_neg(i8 zeroext %a) {
 ; CHECK-LABEL: sext_sub_underflow_neg:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi a0, a0, -4
-; CHECK-NEXT:    andi a1, a0, 255
-; CHECK-NEXT:    li a2, 253
+; CHECK-NEXT:    addi a1, a0, -4
+; CHECK-NEXT:    li a2, -3
 ; CHECK-NEXT:    li a0, 8
 ; CHECK-NEXT:    bltu a1, a2, .LBB14_2
 ; CHECK-NEXT:  # %bb.1:
@@ -322,15 +318,18 @@ define i8 @convert_add_order(i8 zeroext %arg) {
 ; CHECK-LABEL: convert_add_order:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ori a1, a0, 1
-; CHECK-NEXT:    sltiu a2, a1, 50
+; CHECK-NEXT:    li a2, 50
+; CHECK-NEXT:    bltu a1, a2, .LBB19_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    li a1, 255
+; CHECK-NEXT:    and a0, a1, a0
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB19_2:
 ; CHECK-NEXT:    addi a1, a1, -40
-; CHECK-NEXT:    andi a1, a1, 255
 ; CHECK-NEXT:    sltiu a1, a1, 20
-; CHECK-NEXT:    li a3, 2
-; CHECK-NEXT:    sub a3, a3, a1
-; CHECK-NEXT:    addi a2, a2, -1
-; CHECK-NEXT:    or a2, a2, a3
-; CHECK-NEXT:    and a0, a2, a0
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    sub a1, a2, a1
+; CHECK-NEXT:    and a0, a1, a0
 ; CHECK-NEXT:    ret
   %shl = or i8 %arg, 1
   %cmp.0 = icmp ult i8 %shl, 50
@@ -348,9 +347,8 @@ define i8 @underflow_if_sub(i32 %arg, i8 zeroext %arg1) {
 ; CHECK-NEXT:    sext.w a2, a0
 ; CHECK-NEXT:    sgtz a2, a2
 ; CHECK-NEXT:    and a0, a2, a0
-; CHECK-NEXT:    addi a0, a0, -11
-; CHECK-NEXT:    andi a2, a0, 247
-; CHECK-NEXT:    bltu a2, a1, .LBB20_2
+; CHECK-NEXT:    addi a0, a0, 245
+; CHECK-NEXT:    bltu a0, a1, .LBB20_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a0, 100
 ; CHECK-NEXT:  .LBB20_2:
@@ -369,9 +367,10 @@ define i8 @underflow_if_sub_signext(i32 %arg, i8 signext %arg1) {
 ; CHECK-LABEL: underflow_if_sub_signext:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    sext.w a2, a0
+; CHECK-NEXT:    andi a1, a1, 255
 ; CHECK-NEXT:    sgtz a2, a2
 ; CHECK-NEXT:    and a0, a2, a0
-; CHECK-NEXT:    addi a0, a0, -11
+; CHECK-NEXT:    addi a0, a0, 245
 ; CHECK-NEXT:    bltu a0, a1, .LBB21_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a0, 100
