[llvm] [TypePromotion] Support positive addition amounts in isSafeWrap. (PR #81690)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 13 16:14:52 PST 2024


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/81690

We can support these by changing the sext promotion to -zext(-C) and replacing an sgt check with ugt. This reframes the logic in terms of how the unsigned ranges are affected.
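
To illustrate the remapping concretely, here is a minimal standalone sketch using APInt (not code from the patch; remapConstant is a hypothetical helper name). For add i8 %a, 1, the add is treated as a subtract by -1, so the promoted constant is -zext(-1 as i8) = -zext(255) = -255, matching the add i32 %zext, -255 in the updated tests:

#include "llvm/ADT/APInt.h"
#include <cassert>

using llvm::APInt;

// Place a constant the same distance from the top of the promoted
// unsigned range as it was from the top of the original range: -zext(-C).
static APInt remapConstant(const APInt &C, unsigned PromotedWidth) {
  return -((-C).zext(PromotedWidth));
}

int main() {
  // 'add i8 %a, 1': negate 1 to get 0xFF, zero extend to 0x000000FF,
  // negate again to get 0xFFFFFF01, i.e. -255 in i32.
  assert(remapConstant(APInt(8, 1), 32) ==
         APInt(32, -255, /*isSigned=*/true));

  // The icmp constant 254 (0xFE) maps to 0xFFFFFFFE (4294967294),
  // matching the first example in the updated comment.
  assert(remapConstant(APInt(8, 254), 32) ==
         APInt(32, -2, /*isSigned=*/true));
  return 0;
}

Compiled against LLVM's Support library, both assertions hold, which lines up with the i32 constants appearing in the updated comments and CHECK lines in the patch.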

More comments in the patch.

From f6a482099643007f1723f4069673431842342ac7 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 13 Feb 2024 15:50:24 -0800
Subject: [PATCH] [TypePromotion] Support positive addition amounts in
 isSafeWrap.

We can support these by changing the sext promotion to -zext(-C)
and replacing an sgt check with ugt. This reframes the logic in
terms of how the unsigned ranges are affected.

More comments in the patch.
---
 llvm/lib/CodeGen/TypePromotion.cpp            | 109 ++++----
 llvm/test/CodeGen/AArch64/and-mask-removal.ll |  62 ++---
 .../lack-of-signed-truncation-check.ll        |  62 +++--
 .../AArch64/signed-truncation-check.ll        |  64 +++--
 .../CodeGen/AArch64/typepromotion-overflow.ll |   5 +-
 .../CodeGen/AArch64/typepromotion-signed.ll   |   7 +-
 .../RISCV/lack-of-signed-truncation-check.ll  | 240 +++++++++++-------
 .../CodeGen/RISCV/signed-truncation-check.ll  | 206 ++++++++++-----
 .../CodeGen/RISCV/typepromotion-overflow.ll   |   5 +-
 .../Transforms/TypePromotion/ARM/icmps.ll     |   7 +-
 .../Transforms/TypePromotion/ARM/wrapping.ll  |  10 +-
 11 files changed, 470 insertions(+), 307 deletions(-)

diff --git a/llvm/lib/CodeGen/TypePromotion.cpp b/llvm/lib/CodeGen/TypePromotion.cpp
index 48ad8de778010e..2116027f22c6bd 100644
--- a/llvm/lib/CodeGen/TypePromotion.cpp
+++ b/llvm/lib/CodeGen/TypePromotion.cpp
@@ -272,64 +272,58 @@ bool TypePromotionImpl::isSink(Value *V) {
 
 /// Return whether this instruction can safely wrap.
 bool TypePromotionImpl::isSafeWrap(Instruction *I) {
-  // We can support a potentially wrapping instruction (I) if:
+  // We can support a potentially wrapping Add/Sub instruction (I) if:
   // - It is only used by an unsigned icmp.
   // - The icmp uses a constant.
-  // - The wrapping value (I) is decreasing, i.e would underflow - wrapping
-  //   around zero to become a larger number than before.
   // - The wrapping instruction (I) also uses a constant.
   //
-  // We can then use the two constants to calculate whether the result would
-  // wrap in respect to itself in the original bitwidth. If it doesn't wrap,
-  // just underflows the range, the icmp would give the same result whether the
-  // result has been truncated or not. We calculate this by:
-  // - Zero extending both constants, if needed, to RegisterBitWidth.
-  // - Take the absolute value of I's constant, adding this to the icmp const.
-  // - Check that this value is not out of range for small type. If it is, it
-  //   means that it has underflowed enough to wrap around the icmp constant.
+  // This is a common pattern emitted to check if a value is within a range.
   //
   // For example:
   //
-  // %sub = sub i8 %a, 2
-  // %cmp = icmp ule i8 %sub, 254
+  // %sub = sub i8 %a, C1
+  // %cmp = icmp ule i8 %sub, C2
+  //
+  // or
   //
-  // If %a = 0, %sub = -2 == FE == 254
-  // But if this is evalulated as a i32
-  // %sub = -2 == FF FF FF FE == 4294967294
-  // So the unsigned compares (i8 and i32) would not yield the same result.
+  // %add = add i8 %a, C1
+  // %cmp = icmp ule i8 %add, C2
   //
-  // Another way to look at it is:
-  // %a - 2 <= 254
-  // %a + 2 <= 254 + 2
-  // %a <= 256
-  // And we can't represent 256 in the i8 format, so we don't support it.
+  // We will treat an add as though it were a subtract by -C1. To promote
+  // the Add/Sub we will zero extend the LHS and the subtracted amount. For Add,
+  // this means we need to negate the constant, zero extend to RegisterBitWidth,
+  // and negate in the larger type.
   //
-  // Whereas:
+  // This will produce a value in the range [-zext(C1), zext(X)-zext(C1)] where
+  // C1 is the subtracted amount. This is either a small unsigned number or a
+  // large unsigned number in the promoted type.
   //
-  // %sub i8 %a, 1
+  // Now we need to correct the compare constant C2. Values >= C1 in the
+  // original add result range have been remapped to large values in the
+  // promoted range. If the compare constant fell into this range we need to
+  // remap it as well. We can do this as -(zext(-C2)).
+  //
+  // For example:
+  //
+  // %sub = sub i8 %a, 2
   // %cmp = icmp ule i8 %sub, 254
   //
-  // If %a = 0, %sub = -1 == FF == 255
-  // As i32:
-  // %sub = -1 == FF FF FF FF == 4294967295
+  // becomes
   //
-  // In this case, the unsigned compare results would be the same and this
-  // would also be true for ult, uge and ugt:
-  // - (255 < 254) == (0xFFFFFFFF < 254) == false
-  // - (255 <= 254) == (0xFFFFFFFF <= 254) == false
-  // - (255 > 254) == (0xFFFFFFFF > 254) == true
-  // - (255 >= 254) == (0xFFFFFFFF >= 254) == true
+  // %zext = zext %a to i32
+  // %sub = sub i32 %zext, 2
+  // %cmp = icmp ule i32 %sub, 4294967294
   //
-  // To demonstrate why we can't handle increasing values:
+  // Another example:
   //
-  // %add = add i8 %a, 2
-  // %cmp = icmp ult i8 %add, 127
+  // %sub = sub i8 %a, 1
+  // %cmp = icmp ule i8 %sub, 254
   //
-  // If %a = 254, %add = 256 == (i8 1)
-  // As i32:
-  // %add = 256
+  // becomes
   //
-  // (1 < 127) != (256 < 127)
+  // %zext = zext %a to i32
+  // %sub = sub i32 %zext, 1
+  // %cmp = icmp ule i32 %sub, 254
 
   unsigned Opc = I->getOpcode();
   if (Opc != Instruction::Add && Opc != Instruction::Sub)
@@ -356,15 +350,10 @@ bool TypePromotionImpl::isSafeWrap(Instruction *I) {
   APInt OverflowConst = cast<ConstantInt>(I->getOperand(1))->getValue();
   if (Opc == Instruction::Sub)
     OverflowConst = -OverflowConst;
-  if (!OverflowConst.isNonPositive())
-    return false;
 
   SafeWrap.insert(I);
 
-  // Using C1 = OverflowConst and C2 = ICmpConst, we can either prove that:
-  //   zext(x) + sext(C1) <u zext(C2)  if C1 < 0 and C1 >s C2
-  //   zext(x) + sext(C1) <u sext(C2)  if C1 < 0 and C1 <=s C2
-  if (OverflowConst.sgt(ICmpConst)) {
+  if (OverflowConst.ugt(ICmpConst)) {
     LLVM_DEBUG(dbgs() << "IR Promotion: Allowing safe overflow for sext "
                       << "const of " << *I << "\n");
     return true;
@@ -487,18 +476,24 @@ void IRPromoter::PromoteTree() {
         continue;
 
       if (auto *Const = dyn_cast<ConstantInt>(Op)) {
-        // For subtract, we don't need to sext the constant. We only put it in
+        // For subtract, we only need to zext the constant. We only put it in
         // SafeWrap because SafeWrap.size() is used elsewhere.
-        // For cmp, we need to sign extend a constant appearing in either
-        // operand. For add, we should only sign extend the RHS.
-        Constant *NewConst =
-            ConstantInt::get(Const->getContext(),
-                             (SafeWrap.contains(I) &&
-                              (I->getOpcode() == Instruction::ICmp || i == 1) &&
-                              I->getOpcode() != Instruction::Sub)
-                                 ? Const->getValue().sext(PromotedWidth)
-                                 : Const->getValue().zext(PromotedWidth));
-        I->setOperand(i, NewConst);
+        // For Add and ICmp we need to find how far the constant is from the
+        // top of its original unsigned range and place it the same distance
+        // from the top of its new unsigned range. We can do this by negating
+        // the constant, zero extending it, then negating in the new type.
+        APInt NewConst;
+        if (SafeWrap.contains(I)) {
+          if (I->getOpcode() == Instruction::ICmp)
+            NewConst = -((-Const->getValue()).zext(PromotedWidth));
+          else if (I->getOpcode() == Instruction::Add && i == 1)
+            NewConst = -((-Const->getValue()).zext(PromotedWidth));
+          else
+            NewConst = Const->getValue().zext(PromotedWidth);
+        } else
+          NewConst = Const->getValue().zext(PromotedWidth);
+
+        I->setOperand(i, ConstantInt::get(Const->getContext(), NewConst));
       } else if (isa<UndefValue>(Op))
         I->setOperand(i, ConstantInt::get(ExtTy, 0));
     }
diff --git a/llvm/test/CodeGen/AArch64/and-mask-removal.ll b/llvm/test/CodeGen/AArch64/and-mask-removal.ll
index 17ff0159701689..4ed482c8f2ba39 100644
--- a/llvm/test/CodeGen/AArch64/and-mask-removal.ll
+++ b/llvm/test/CodeGen/AArch64/and-mask-removal.ll
@@ -65,9 +65,8 @@ if.end:                                           ; preds = %if.then, %entry
 define zeroext i1 @test8_0(i8 zeroext %x)  align 2 {
 ; CHECK-LABEL: test8_0:
 ; CHECK:       ; %bb.0: ; %entry
-; CHECK-NEXT:    add w8, w0, #74
-; CHECK-NEXT:    and w8, w8, #0xff
-; CHECK-NEXT:    cmp w8, #236
+; CHECK-NEXT:    sub w8, w0, #182
+; CHECK-NEXT:    cmn w8, #20
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
 entry:
@@ -295,20 +294,20 @@ ret_true:
 define zeroext i1 @test16_2(i16 zeroext %x)  align 2 {
 ; CHECK-SD-LABEL: test16_2:
 ; CHECK-SD:       ; %bb.0: ; %entry
-; CHECK-SD-NEXT:    mov w8, #16882 ; =0x41f2
-; CHECK-SD-NEXT:    mov w9, #40700 ; =0x9efc
+; CHECK-SD-NEXT:    mov w8, #-48654 ; =0xffff41f2
+; CHECK-SD-NEXT:    mov w9, #-24836 ; =0xffff9efc
 ; CHECK-SD-NEXT:    add w8, w0, w8
-; CHECK-SD-NEXT:    cmp w9, w8, uxth
-; CHECK-SD-NEXT:    cset w0, hi
+; CHECK-SD-NEXT:    cmp w8, w9
+; CHECK-SD-NEXT:    cset w0, lo
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test16_2:
 ; CHECK-GI:       ; %bb.0: ; %entry
-; CHECK-GI-NEXT:    mov w8, #16882 ; =0x41f2
-; CHECK-GI-NEXT:    mov w9, #40699 ; =0x9efb
+; CHECK-GI-NEXT:    mov w8, #-48654 ; =0xffff41f2
+; CHECK-GI-NEXT:    mov w9, #-24837 ; =0xffff9efb
 ; CHECK-GI-NEXT:    add w8, w0, w8
-; CHECK-GI-NEXT:    cmp w9, w8, uxth
-; CHECK-GI-NEXT:    cset w0, hs
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    cset w0, ls
 ; CHECK-GI-NEXT:    ret
 entry:
   %0 = add i16 %x, 16882
@@ -349,20 +348,20 @@ ret_true:
 define zeroext i1 @test16_4(i16 zeroext %x)  align 2 {
 ; CHECK-SD-LABEL: test16_4:
 ; CHECK-SD:       ; %bb.0: ; %entry
-; CHECK-SD-NEXT:    mov w8, #29985 ; =0x7521
+; CHECK-SD-NEXT:    mov w8, #-35551 ; =0xffff7521
 ; CHECK-SD-NEXT:    mov w9, #15676 ; =0x3d3c
 ; CHECK-SD-NEXT:    add w8, w0, w8
-; CHECK-SD-NEXT:    cmp w9, w8, uxth
-; CHECK-SD-NEXT:    cset w0, lo
+; CHECK-SD-NEXT:    cmp w8, w9
+; CHECK-SD-NEXT:    cset w0, hi
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test16_4:
 ; CHECK-GI:       ; %bb.0: ; %entry
-; CHECK-GI-NEXT:    mov w8, #29985 ; =0x7521
+; CHECK-GI-NEXT:    mov w8, #-35551 ; =0xffff7521
 ; CHECK-GI-NEXT:    mov w9, #15677 ; =0x3d3d
 ; CHECK-GI-NEXT:    add w8, w0, w8
-; CHECK-GI-NEXT:    cmp w9, w8, uxth
-; CHECK-GI-NEXT:    cset w0, ls
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    cset w0, hs
 ; CHECK-GI-NEXT:    ret
 entry:
   %0 = add i16 %x, -35551
@@ -431,20 +430,20 @@ ret_true:
 define zeroext i1 @test16_7(i16 zeroext %x)  align 2 {
 ; CHECK-SD-LABEL: test16_7:
 ; CHECK-SD:       ; %bb.0: ; %entry
-; CHECK-SD-NEXT:    mov w8, #9272 ; =0x2438
-; CHECK-SD-NEXT:    mov w9, #22619 ; =0x585b
+; CHECK-SD-NEXT:    mov w8, #-56264 ; =0xffff2438
+; CHECK-SD-NEXT:    mov w9, #-42917 ; =0xffff585b
 ; CHECK-SD-NEXT:    add w8, w0, w8
-; CHECK-SD-NEXT:    cmp w9, w8, uxth
-; CHECK-SD-NEXT:    cset w0, lo
+; CHECK-SD-NEXT:    cmp w8, w9
+; CHECK-SD-NEXT:    cset w0, hi
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: test16_7:
 ; CHECK-GI:       ; %bb.0: ; %entry
-; CHECK-GI-NEXT:    mov w8, #9272 ; =0x2438
-; CHECK-GI-NEXT:    mov w9, #22620 ; =0x585c
+; CHECK-GI-NEXT:    mov w8, #-56264 ; =0xffff2438
+; CHECK-GI-NEXT:    mov w9, #-42916 ; =0xffff585c
 ; CHECK-GI-NEXT:    add w8, w0, w8
-; CHECK-GI-NEXT:    cmp w9, w8, uxth
-; CHECK-GI-NEXT:    cset w0, ls
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    cset w0, hs
 ; CHECK-GI-NEXT:    ret
 entry:
   %0 = add i16 %x, 9272
@@ -508,16 +507,17 @@ define i64 @pr58109(i8 signext %0) {
 define i64 @pr58109b(i8 signext %0, i64 %a, i64 %b) {
 ; CHECK-SD-LABEL: pr58109b:
 ; CHECK-SD:       ; %bb.0:
-; CHECK-SD-NEXT:    add w8, w0, #1
-; CHECK-SD-NEXT:    tst w8, #0xfe
-; CHECK-SD-NEXT:    csel x0, x1, x2, eq
+; CHECK-SD-NEXT:    and w8, w0, #0xff
+; CHECK-SD-NEXT:    sub w8, w8, #255
+; CHECK-SD-NEXT:    cmn w8, #254
+; CHECK-SD-NEXT:    csel x0, x1, x2, lo
 ; CHECK-SD-NEXT:    ret
 ;
 ; CHECK-GI-LABEL: pr58109b:
 ; CHECK-GI:       ; %bb.0:
-; CHECK-GI-NEXT:    add w8, w0, #1
-; CHECK-GI-NEXT:    and w8, w8, #0xff
-; CHECK-GI-NEXT:    cmp w8, #2
+; CHECK-GI-NEXT:    mov w8, #-255 ; =0xffffff01
+; CHECK-GI-NEXT:    add w8, w8, w0, uxtb
+; CHECK-GI-NEXT:    cmn w8, #254
 ; CHECK-GI-NEXT:    csel x0, x1, x2, lo
 ; CHECK-GI-NEXT:    ret
   %2 = add i8 %0, 1
diff --git a/llvm/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll b/llvm/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
index 56a18b00a974cb..8e464d16dd90c5 100644
--- a/llvm/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
+++ b/llvm/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
@@ -187,10 +187,11 @@ define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
 define i1 @add_ugecmp_i16_i8(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_i16_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sxtb w8, w0
-; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, w0, uxth
-; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    mov w8, #-65408 // =0xffff0080
+; CHECK-NEXT:    mov w9, #-65281 // =0xffff00ff
+; CHECK-NEXT:    add w8, w8, w0, uxth
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp uge i16 %tmp0, 256 ; 1U << 8
@@ -256,10 +257,11 @@ define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
 define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugtcmp_i16_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sxtb w8, w0
-; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, w0, uxth
-; CHECK-NEXT:    cset w0, ne
+; CHECK-NEXT:    mov w8, #-65408 // =0xffff0080
+; CHECK-NEXT:    mov w9, #-65281 // =0xffff00ff
+; CHECK-NEXT:    add w8, w8, w0, uxth
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ugt i16 %tmp0, 255 ; (1U << 8) - 1
@@ -301,9 +303,10 @@ define i1 @add_ugecmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
 define i1 @add_ugecmp_bad_i8_i16(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i8_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #128
-; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #127
+; CHECK-NEXT:    mov w8, #-65408 // =0xffff0080
+; CHECK-NEXT:    mov w9, #-65409 // =0xffff007f
+; CHECK-NEXT:    add w8, w8, w0, uxth
+; CHECK-NEXT:    cmp w8, w9
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
@@ -315,9 +318,10 @@ define i1 @add_ugecmp_bad_i8_i16(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i16_i8_c0notpoweroftwo:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #192
-; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #255
+; CHECK-NEXT:    mov w8, #-65344 // =0xffff00c0
+; CHECK-NEXT:    mov w9, #-65281 // =0xffff00ff
+; CHECK-NEXT:    add w8, w8, w0, uxth
+; CHECK-NEXT:    cmp w8, w9
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 192 ; (1U << (8-1)) + (1U << (8-1-1))
@@ -329,9 +333,10 @@ define i1 @add_ugecmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i16_i8_c1notpoweroftwo:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #128
-; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #767
+; CHECK-NEXT:    mov w8, #-65408 // =0xffff0080
+; CHECK-NEXT:    mov w9, #-64769 // =0xffff02ff
+; CHECK-NEXT:    add w8, w8, w0, uxth
+; CHECK-NEXT:    cmp w8, w9
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
@@ -343,9 +348,10 @@ define i1 @add_ugecmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i16_i8_magic(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i16_i8_magic:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #64
-; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #255
+; CHECK-NEXT:    mov w8, #-65472 // =0xffff0040
+; CHECK-NEXT:    mov w9, #-65281 // =0xffff00ff
+; CHECK-NEXT:    add w8, w8, w0, uxth
+; CHECK-NEXT:    cmp w8, w9
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 64 ; 1U << (8-1-1)
@@ -357,9 +363,10 @@ define i1 @add_ugecmp_bad_i16_i8_magic(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i16_i4(i16 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i16_i4:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #8
-; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #15
+; CHECK-NEXT:    mov w8, #-65528 // =0xffff0008
+; CHECK-NEXT:    mov w9, #-65521 // =0xffff000f
+; CHECK-NEXT:    add w8, w8, w0, uxth
+; CHECK-NEXT:    cmp w8, w9
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 8 ; 1U << (4-1)
@@ -371,9 +378,12 @@ define i1 @add_ugecmp_bad_i16_i4(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i24_i8(i24 %x) nounwind {
 ; CHECK-LABEL: add_ugecmp_bad_i24_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #128
-; CHECK-NEXT:    and w8, w8, #0xffffff
-; CHECK-NEXT:    cmp w8, #255
+; CHECK-NEXT:    mov w8, #128 // =0x80
+; CHECK-NEXT:    and w9, w0, #0xffffff
+; CHECK-NEXT:    movk w8, #65280, lsl #16
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    mov w9, #-16776961 // =0xff0000ff
+; CHECK-NEXT:    cmp w8, w9
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %tmp0 = add i24 %x, 128 ; 1U << (8-1)
diff --git a/llvm/test/CodeGen/AArch64/signed-truncation-check.ll b/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
index ab42e6463feeed..6a5795ff337dd7 100644
--- a/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
+++ b/llvm/test/CodeGen/AArch64/signed-truncation-check.ll
@@ -200,10 +200,11 @@ define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
 define i1 @add_ultcmp_i16_i8(i16 %x) nounwind {
 ; CHECK-LABEL: add_ultcmp_i16_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sxtb w8, w0
-; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, w0, uxth
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w8, #-65408 // =0xffff0080
+; CHECK-NEXT:    mov w9, #-65280 // =0xffff0100
+; CHECK-NEXT:    add w8, w8, w0, uxth
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
@@ -269,10 +270,11 @@ define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
 define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
 ; CHECK-LABEL: add_ulecmp_i16_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sxtb w8, w0
-; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, w0, uxth
-; CHECK-NEXT:    cset w0, eq
+; CHECK-NEXT:    mov w8, #-65408 // =0xffff0080
+; CHECK-NEXT:    mov w9, #-65280 // =0xffff0100
+; CHECK-NEXT:    add w8, w8, w0, uxth
+; CHECK-NEXT:    cmp w8, w9
+; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ule i16 %tmp0, 255 ; (1U << 8) - 1
@@ -314,9 +316,9 @@ define i1 @add_ultcmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
 define i1 @add_ultcmp_bad_i8_i16(i16 %x) nounwind {
 ; CHECK-LABEL: add_ultcmp_bad_i8_i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    and w8, w0, #0xffff
-; CHECK-NEXT:    add w8, w8, #128
-; CHECK-NEXT:    lsr w0, w8, #16
+; CHECK-NEXT:    mov w8, #-65408 // =0xffff0080
+; CHECK-NEXT:    cmn w8, w0, uxth
+; CHECK-NEXT:    cset w0, hs
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ult i16 %tmp0, 128 ; 1U << (8-1)
@@ -327,9 +329,10 @@ define i1 @add_ultcmp_bad_i8_i16(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
 ; CHECK-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #192
-; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #256
+; CHECK-NEXT:    mov w8, #-65344 // =0xffff00c0
+; CHECK-NEXT:    mov w9, #-65280 // =0xffff0100
+; CHECK-NEXT:    add w8, w8, w0, uxth
+; CHECK-NEXT:    cmp w8, w9
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 192 ; (1U << (8-1)) + (1U << (8-1-1))
@@ -341,9 +344,10 @@ define i1 @add_ultcmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
 ; CHECK-LABEL: add_ultcmp_bad_i16_i8_c1notpoweroftwo:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #128
-; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #768
+; CHECK-NEXT:    mov w8, #-65408 // =0xffff0080
+; CHECK-NEXT:    mov w9, #-64768 // =0xffff0300
+; CHECK-NEXT:    add w8, w8, w0, uxth
+; CHECK-NEXT:    cmp w8, w9
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
@@ -355,9 +359,10 @@ define i1 @add_ultcmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i16_i8_magic(i16 %x) nounwind {
 ; CHECK-LABEL: add_ultcmp_bad_i16_i8_magic:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #64
-; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #256
+; CHECK-NEXT:    mov w8, #-65472 // =0xffff0040
+; CHECK-NEXT:    mov w9, #-65280 // =0xffff0100
+; CHECK-NEXT:    add w8, w8, w0, uxth
+; CHECK-NEXT:    cmp w8, w9
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 64 ; 1U << (8-1-1)
@@ -369,9 +374,10 @@ define i1 @add_ultcmp_bad_i16_i8_magic(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i16_i4(i16 %x) nounwind {
 ; CHECK-LABEL: add_ultcmp_bad_i16_i4:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #8
-; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #16
+; CHECK-NEXT:    mov w8, #-65528 // =0xffff0008
+; CHECK-NEXT:    mov w9, #-65520 // =0xffff0010
+; CHECK-NEXT:    add w8, w8, w0, uxth
+; CHECK-NEXT:    cmp w8, w9
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 8 ; 1U << (4-1)
@@ -383,9 +389,13 @@ define i1 @add_ultcmp_bad_i16_i4(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i24_i8(i24 %x) nounwind {
 ; CHECK-LABEL: add_ultcmp_bad_i24_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #128
-; CHECK-NEXT:    and w8, w8, #0xffffff
-; CHECK-NEXT:    cmp w8, #256
+; CHECK-NEXT:    mov w8, #128 // =0x80
+; CHECK-NEXT:    and w9, w0, #0xffffff
+; CHECK-NEXT:    movk w8, #65280, lsl #16
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    mov w9, #256 // =0x100
+; CHECK-NEXT:    movk w9, #65280, lsl #16
+; CHECK-NEXT:    cmp w8, w9
 ; CHECK-NEXT:    cset w0, lo
 ; CHECK-NEXT:    ret
   %tmp0 = add i24 %x, 128 ; 1U << (8-1)
@@ -396,7 +406,7 @@ define i1 @add_ultcmp_bad_i24_i8(i24 %x) nounwind {
 define i1 @add_ulecmp_bad_i16_i8(i16 %x) nounwind {
 ; CHECK-LABEL: add_ulecmp_bad_i16_i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w0, #1
+; CHECK-NEXT:    mov w0, #1 // =0x1
 ; CHECK-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ule i16 %tmp0, -1 ; when we +1 it, it will wrap to 0
diff --git a/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll b/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
index ccfbf456693d7a..39edc03ced442e 100644
--- a/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
+++ b/llvm/test/CodeGen/AArch64/typepromotion-overflow.ll
@@ -246,9 +246,8 @@ define i32 @safe_sub_var_imm(ptr nocapture readonly %b) local_unnamed_addr #1 {
 ; CHECK-LABEL: safe_sub_var_imm:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldrb w8, [x0]
-; CHECK-NEXT:    add w8, w8, #8
-; CHECK-NEXT:    and w8, w8, #0xff
-; CHECK-NEXT:    cmp w8, #252
+; CHECK-NEXT:    sub w8, w8, #248
+; CHECK-NEXT:    cmn w8, #4
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/AArch64/typepromotion-signed.ll b/llvm/test/CodeGen/AArch64/typepromotion-signed.ll
index 212f02d86850b8..98cc6cb243fdaa 100644
--- a/llvm/test/CodeGen/AArch64/typepromotion-signed.ll
+++ b/llvm/test/CodeGen/AArch64/typepromotion-signed.ll
@@ -30,9 +30,10 @@ define i16 @test_ashr(i16 zeroext %arg) local_unnamed_addr #1 {
 define i16 @test_sdiv(i16 zeroext %arg) local_unnamed_addr #1 {
 ; CHECK-LABEL: test_sdiv:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    add w8, w0, #1
-; CHECK-NEXT:    and w8, w8, #0xffff
-; CHECK-NEXT:    cmp w8, #2
+; CHECK-NEXT:    mov w8, #-65535 // =0xffff0001
+; CHECK-NEXT:    mov w9, #-65534 // =0xffff0002
+; CHECK-NEXT:    add w8, w0, w8
+; CHECK-NEXT:    cmp w8, w9
 ; CHECK-NEXT:    cset w0, hi
 ; CHECK-NEXT:    ret
   %arg.off = add i16 %arg, 1
diff --git a/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll b/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
index 6e3a50542939f1..2030296580184d 100644
--- a/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
+++ b/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
@@ -484,36 +484,44 @@ define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
 define i1 @add_ugecmp_i16_i8(i16 %x) nounwind {
 ; RV32I-LABEL: add_ugecmp_i16_i8:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, a0, 128
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    sltiu a0, a0, 256
-; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    addi a2, a1, 128
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    addi a1, a1, 255
+; RV32I-NEXT:    sltu a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ugecmp_i16_i8:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 128
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    sltiu a0, a0, 256
-; RV64I-NEXT:    xori a0, a0, 1
+; RV64I-NEXT:    lui a1, 1048560
+; RV64I-NEXT:    addiw a2, a1, 128
+; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addiw a1, a1, 255
+; RV64I-NEXT:    sltu a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: add_ugecmp_i16_i8:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, a0, 128
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    sltiu a0, a0, 256
-; RV32ZBB-NEXT:    xori a0, a0, 1
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a2, a1, 128
+; RV32ZBB-NEXT:    add a0, a0, a2
+; RV32ZBB-NEXT:    addi a1, a1, 255
+; RV32ZBB-NEXT:    sltu a0, a1, a0
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: add_ugecmp_i16_i8:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, 128
 ; RV64ZBB-NEXT:    zext.h a0, a0
-; RV64ZBB-NEXT:    sltiu a0, a0, 256
-; RV64ZBB-NEXT:    xori a0, a0, 1
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addiw a2, a1, 128
+; RV64ZBB-NEXT:    add a0, a0, a2
+; RV64ZBB-NEXT:    addiw a1, a1, 255
+; RV64ZBB-NEXT:    sltu a0, a1, a0
 ; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp uge i16 %tmp0, 256 ; 1U << 8
@@ -672,36 +680,44 @@ define i1 @add_ugecmp_i64_i8(i64 %x) nounwind {
 define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
 ; RV32I-LABEL: add_ugtcmp_i16_i8:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, a0, 128
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    sltiu a0, a0, 256
-; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    addi a2, a1, 128
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    addi a1, a1, 255
+; RV32I-NEXT:    sltu a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ugtcmp_i16_i8:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 128
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    sltiu a0, a0, 256
-; RV64I-NEXT:    xori a0, a0, 1
+; RV64I-NEXT:    lui a1, 1048560
+; RV64I-NEXT:    addiw a2, a1, 128
+; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addiw a1, a1, 255
+; RV64I-NEXT:    sltu a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: add_ugtcmp_i16_i8:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, a0, 128
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    sltiu a0, a0, 256
-; RV32ZBB-NEXT:    xori a0, a0, 1
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a2, a1, 128
+; RV32ZBB-NEXT:    add a0, a0, a2
+; RV32ZBB-NEXT:    addi a1, a1, 255
+; RV32ZBB-NEXT:    sltu a0, a1, a0
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: add_ugtcmp_i16_i8:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, 128
 ; RV64ZBB-NEXT:    zext.h a0, a0
-; RV64ZBB-NEXT:    sltiu a0, a0, 256
-; RV64ZBB-NEXT:    xori a0, a0, 1
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addiw a2, a1, 128
+; RV64ZBB-NEXT:    add a0, a0, a2
+; RV64ZBB-NEXT:    addiw a1, a1, 255
+; RV64ZBB-NEXT:    sltu a0, a1, a0
 ; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ugt i16 %tmp0, 255 ; (1U << 8) - 1
@@ -801,36 +817,44 @@ define i1 @add_ugecmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
 define i1 @add_ugecmp_bad_i8_i16(i16 %x) nounwind {
 ; RV32I-LABEL: add_ugecmp_bad_i8_i16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, a0, 128
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    sltiu a0, a0, 128
-; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    addi a2, a1, 128
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    addi a1, a1, 127
+; RV32I-NEXT:    sltu a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ugecmp_bad_i8_i16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 128
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    sltiu a0, a0, 128
-; RV64I-NEXT:    xori a0, a0, 1
+; RV64I-NEXT:    lui a1, 1048560
+; RV64I-NEXT:    addiw a2, a1, 128
+; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addiw a1, a1, 127
+; RV64I-NEXT:    sltu a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: add_ugecmp_bad_i8_i16:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, a0, 128
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    sltiu a0, a0, 128
-; RV32ZBB-NEXT:    xori a0, a0, 1
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a2, a1, 128
+; RV32ZBB-NEXT:    add a0, a0, a2
+; RV32ZBB-NEXT:    addi a1, a1, 127
+; RV32ZBB-NEXT:    sltu a0, a1, a0
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: add_ugecmp_bad_i8_i16:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, 128
 ; RV64ZBB-NEXT:    zext.h a0, a0
-; RV64ZBB-NEXT:    sltiu a0, a0, 128
-; RV64ZBB-NEXT:    xori a0, a0, 1
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addiw a2, a1, 128
+; RV64ZBB-NEXT:    add a0, a0, a2
+; RV64ZBB-NEXT:    addiw a1, a1, 127
+; RV64ZBB-NEXT:    sltu a0, a1, a0
 ; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp uge i16 %tmp0, 128 ; 1U << (8-1)
@@ -841,36 +865,44 @@ define i1 @add_ugecmp_bad_i8_i16(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
 ; RV32I-LABEL: add_ugecmp_bad_i16_i8_c0notpoweroftwo:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, a0, 192
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    sltiu a0, a0, 256
-; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    addi a2, a1, 192
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    addi a1, a1, 255
+; RV32I-NEXT:    sltu a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ugecmp_bad_i16_i8_c0notpoweroftwo:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 192
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    sltiu a0, a0, 256
-; RV64I-NEXT:    xori a0, a0, 1
+; RV64I-NEXT:    lui a1, 1048560
+; RV64I-NEXT:    addiw a2, a1, 192
+; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addiw a1, a1, 255
+; RV64I-NEXT:    sltu a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: add_ugecmp_bad_i16_i8_c0notpoweroftwo:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, a0, 192
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    sltiu a0, a0, 256
-; RV32ZBB-NEXT:    xori a0, a0, 1
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a2, a1, 192
+; RV32ZBB-NEXT:    add a0, a0, a2
+; RV32ZBB-NEXT:    addi a1, a1, 255
+; RV32ZBB-NEXT:    sltu a0, a1, a0
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: add_ugecmp_bad_i16_i8_c0notpoweroftwo:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, 192
 ; RV64ZBB-NEXT:    zext.h a0, a0
-; RV64ZBB-NEXT:    sltiu a0, a0, 256
-; RV64ZBB-NEXT:    xori a0, a0, 1
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addiw a2, a1, 192
+; RV64ZBB-NEXT:    add a0, a0, a2
+; RV64ZBB-NEXT:    addiw a1, a1, 255
+; RV64ZBB-NEXT:    sltu a0, a1, a0
 ; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, 192 ; (1U << (8-1)) + (1U << (8-1-1))
   %tmp1 = icmp uge i16 %tmp0, 256 ; 1U << 8
@@ -881,36 +913,44 @@ define i1 @add_ugecmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
 ; RV32I-LABEL: add_ugecmp_bad_i16_i8_c1notpoweroftwo:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, a0, 128
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    sltiu a0, a0, 768
-; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    addi a2, a1, 128
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    addi a1, a1, 767
+; RV32I-NEXT:    sltu a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ugecmp_bad_i16_i8_c1notpoweroftwo:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 128
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    sltiu a0, a0, 768
-; RV64I-NEXT:    xori a0, a0, 1
+; RV64I-NEXT:    lui a1, 1048560
+; RV64I-NEXT:    addiw a2, a1, 128
+; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addiw a1, a1, 767
+; RV64I-NEXT:    sltu a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: add_ugecmp_bad_i16_i8_c1notpoweroftwo:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, a0, 128
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    sltiu a0, a0, 768
-; RV32ZBB-NEXT:    xori a0, a0, 1
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a2, a1, 128
+; RV32ZBB-NEXT:    add a0, a0, a2
+; RV32ZBB-NEXT:    addi a1, a1, 767
+; RV32ZBB-NEXT:    sltu a0, a1, a0
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: add_ugecmp_bad_i16_i8_c1notpoweroftwo:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, 128
 ; RV64ZBB-NEXT:    zext.h a0, a0
-; RV64ZBB-NEXT:    sltiu a0, a0, 768
-; RV64ZBB-NEXT:    xori a0, a0, 1
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addiw a2, a1, 128
+; RV64ZBB-NEXT:    add a0, a0, a2
+; RV64ZBB-NEXT:    addiw a1, a1, 767
+; RV64ZBB-NEXT:    sltu a0, a1, a0
 ; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp uge i16 %tmp0, 768 ; (1U << 8)) + (1U << (8+1))
@@ -921,36 +961,44 @@ define i1 @add_ugecmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i16_i8_magic(i16 %x) nounwind {
 ; RV32I-LABEL: add_ugecmp_bad_i16_i8_magic:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, a0, 64
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    sltiu a0, a0, 256
-; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    addi a2, a1, 64
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    addi a1, a1, 255
+; RV32I-NEXT:    sltu a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ugecmp_bad_i16_i8_magic:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 64
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    sltiu a0, a0, 256
-; RV64I-NEXT:    xori a0, a0, 1
+; RV64I-NEXT:    lui a1, 1048560
+; RV64I-NEXT:    addiw a2, a1, 64
+; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addiw a1, a1, 255
+; RV64I-NEXT:    sltu a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: add_ugecmp_bad_i16_i8_magic:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, a0, 64
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    sltiu a0, a0, 256
-; RV32ZBB-NEXT:    xori a0, a0, 1
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a2, a1, 64
+; RV32ZBB-NEXT:    add a0, a0, a2
+; RV32ZBB-NEXT:    addi a1, a1, 255
+; RV32ZBB-NEXT:    sltu a0, a1, a0
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: add_ugecmp_bad_i16_i8_magic:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, 64
 ; RV64ZBB-NEXT:    zext.h a0, a0
-; RV64ZBB-NEXT:    sltiu a0, a0, 256
-; RV64ZBB-NEXT:    xori a0, a0, 1
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addiw a2, a1, 64
+; RV64ZBB-NEXT:    add a0, a0, a2
+; RV64ZBB-NEXT:    addiw a1, a1, 255
+; RV64ZBB-NEXT:    sltu a0, a1, a0
 ; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, 64 ; 1U << (8-1-1)
   %tmp1 = icmp uge i16 %tmp0, 256 ; 1U << 8
@@ -961,36 +1009,44 @@ define i1 @add_ugecmp_bad_i16_i8_magic(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i16_i4(i16 %x) nounwind {
 ; RV32I-LABEL: add_ugecmp_bad_i16_i4:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, a0, 8
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    sltiu a0, a0, 16
-; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    addi a2, a1, 8
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    addi a1, a1, 15
+; RV32I-NEXT:    sltu a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ugecmp_bad_i16_i4:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 8
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    sltiu a0, a0, 16
-; RV64I-NEXT:    xori a0, a0, 1
+; RV64I-NEXT:    lui a1, 1048560
+; RV64I-NEXT:    addiw a2, a1, 8
+; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addiw a1, a1, 15
+; RV64I-NEXT:    sltu a0, a1, a0
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: add_ugecmp_bad_i16_i4:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, a0, 8
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    sltiu a0, a0, 16
-; RV32ZBB-NEXT:    xori a0, a0, 1
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a2, a1, 8
+; RV32ZBB-NEXT:    add a0, a0, a2
+; RV32ZBB-NEXT:    addi a1, a1, 15
+; RV32ZBB-NEXT:    sltu a0, a1, a0
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: add_ugecmp_bad_i16_i4:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, 8
 ; RV64ZBB-NEXT:    zext.h a0, a0
-; RV64ZBB-NEXT:    sltiu a0, a0, 16
-; RV64ZBB-NEXT:    xori a0, a0, 1
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addiw a2, a1, 8
+; RV64ZBB-NEXT:    add a0, a0, a2
+; RV64ZBB-NEXT:    addiw a1, a1, 15
+; RV64ZBB-NEXT:    sltu a0, a1, a0
 ; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, 8 ; 1U << (4-1)
   %tmp1 = icmp uge i16 %tmp0, 16 ; 1U << 4
@@ -1001,20 +1057,24 @@ define i1 @add_ugecmp_bad_i16_i4(i16 %x) nounwind {
 define i1 @add_ugecmp_bad_i24_i8(i24 %x) nounwind {
 ; RV32-LABEL: add_ugecmp_bad_i24_i8:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, a0, 128
 ; RV32-NEXT:    slli a0, a0, 8
 ; RV32-NEXT:    srli a0, a0, 8
-; RV32-NEXT:    sltiu a0, a0, 256
-; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    lui a1, 1044480
+; RV32-NEXT:    addi a2, a1, 128
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    addi a1, a1, 255
+; RV32-NEXT:    sltu a0, a1, a0
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: add_ugecmp_bad_i24_i8:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, a0, 128
 ; RV64-NEXT:    slli a0, a0, 40
 ; RV64-NEXT:    srli a0, a0, 40
-; RV64-NEXT:    sltiu a0, a0, 256
-; RV64-NEXT:    xori a0, a0, 1
+; RV64-NEXT:    lui a1, 1044480
+; RV64-NEXT:    addiw a2, a1, 128
+; RV64-NEXT:    add a0, a0, a2
+; RV64-NEXT:    addiw a1, a1, 255
+; RV64-NEXT:    sltu a0, a1, a0
 ; RV64-NEXT:    ret
   %tmp0 = add i24 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp uge i24 %tmp0, 256 ; 1U << 8
diff --git a/llvm/test/CodeGen/RISCV/signed-truncation-check.ll b/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
index de36bcdb910609..4c0d2c70fe4501 100644
--- a/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
+++ b/llvm/test/CodeGen/RISCV/signed-truncation-check.ll
@@ -540,32 +540,44 @@ define i1 @add_ugtcmp_i16_i8(i16 %x) nounwind {
 define i1 @add_ultcmp_i16_i8(i16 %x) nounwind {
 ; RV32I-LABEL: add_ultcmp_i16_i8:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, a0, 128
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    sltiu a0, a0, 256
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    addi a2, a1, 128
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    addi a1, a1, 256
+; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ultcmp_i16_i8:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 128
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    sltiu a0, a0, 256
+; RV64I-NEXT:    lui a1, 1048560
+; RV64I-NEXT:    addiw a2, a1, 128
+; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addiw a1, a1, 256
+; RV64I-NEXT:    sltu a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: add_ultcmp_i16_i8:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, a0, 128
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    sltiu a0, a0, 256
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a2, a1, 128
+; RV32ZBB-NEXT:    add a0, a0, a2
+; RV32ZBB-NEXT:    addi a1, a1, 256
+; RV32ZBB-NEXT:    sltu a0, a0, a1
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: add_ultcmp_i16_i8:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, 128
 ; RV64ZBB-NEXT:    zext.h a0, a0
-; RV64ZBB-NEXT:    sltiu a0, a0, 256
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addiw a2, a1, 128
+; RV64ZBB-NEXT:    add a0, a0, a2
+; RV64ZBB-NEXT:    addiw a1, a1, 256
+; RV64ZBB-NEXT:    sltu a0, a0, a1
 ; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
@@ -720,32 +732,44 @@ define i1 @add_ultcmp_i64_i8(i64 %x) nounwind {
 define i1 @add_ulecmp_i16_i8(i16 %x) nounwind {
 ; RV32I-LABEL: add_ulecmp_i16_i8:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, a0, 128
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    sltiu a0, a0, 256
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    addi a2, a1, 128
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    addi a1, a1, 256
+; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ulecmp_i16_i8:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 128
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    sltiu a0, a0, 256
+; RV64I-NEXT:    lui a1, 1048560
+; RV64I-NEXT:    addiw a2, a1, 128
+; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addiw a1, a1, 256
+; RV64I-NEXT:    sltu a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: add_ulecmp_i16_i8:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, a0, 128
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    sltiu a0, a0, 256
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a2, a1, 128
+; RV32ZBB-NEXT:    add a0, a0, a2
+; RV32ZBB-NEXT:    addi a1, a1, 256
+; RV32ZBB-NEXT:    sltu a0, a0, a1
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: add_ulecmp_i16_i8:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, 128
 ; RV64ZBB-NEXT:    zext.h a0, a0
-; RV64ZBB-NEXT:    sltiu a0, a0, 256
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addiw a2, a1, 128
+; RV64ZBB-NEXT:    add a0, a0, a2
+; RV64ZBB-NEXT:    addiw a1, a1, 256
+; RV64ZBB-NEXT:    sltu a0, a0, a1
 ; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ule i16 %tmp0, 255 ; (1U << 8) - 1
@@ -837,32 +861,40 @@ define i1 @add_ultcmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
 define i1 @add_ultcmp_bad_i8_i16(i16 %x) nounwind {
 ; RV32I-LABEL: add_ultcmp_bad_i8_i16:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, a0, 128
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    sltiu a0, a0, 128
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    addi a1, a1, 128
+; RV32I-NEXT:    add a0, a0, a1
+; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ultcmp_bad_i8_i16:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 128
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    sltiu a0, a0, 128
+; RV64I-NEXT:    lui a1, 1048560
+; RV64I-NEXT:    addiw a1, a1, 128
+; RV64I-NEXT:    add a0, a0, a1
+; RV64I-NEXT:    sltu a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: add_ultcmp_bad_i8_i16:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, a0, 128
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    sltiu a0, a0, 128
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a1, a1, 128
+; RV32ZBB-NEXT:    add a0, a0, a1
+; RV32ZBB-NEXT:    sltu a0, a0, a1
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: add_ultcmp_bad_i8_i16:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, 128
 ; RV64ZBB-NEXT:    zext.h a0, a0
-; RV64ZBB-NEXT:    sltiu a0, a0, 128
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addiw a1, a1, 128
+; RV64ZBB-NEXT:    add a0, a0, a1
+; RV64ZBB-NEXT:    sltu a0, a0, a1
 ; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ult i16 %tmp0, 128 ; 1U << (8-1)
@@ -873,32 +905,44 @@ define i1 @add_ultcmp_bad_i8_i16(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
 ; RV32I-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, a0, 192
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    sltiu a0, a0, 256
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    addi a2, a1, 192
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    addi a1, a1, 256
+; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 192
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    sltiu a0, a0, 256
+; RV64I-NEXT:    lui a1, 1048560
+; RV64I-NEXT:    addiw a2, a1, 192
+; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addiw a1, a1, 256
+; RV64I-NEXT:    sltu a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, a0, 192
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    sltiu a0, a0, 256
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a2, a1, 192
+; RV32ZBB-NEXT:    add a0, a0, a2
+; RV32ZBB-NEXT:    addi a1, a1, 256
+; RV32ZBB-NEXT:    sltu a0, a0, a1
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, 192
 ; RV64ZBB-NEXT:    zext.h a0, a0
-; RV64ZBB-NEXT:    sltiu a0, a0, 256
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addiw a2, a1, 192
+; RV64ZBB-NEXT:    add a0, a0, a2
+; RV64ZBB-NEXT:    addiw a1, a1, 256
+; RV64ZBB-NEXT:    sltu a0, a0, a1
 ; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, 192 ; (1U << (8-1)) + (1U << (8-1-1))
   %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
@@ -909,32 +953,44 @@ define i1 @add_ultcmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
 ; RV32I-LABEL: add_ultcmp_bad_i16_i8_c1notpoweroftwo:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, a0, 128
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    sltiu a0, a0, 768
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    addi a2, a1, 128
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    addi a1, a1, 768
+; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ultcmp_bad_i16_i8_c1notpoweroftwo:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 128
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    sltiu a0, a0, 768
+; RV64I-NEXT:    lui a1, 1048560
+; RV64I-NEXT:    addiw a2, a1, 128
+; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addiw a1, a1, 768
+; RV64I-NEXT:    sltu a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: add_ultcmp_bad_i16_i8_c1notpoweroftwo:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, a0, 128
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    sltiu a0, a0, 768
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a2, a1, 128
+; RV32ZBB-NEXT:    add a0, a0, a2
+; RV32ZBB-NEXT:    addi a1, a1, 768
+; RV32ZBB-NEXT:    sltu a0, a0, a1
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: add_ultcmp_bad_i16_i8_c1notpoweroftwo:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, 128
 ; RV64ZBB-NEXT:    zext.h a0, a0
-; RV64ZBB-NEXT:    sltiu a0, a0, 768
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addiw a2, a1, 128
+; RV64ZBB-NEXT:    add a0, a0, a2
+; RV64ZBB-NEXT:    addiw a1, a1, 768
+; RV64ZBB-NEXT:    sltu a0, a0, a1
 ; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ult i16 %tmp0, 768 ; (1U << 8)) + (1U << (8+1))
@@ -945,32 +1001,44 @@ define i1 @add_ultcmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i16_i8_magic(i16 %x) nounwind {
 ; RV32I-LABEL: add_ultcmp_bad_i16_i8_magic:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, a0, 64
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    sltiu a0, a0, 256
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    addi a2, a1, 64
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    addi a1, a1, 256
+; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ultcmp_bad_i16_i8_magic:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 64
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    sltiu a0, a0, 256
+; RV64I-NEXT:    lui a1, 1048560
+; RV64I-NEXT:    addiw a2, a1, 64
+; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addiw a1, a1, 256
+; RV64I-NEXT:    sltu a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: add_ultcmp_bad_i16_i8_magic:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, a0, 64
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    sltiu a0, a0, 256
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a2, a1, 64
+; RV32ZBB-NEXT:    add a0, a0, a2
+; RV32ZBB-NEXT:    addi a1, a1, 256
+; RV32ZBB-NEXT:    sltu a0, a0, a1
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: add_ultcmp_bad_i16_i8_magic:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, 64
 ; RV64ZBB-NEXT:    zext.h a0, a0
-; RV64ZBB-NEXT:    sltiu a0, a0, 256
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addiw a2, a1, 64
+; RV64ZBB-NEXT:    add a0, a0, a2
+; RV64ZBB-NEXT:    addiw a1, a1, 256
+; RV64ZBB-NEXT:    sltu a0, a0, a1
 ; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, 64 ; 1U << (8-1-1)
   %tmp1 = icmp ult i16 %tmp0, 256 ; 1U << 8
@@ -981,32 +1049,44 @@ define i1 @add_ultcmp_bad_i16_i8_magic(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i16_i4(i16 %x) nounwind {
 ; RV32I-LABEL: add_ultcmp_bad_i16_i4:
 ; RV32I:       # %bb.0:
-; RV32I-NEXT:    addi a0, a0, 8
 ; RV32I-NEXT:    slli a0, a0, 16
 ; RV32I-NEXT:    srli a0, a0, 16
-; RV32I-NEXT:    sltiu a0, a0, 16
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    addi a2, a1, 8
+; RV32I-NEXT:    add a0, a0, a2
+; RV32I-NEXT:    addi a1, a1, 16
+; RV32I-NEXT:    sltu a0, a0, a1
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ultcmp_bad_i16_i4:
 ; RV64I:       # %bb.0:
-; RV64I-NEXT:    addi a0, a0, 8
 ; RV64I-NEXT:    slli a0, a0, 48
 ; RV64I-NEXT:    srli a0, a0, 48
-; RV64I-NEXT:    sltiu a0, a0, 16
+; RV64I-NEXT:    lui a1, 1048560
+; RV64I-NEXT:    addiw a2, a1, 8
+; RV64I-NEXT:    add a0, a0, a2
+; RV64I-NEXT:    addiw a1, a1, 16
+; RV64I-NEXT:    sltu a0, a0, a1
 ; RV64I-NEXT:    ret
 ;
 ; RV32ZBB-LABEL: add_ultcmp_bad_i16_i4:
 ; RV32ZBB:       # %bb.0:
-; RV32ZBB-NEXT:    addi a0, a0, 8
 ; RV32ZBB-NEXT:    zext.h a0, a0
-; RV32ZBB-NEXT:    sltiu a0, a0, 16
+; RV32ZBB-NEXT:    lui a1, 1048560
+; RV32ZBB-NEXT:    addi a2, a1, 8
+; RV32ZBB-NEXT:    add a0, a0, a2
+; RV32ZBB-NEXT:    addi a1, a1, 16
+; RV32ZBB-NEXT:    sltu a0, a0, a1
 ; RV32ZBB-NEXT:    ret
 ;
 ; RV64ZBB-LABEL: add_ultcmp_bad_i16_i4:
 ; RV64ZBB:       # %bb.0:
-; RV64ZBB-NEXT:    addi a0, a0, 8
 ; RV64ZBB-NEXT:    zext.h a0, a0
-; RV64ZBB-NEXT:    sltiu a0, a0, 16
+; RV64ZBB-NEXT:    lui a1, 1048560
+; RV64ZBB-NEXT:    addiw a2, a1, 8
+; RV64ZBB-NEXT:    add a0, a0, a2
+; RV64ZBB-NEXT:    addiw a1, a1, 16
+; RV64ZBB-NEXT:    sltu a0, a0, a1
 ; RV64ZBB-NEXT:    ret
   %tmp0 = add i16 %x, 8 ; 1U << (4-1)
   %tmp1 = icmp ult i16 %tmp0, 16 ; 1U << 4
@@ -1017,18 +1097,24 @@ define i1 @add_ultcmp_bad_i16_i4(i16 %x) nounwind {
 define i1 @add_ultcmp_bad_i24_i8(i24 %x) nounwind {
 ; RV32-LABEL: add_ultcmp_bad_i24_i8:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    addi a0, a0, 128
 ; RV32-NEXT:    slli a0, a0, 8
 ; RV32-NEXT:    srli a0, a0, 8
-; RV32-NEXT:    sltiu a0, a0, 256
+; RV32-NEXT:    lui a1, 1044480
+; RV32-NEXT:    addi a2, a1, 128
+; RV32-NEXT:    add a0, a0, a2
+; RV32-NEXT:    addi a1, a1, 256
+; RV32-NEXT:    sltu a0, a0, a1
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: add_ultcmp_bad_i24_i8:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    addi a0, a0, 128
 ; RV64-NEXT:    slli a0, a0, 40
 ; RV64-NEXT:    srli a0, a0, 40
-; RV64-NEXT:    sltiu a0, a0, 256
+; RV64-NEXT:    lui a1, 1044480
+; RV64-NEXT:    addiw a2, a1, 128
+; RV64-NEXT:    add a0, a0, a2
+; RV64-NEXT:    addiw a1, a1, 256
+; RV64-NEXT:    sltu a0, a0, a1
 ; RV64-NEXT:    ret
   %tmp0 = add i24 %x, 128 ; 1U << (8-1)
   %tmp1 = icmp ult i24 %tmp0, 256 ; 1U << 8
diff --git a/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
index 3740dc675949fa..ec7e0ecce80caa 100644
--- a/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
+++ b/llvm/test/CodeGen/RISCV/typepromotion-overflow.ll
@@ -283,9 +283,8 @@ define i32 @safe_sub_var_imm(ptr nocapture readonly %b) local_unnamed_addr #1 {
 ; CHECK-LABEL: safe_sub_var_imm:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    lbu a0, 0(a0)
-; CHECK-NEXT:    addi a0, a0, 8
-; CHECK-NEXT:    andi a0, a0, 255
-; CHECK-NEXT:    sltiu a0, a0, 253
+; CHECK-NEXT:    addi a0, a0, -248
+; CHECK-NEXT:    sltiu a0, a0, -3
 ; CHECK-NEXT:    xori a0, a0, 1
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/Transforms/TypePromotion/ARM/icmps.ll b/llvm/test/Transforms/TypePromotion/ARM/icmps.ll
index 842aab121b96fb..fb537a1f64705c 100644
--- a/llvm/test/Transforms/TypePromotion/ARM/icmps.ll
+++ b/llvm/test/Transforms/TypePromotion/ARM/icmps.ll
@@ -4,8 +4,9 @@
 define i32 @test_ult_254_inc_imm(i8 zeroext %x) {
 ; CHECK-LABEL: @test_ult_254_inc_imm(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[X:%.*]], 1
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i8 [[ADD]], -2
+; CHECK-NEXT:    [[TMP0:%.*]] = zext i8 [[X:%.*]] to i32
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[TMP0]], -255
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[ADD]], -2
 ; CHECK-NEXT:    [[RES:%.*]] = select i1 [[CMP]], i32 35, i32 47
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
@@ -368,7 +369,7 @@ if.end:
 define i32 @degenerateicmp() {
 ; CHECK-LABEL: @degenerateicmp(
 ; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 190, 0
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp ugt i32 225, [[TMP1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp ugt i32 -31, [[TMP1]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i32 1, i32 0
 ; CHECK-NEXT:    ret i32 [[TMP3]]
 ;
diff --git a/llvm/test/Transforms/TypePromotion/ARM/wrapping.ll b/llvm/test/Transforms/TypePromotion/ARM/wrapping.ll
index 377708cf71134a..78c5e7323ceab3 100644
--- a/llvm/test/Transforms/TypePromotion/ARM/wrapping.ll
+++ b/llvm/test/Transforms/TypePromotion/ARM/wrapping.ll
@@ -89,8 +89,9 @@ define i32 @overflow_add_const_limit(i8 zeroext %a, i8 zeroext %b) {
 
 define i32 @overflow_add_positive_const_limit(i8 zeroext %a) {
 ; CHECK-LABEL: @overflow_add_positive_const_limit(
-; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[A:%.*]], 1
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[ADD]], -128
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[TMP1]], -255
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[ADD]], -128
 ; CHECK-NEXT:    [[RES:%.*]] = select i1 [[CMP]], i32 8, i32 16
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
@@ -144,8 +145,9 @@ define i32 @safe_add_underflow_neg(i8 zeroext %a) {
 
 define i32 @overflow_sub_negative_const_limit(i8 zeroext %a) {
 ; CHECK-LABEL: @overflow_sub_negative_const_limit(
-; CHECK-NEXT:    [[SUB:%.*]] = sub i8 [[A:%.*]], -1
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 [[SUB]], -128
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[A:%.*]] to i32
+; CHECK-NEXT:    [[SUB:%.*]] = sub i32 [[TMP1]], 255
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[SUB]], -128
 ; CHECK-NEXT:    [[RES:%.*]] = select i1 [[CMP]], i32 8, i32 16
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;


