[clang] [llvm] [InstCombine] Infer zext nneg flag (PR #71534)

Nikita Popov via cfe-commits cfe-commits at lists.llvm.org
Tue Nov 7 07:14:56 PST 2023


https://github.com/nikic updated https://github.com/llvm/llvm-project/pull/71534

From e965141dc8e0be4dceb3e302ea91761203015c72 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Mon, 6 Nov 2023 17:28:11 +0100
Subject: [PATCH] [InstCombine] Infer zext nneg flag

Use a KnownBits-based query (isKnownNonNegative) to infer the nneg flag on
zext instructions whose source operand is known to be non-negative.
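
A minimal, illustrative sketch of the effect on IR (not taken from the test
suite): when the source operand of a zext provably has its sign bit clear,
visitZExt now annotates the cast instead of leaving it plain.

    ; The `and` with 63 clears the sign bit, so %m is known non-negative.
    %m = and i32 %x, 63
    %e = zext i32 %m to i64

    ; After this patch, InstCombine tags the cast:
    %e = zext nneg i32 %m to i64

The nneg flag records that the zext is equivalent to a sext of the same
operand, so later folds can use that fact without recomputing known bits.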
---
 clang/test/Headers/wasm.c                     |  6 ++---
 .../InstCombine/InstCombineCasts.cpp          |  5 ++++
 .../InstCombine/2010-11-01-lshr-mask.ll       |  2 +-
 .../X86/x86-vector-shifts-inseltpoison.ll     |  6 ++---
 .../InstCombine/X86/x86-vector-shifts.ll      | 10 +++----
 .../InstCombine/adjust-for-minmax.ll          |  4 +--
 .../test/Transforms/InstCombine/and-narrow.ll |  8 +++---
 .../test/Transforms/InstCombine/and-xor-or.ll |  2 +-
 llvm/test/Transforms/InstCombine/and.ll       | 18 ++++++-------
 .../InstCombine/assoc-cast-assoc.ll           |  4 +--
 .../test/Transforms/InstCombine/binop-cast.ll |  2 +-
 .../Transforms/InstCombine/cast-mul-select.ll | 20 +++++++-------
 llvm/test/Transforms/InstCombine/cast.ll      | 18 ++++++-------
 llvm/test/Transforms/InstCombine/ctpop.ll     |  4 +--
 llvm/test/Transforms/InstCombine/cttz.ll      |  8 +++---
 llvm/test/Transforms/InstCombine/fmul.ll      |  2 +-
 llvm/test/Transforms/InstCombine/freeze.ll    | 26 +++++++++----------
 .../InstCombine/load-bitcast-select.ll        |  2 +-
 llvm/test/Transforms/InstCombine/lshr.ll      | 10 +++----
 .../Transforms/InstCombine/minmax-fold.ll     |  2 +-
 .../InstCombine/minmax-intrinsics.ll          |  2 +-
 .../Transforms/InstCombine/narrow-math.ll     | 10 +++----
 .../Transforms/InstCombine/negated-bitmask.ll |  4 +--
 .../Transforms/InstCombine/overflow-mul.ll    |  2 +-
 .../InstCombine/reduction-add-sext-zext-i1.ll |  6 ++---
 .../InstCombine/reduction-xor-sext-zext-i1.ll |  4 +--
 llvm/test/Transforms/InstCombine/rem.ll       |  2 +-
 .../InstCombine/select-bitext-bitwise-ops.ll  |  8 +++---
 .../Transforms/InstCombine/select-bitext.ll   |  4 +--
 .../InstCombine/select-cmp-cttz-ctlz.ll       | 18 ++++++-------
 .../InstCombine/select-ctlz-to-cttz.ll        |  4 +--
 .../InstCombine/select-obo-peo-ops.ll         | 16 ++++++------
 .../InstCombine/select-with-bitwise-ops.ll    |  8 +++---
 ...ociation-in-bittest-with-truncation-shl.ll |  8 +++---
 llvm/test/Transforms/InstCombine/shift.ll     |  2 +-
 .../InstCombine/trunc-inseltpoison.ll         | 20 +++++++-------
 llvm/test/Transforms/InstCombine/trunc.ll     | 20 +++++++-------
 .../Transforms/InstCombine/udiv-simplify.ll   |  2 +-
 .../InstCombine/udivrem-change-width.ll       | 22 ++++++++--------
 .../InstCombine/vector-casts-inseltpoison.ll  |  2 +-
 .../Transforms/InstCombine/vector-casts.ll    |  2 +-
 llvm/test/Transforms/InstCombine/wcslen-1.ll  |  2 +-
 llvm/test/Transforms/InstCombine/wcslen-3.ll  |  2 +-
 .../InstCombine/zeroext-and-reduce.ll         |  2 +-
 .../Transforms/InstCombine/zext-or-icmp.ll    | 10 +++----
 llvm/test/Transforms/InstCombine/zext.ll      |  2 +-
 .../LoopVectorize/ARM/mve-reductions.ll       |  4 +--
 .../LoopVectorize/reduction-inloop.ll         | 10 +++----
 48 files changed, 181 insertions(+), 176 deletions(-)

diff --git a/clang/test/Headers/wasm.c b/clang/test/Headers/wasm.c
index a755499c6c79775..9643cafc1ce6c31 100644
--- a/clang/test/Headers/wasm.c
+++ b/clang/test/Headers/wasm.c
@@ -2183,7 +2183,7 @@ uint32_t test_i64x2_bitmask(v128_t a) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A:%.*]] to <2 x i64>
 // CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[B:%.*]], 63
-// CHECK-NEXT:    [[REM_I:%.*]] = zext i32 [[TMP1]] to i64
+// CHECK-NEXT:    [[REM_I:%.*]] = zext nneg i32 [[TMP1]] to i64
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT_I:%.*]] = insertelement <2 x i64> poison, i64 [[REM_I]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT_I:%.*]] = shufflevector <2 x i64> [[SPLAT_SPLATINSERT_I]], <2 x i64> poison, <2 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHL_I:%.*]] = shl <2 x i64> [[TMP0]], [[SPLAT_SPLAT_I]]
@@ -2198,7 +2198,7 @@ v128_t test_i64x2_shl(v128_t a, uint32_t b) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A:%.*]] to <2 x i64>
 // CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[B:%.*]], 63
-// CHECK-NEXT:    [[REM_I:%.*]] = zext i32 [[TMP1]] to i64
+// CHECK-NEXT:    [[REM_I:%.*]] = zext nneg i32 [[TMP1]] to i64
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT_I:%.*]] = insertelement <2 x i64> poison, i64 [[REM_I]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT_I:%.*]] = shufflevector <2 x i64> [[SPLAT_SPLATINSERT_I]], <2 x i64> poison, <2 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHR_I:%.*]] = ashr <2 x i64> [[TMP0]], [[SPLAT_SPLAT_I]]
@@ -2213,7 +2213,7 @@ v128_t test_i64x2_shr(v128_t a, uint32_t b) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A:%.*]] to <2 x i64>
 // CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[B:%.*]], 63
-// CHECK-NEXT:    [[REM_I:%.*]] = zext i32 [[TMP1]] to i64
+// CHECK-NEXT:    [[REM_I:%.*]] = zext nneg i32 [[TMP1]] to i64
 // CHECK-NEXT:    [[SPLAT_SPLATINSERT_I:%.*]] = insertelement <2 x i64> poison, i64 [[REM_I]], i64 0
 // CHECK-NEXT:    [[SPLAT_SPLAT_I:%.*]] = shufflevector <2 x i64> [[SPLAT_SPLATINSERT_I]], <2 x i64> poison, <2 x i32> zeroinitializer
 // CHECK-NEXT:    [[SHR_I:%.*]] = lshr <2 x i64> [[TMP0]], [[SPLAT_SPLAT_I]]
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 46ef17d0e628276..e1dfccbcf0c2dbd 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1228,6 +1228,11 @@ Instruction *InstCombinerImpl::visitZExt(ZExtInst &Zext) {
     }
   }
 
+  if (!Zext.hasNonNeg() && isKnownNonNegative(Src, DL, 0, &AC, &Zext, &DT)) {
+    Zext.setNonNeg();
+    return &Zext;
+  }
+
   return nullptr;
 }
 
diff --git a/llvm/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll b/llvm/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll
index eda4053cf0f6988..3081baa2db281e4 100644
--- a/llvm/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll
+++ b/llvm/test/Transforms/InstCombine/2010-11-01-lshr-mask.ll
@@ -8,7 +8,7 @@ define i32 @main(i32 %argc) {
 ; CHECK-NEXT:    [[T3163:%.*]] = xor i8 [[T3151]], -1
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i8 [[T3163]], 5
 ; CHECK-NEXT:    [[T4127:%.*]] = and i8 [[TMP1]], 64
-; CHECK-NEXT:    [[T4086:%.*]] = zext i8 [[T4127]] to i32
+; CHECK-NEXT:    [[T4086:%.*]] = zext nneg i8 [[T4127]] to i32
 ; CHECK-NEXT:    ret i32 [[T4086]]
 ;
   %t3151 = trunc i32 %argc to i8
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts-inseltpoison.ll b/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts-inseltpoison.ll
index 8c2ba9701e72a5b..21d5723cbb82d63 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts-inseltpoison.ll
@@ -2816,7 +2816,7 @@ define <8 x i32> @avx2_psrai_d_256_masked(<8 x i32> %v, i32 %a) {
 define <8 x i64> @avx512_psrai_q_512_masked(<8 x i64> %v, i32 %a) {
 ; CHECK-LABEL: @avx512_psrai_q_512_masked(
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 63
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[TMP2]], i64 0
 ; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <8 x i64> [[DOTSPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
 ; CHECK-NEXT:    [[TMP3:%.*]] = ashr <8 x i64> [[V:%.*]], [[DOTSPLAT]]
@@ -2843,7 +2843,7 @@ define <4 x i32> @sse2_psrli_d_128_masked(<4 x i32> %v, i32 %a) {
 define <4 x i64> @avx2_psrli_q_256_masked(<4 x i64> %v, i32 %a) {
 ; CHECK-LABEL: @avx2_psrli_q_256_masked(
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 63
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP2]], i64 0
 ; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x i64> [[DOTSPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 ; CHECK-NEXT:    [[TMP3:%.*]] = lshr <4 x i64> [[V:%.*]], [[DOTSPLAT]]
@@ -2871,7 +2871,7 @@ define <32 x i16> @avx512_psrli_w_512_masked(<32 x i16> %v, i32 %a) {
 define <2 x i64> @sse2_pslli_q_128_masked(<2 x i64> %v, i32 %a) {
 ; CHECK-LABEL: @sse2_pslli_q_128_masked(
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 63
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP2]], i64 0
 ; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <2 x i64> [[DOTSPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    [[TMP3:%.*]] = shl <2 x i64> [[V:%.*]], [[DOTSPLAT]]
diff --git a/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts.ll b/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts.ll
index 63e44fda81552e6..a3b14ef2b1c1bee 100644
--- a/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts.ll
+++ b/llvm/test/Transforms/InstCombine/X86/x86-vector-shifts.ll
@@ -2772,8 +2772,8 @@ define <2 x i64> @sse2_psll_q_128_masked_bitcast(<2 x i64> %v, <2 x i64> %a) {
 ; CHECK-NEXT:    [[I:%.*]] = insertelement <4 x i32> [[M]], i32 0, i64 1
 ; CHECK-NEXT:    [[SHAMT:%.*]] = bitcast <4 x i32> [[I]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x i64> [[SHAMT]], <2 x i64> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP2:%.*]] = shl <2 x i64> [[V:%.*]], [[TMP1]]
-; CHECK-NEXT:    ret <2 x i64> [[TMP2]]
+; CHECK-NEXT:    [[R:%.*]] = shl <2 x i64> [[V:%.*]], [[TMP1]]
+; CHECK-NEXT:    ret <2 x i64> [[R]]
 ;
   %b = bitcast <2 x i64> %a to <4 x i32>
   %m = and <4 x i32> %b, <i32 31, i32 poison, i32 poison, i32 poison>
@@ -2856,7 +2856,7 @@ define <8 x i32> @avx2_psrai_d_256_masked(<8 x i32> %v, i32 %a) {
 define <8 x i64> @avx512_psrai_q_512_masked(<8 x i64> %v, i32 %a) {
 ; CHECK-LABEL: @avx512_psrai_q_512_masked(
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 63
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[TMP2]], i64 0
 ; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <8 x i64> [[DOTSPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
 ; CHECK-NEXT:    [[TMP3:%.*]] = ashr <8 x i64> [[V:%.*]], [[DOTSPLAT]]
@@ -2883,7 +2883,7 @@ define <4 x i32> @sse2_psrli_d_128_masked(<4 x i32> %v, i32 %a) {
 define <4 x i64> @avx2_psrli_q_256_masked(<4 x i64> %v, i32 %a) {
 ; CHECK-LABEL: @avx2_psrli_q_256_masked(
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 63
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP2]], i64 0
 ; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <4 x i64> [[DOTSPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
 ; CHECK-NEXT:    [[TMP3:%.*]] = lshr <4 x i64> [[V:%.*]], [[DOTSPLAT]]
@@ -2911,7 +2911,7 @@ define <32 x i16> @avx512_psrli_w_512_masked(<32 x i16> %v, i32 %a) {
 define <2 x i64> @sse2_pslli_q_128_masked(<2 x i64> %v, i32 %a) {
 ; CHECK-LABEL: @sse2_pslli_q_128_masked(
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 63
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP2]], i64 0
 ; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <2 x i64> [[DOTSPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    [[TMP3:%.*]] = shl <2 x i64> [[V:%.*]], [[DOTSPLAT]]
diff --git a/llvm/test/Transforms/InstCombine/adjust-for-minmax.ll b/llvm/test/Transforms/InstCombine/adjust-for-minmax.ll
index dced55944505370..76fc7a07be6bd61 100644
--- a/llvm/test/Transforms/InstCombine/adjust-for-minmax.ll
+++ b/llvm/test/Transforms/InstCombine/adjust-for-minmax.ll
@@ -414,7 +414,7 @@ define <2 x i64> @umax_zext_vec(<2 x i32> %a) {
 define i64 @umin_zext(i32 %a) {
 ; CHECK-LABEL: @umin_zext(
 ; CHECK-NEXT:    [[NARROW:%.*]] = call i32 @llvm.umin.i32(i32 [[A:%.*]], i32 2)
-; CHECK-NEXT:    [[MIN:%.*]] = zext i32 [[NARROW]] to i64
+; CHECK-NEXT:    [[MIN:%.*]] = zext nneg i32 [[NARROW]] to i64
 ; CHECK-NEXT:    ret i64 [[MIN]]
 ;
   %a_ext = zext i32 %a to i64
@@ -426,7 +426,7 @@ define i64 @umin_zext(i32 %a) {
 define <2 x i64> @umin_zext_vec(<2 x i32> %a) {
 ; CHECK-LABEL: @umin_zext_vec(
 ; CHECK-NEXT:    [[NARROW:%.*]] = call <2 x i32> @llvm.umin.v2i32(<2 x i32> [[A:%.*]], <2 x i32> <i32 2, i32 2>)
-; CHECK-NEXT:    [[MIN:%.*]] = zext <2 x i32> [[NARROW]] to <2 x i64>
+; CHECK-NEXT:    [[MIN:%.*]] = zext nneg <2 x i32> [[NARROW]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[MIN]]
 ;
   %a_ext = zext <2 x i32> %a to <2 x i64>
diff --git a/llvm/test/Transforms/InstCombine/and-narrow.ll b/llvm/test/Transforms/InstCombine/and-narrow.ll
index 92894090ef66d71..c8c720f5fbc5534 100644
--- a/llvm/test/Transforms/InstCombine/and-narrow.ll
+++ b/llvm/test/Transforms/InstCombine/and-narrow.ll
@@ -47,7 +47,7 @@ define i16 @zext_lshr(i8 %x) {
 ; CHECK-LABEL: @zext_lshr(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i8 [[X:%.*]], 4
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], [[X]]
-; CHECK-NEXT:    [[R:%.*]] = zext i8 [[TMP2]] to i16
+; CHECK-NEXT:    [[R:%.*]] = zext nneg i8 [[TMP2]] to i16
 ; CHECK-NEXT:    ret i16 [[R]]
 ;
   %z = zext i8 %x to i16
@@ -60,7 +60,7 @@ define i16 @zext_ashr(i8 %x) {
 ; CHECK-LABEL: @zext_ashr(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i8 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], [[X]]
-; CHECK-NEXT:    [[R:%.*]] = zext i8 [[TMP2]] to i16
+; CHECK-NEXT:    [[R:%.*]] = zext nneg i8 [[TMP2]] to i16
 ; CHECK-NEXT:    ret i16 [[R]]
 ;
   %z = zext i8 %x to i16
@@ -125,7 +125,7 @@ define <2 x i16> @zext_lshr_vec(<2 x i8> %x) {
 ; CHECK-LABEL: @zext_lshr_vec(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 4, i8 2>
 ; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i8> [[TMP1]], [[X]]
-; CHECK-NEXT:    [[R:%.*]] = zext <2 x i8> [[TMP2]] to <2 x i16>
+; CHECK-NEXT:    [[R:%.*]] = zext nneg <2 x i8> [[TMP2]] to <2 x i16>
 ; CHECK-NEXT:    ret <2 x i16> [[R]]
 ;
   %z = zext <2 x i8> %x to <2 x i16>
@@ -138,7 +138,7 @@ define <2 x i16> @zext_ashr_vec(<2 x i8> %x) {
 ; CHECK-LABEL: @zext_ashr_vec(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 2, i8 3>
 ; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i8> [[TMP1]], [[X]]
-; CHECK-NEXT:    [[R:%.*]] = zext <2 x i8> [[TMP2]] to <2 x i16>
+; CHECK-NEXT:    [[R:%.*]] = zext nneg <2 x i8> [[TMP2]] to <2 x i16>
 ; CHECK-NEXT:    ret <2 x i16> [[R]]
 ;
   %z = zext <2 x i8> %x to <2 x i16>
diff --git a/llvm/test/Transforms/InstCombine/and-xor-or.ll b/llvm/test/Transforms/InstCombine/and-xor-or.ll
index 741fc1eca65d1e9..69a7890bee22f80 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-or.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-or.ll
@@ -4207,7 +4207,7 @@ define i16 @and_zext_zext(i8 %x, i4 %y) {
 ; CHECK-SAME: (i8 [[X:%.*]], i4 [[Y:%.*]]) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i4 [[Y]] to i8
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], [[X]]
-; CHECK-NEXT:    [[R:%.*]] = zext i8 [[TMP2]] to i16
+; CHECK-NEXT:    [[R:%.*]] = zext nneg i8 [[TMP2]] to i16
 ; CHECK-NEXT:    ret i16 [[R]]
 ;
   %zx = zext i8 %x to i16
diff --git a/llvm/test/Transforms/InstCombine/and.ll b/llvm/test/Transforms/InstCombine/and.ll
index 95b1b0e73ea5c7a..386ee3807050140 100644
--- a/llvm/test/Transforms/InstCombine/and.ll
+++ b/llvm/test/Transforms/InstCombine/and.ll
@@ -525,7 +525,7 @@ define <2 x i32> @and_demanded_bits_splat_vec(<2 x i32> %x) {
 define i32 @and_zext_demanded(i16 %x, i32 %y) {
 ; CHECK-LABEL: @and_zext_demanded(
 ; CHECK-NEXT:    [[S:%.*]] = lshr i16 [[X:%.*]], 8
-; CHECK-NEXT:    [[Z:%.*]] = zext i16 [[S]] to i32
+; CHECK-NEXT:    [[Z:%.*]] = zext nneg i16 [[S]] to i32
 ; CHECK-NEXT:    ret i32 [[Z]]
 ;
   %s = lshr i16 %x, 8
@@ -618,7 +618,7 @@ define i64 @test35(i32 %X) {
 ; CHECK-LABEL: @test35(
 ; CHECK-NEXT:    [[TMP1:%.*]] = sub i32 0, [[X:%.*]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 240
-; CHECK-NEXT:    [[RES:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[RES:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
   %zext = zext i32 %X to i64
@@ -631,7 +631,7 @@ define <2 x i64> @test35_uniform(<2 x i32> %X) {
 ; CHECK-LABEL: @test35_uniform(
 ; CHECK-NEXT:    [[TMP1:%.*]] = sub <2 x i32> zeroinitializer, [[X:%.*]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], <i32 240, i32 240>
-; CHECK-NEXT:    [[RES:%.*]] = zext <2 x i32> [[TMP2]] to <2 x i64>
+; CHECK-NEXT:    [[RES:%.*]] = zext nneg <2 x i32> [[TMP2]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %zext = zext <2 x i32> %X to <2 x i64>
@@ -644,7 +644,7 @@ define i64 @test36(i32 %X) {
 ; CHECK-LABEL: @test36(
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[X:%.*]], 7
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 240
-; CHECK-NEXT:    [[RES:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[RES:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
   %zext = zext i32 %X to i64
@@ -657,7 +657,7 @@ define <2 x i64> @test36_uniform(<2 x i32> %X) {
 ; CHECK-LABEL: @test36_uniform(
 ; CHECK-NEXT:    [[TMP1:%.*]] = add <2 x i32> [[X:%.*]], <i32 7, i32 7>
 ; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], <i32 240, i32 240>
-; CHECK-NEXT:    [[RES:%.*]] = zext <2 x i32> [[TMP2]] to <2 x i64>
+; CHECK-NEXT:    [[RES:%.*]] = zext nneg <2 x i32> [[TMP2]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %zext = zext <2 x i32> %X to <2 x i64>
@@ -683,7 +683,7 @@ define i64 @test37(i32 %X) {
 ; CHECK-LABEL: @test37(
 ; CHECK-NEXT:    [[TMP1:%.*]] = mul i32 [[X:%.*]], 7
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 240
-; CHECK-NEXT:    [[RES:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[RES:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
   %zext = zext i32 %X to i64
@@ -696,7 +696,7 @@ define <2 x i64> @test37_uniform(<2 x i32> %X) {
 ; CHECK-LABEL: @test37_uniform(
 ; CHECK-NEXT:    [[TMP1:%.*]] = mul <2 x i32> [[X:%.*]], <i32 7, i32 7>
 ; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], <i32 240, i32 240>
-; CHECK-NEXT:    [[RES:%.*]] = zext <2 x i32> [[TMP2]] to <2 x i64>
+; CHECK-NEXT:    [[RES:%.*]] = zext nneg <2 x i32> [[TMP2]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[RES]]
 ;
   %zext = zext <2 x i32> %X to <2 x i64>
@@ -721,7 +721,7 @@ define <2 x i64> @test37_nonuniform(<2 x i32> %X) {
 define i64 @test38(i32 %X) {
 ; CHECK-LABEL: @test38(
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 240
-; CHECK-NEXT:    [[RES:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[RES:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
   %zext = zext i32 %X to i64
@@ -733,7 +733,7 @@ define i64 @test38(i32 %X) {
 define i64 @test39(i32 %X) {
 ; CHECK-LABEL: @test39(
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 240
-; CHECK-NEXT:    [[RES:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[RES:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
   %zext = zext i32 %X to i64
diff --git a/llvm/test/Transforms/InstCombine/assoc-cast-assoc.ll b/llvm/test/Transforms/InstCombine/assoc-cast-assoc.ll
index 04b530647d0a26e..a3485978471dc05 100644
--- a/llvm/test/Transforms/InstCombine/assoc-cast-assoc.ll
+++ b/llvm/test/Transforms/InstCombine/assoc-cast-assoc.ll
@@ -54,7 +54,7 @@ define <2 x i32> @OrZextOrVec(<2 x i2> %a) {
 define i5 @AndZextAnd(i3 %a) {
 ; CHECK-LABEL: @AndZextAnd(
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i3 [[A:%.*]], 2
-; CHECK-NEXT:    [[OP2:%.*]] = zext i3 [[TMP1]] to i5
+; CHECK-NEXT:    [[OP2:%.*]] = zext nneg i3 [[TMP1]] to i5
 ; CHECK-NEXT:    ret i5 [[OP2]]
 ;
   %op1 = and i3 %a, 3
@@ -66,7 +66,7 @@ define i5 @AndZextAnd(i3 %a) {
 define <2 x i32> @AndZextAndVec(<2 x i8> %a) {
 ; CHECK-LABEL: @AndZextAndVec(
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i8> [[A:%.*]], <i8 5, i8 0>
-; CHECK-NEXT:    [[OP2:%.*]] = zext <2 x i8> [[TMP1]] to <2 x i32>
+; CHECK-NEXT:    [[OP2:%.*]] = zext nneg <2 x i8> [[TMP1]] to <2 x i32>
 ; CHECK-NEXT:    ret <2 x i32> [[OP2]]
 ;
   %op1 = and <2 x i8> %a, <i8 7, i8 0>
diff --git a/llvm/test/Transforms/InstCombine/binop-cast.ll b/llvm/test/Transforms/InstCombine/binop-cast.ll
index 20d5814c05d3aa7..e3345194d0b3284 100644
--- a/llvm/test/Transforms/InstCombine/binop-cast.ll
+++ b/llvm/test/Transforms/InstCombine/binop-cast.ll
@@ -276,7 +276,7 @@ define i64 @PR63321(ptr %ptr, i64 %c) {
 define i64 @and_add_non_bool(ptr %ptr, i64 %c) {
 ; CHECK-LABEL: @and_add_non_bool(
 ; CHECK-NEXT:    [[VAL:%.*]] = load i8, ptr [[PTR:%.*]], align 1, !range [[RNG1:![0-9]+]]
-; CHECK-NEXT:    [[RHS:%.*]] = zext i8 [[VAL]] to i64
+; CHECK-NEXT:    [[RHS:%.*]] = zext nneg i8 [[VAL]] to i64
 ; CHECK-NEXT:    [[MASK:%.*]] = add nsw i64 [[RHS]], -1
 ; CHECK-NEXT:    [[RES:%.*]] = and i64 [[MASK]], [[C:%.*]]
 ; CHECK-NEXT:    ret i64 [[RES]]
diff --git a/llvm/test/Transforms/InstCombine/cast-mul-select.ll b/llvm/test/Transforms/InstCombine/cast-mul-select.ll
index 23e934de0baeb7e..454522b85a1e843 100644
--- a/llvm/test/Transforms/InstCombine/cast-mul-select.ll
+++ b/llvm/test/Transforms/InstCombine/cast-mul-select.ll
@@ -119,7 +119,7 @@ define i32 @eval_zext_multi_use_in_one_inst(i32 %x) {
 ; CHECK-NEXT:    [[T:%.*]] = trunc i32 [[X:%.*]] to i16
 ; CHECK-NEXT:    [[A:%.*]] = and i16 [[T]], 5
 ; CHECK-NEXT:    [[M:%.*]] = mul nuw nsw i16 [[A]], [[A]]
-; CHECK-NEXT:    [[R:%.*]] = zext i16 [[M]] to i32
+; CHECK-NEXT:    [[R:%.*]] = zext nneg i16 [[M]] to i32
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
 ; DBGINFO-LABEL: @eval_zext_multi_use_in_one_inst(
@@ -129,7 +129,7 @@ define i32 @eval_zext_multi_use_in_one_inst(i32 %x) {
 ; DBGINFO-NEXT:    call void @llvm.dbg.value(metadata i16 [[A]], metadata [[META66:![0-9]+]], metadata !DIExpression()), !dbg [[DBG70]]
 ; DBGINFO-NEXT:    [[M:%.*]] = mul nuw nsw i16 [[A]], [[A]], !dbg [[DBG71:![0-9]+]]
 ; DBGINFO-NEXT:    call void @llvm.dbg.value(metadata i16 [[M]], metadata [[META67:![0-9]+]], metadata !DIExpression()), !dbg [[DBG71]]
-; DBGINFO-NEXT:    [[R:%.*]] = zext i16 [[M]] to i32, !dbg [[DBG72:![0-9]+]]
+; DBGINFO-NEXT:    [[R:%.*]] = zext nneg i16 [[M]] to i32, !dbg [[DBG72:![0-9]+]]
 ; DBGINFO-NEXT:    call void @llvm.dbg.value(metadata i32 [[R]], metadata [[META68:![0-9]+]], metadata !DIExpression()), !dbg [[DBG72]]
 ; DBGINFO-NEXT:    ret i32 [[R]], !dbg [[DBG73:![0-9]+]]
 ;
@@ -183,13 +183,13 @@ define void @PR36225(i32 %a, i32 %b, i1 %c1, i3 %v1, i3 %v2) {
 ; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i32 [[B:%.*]], 0
 ; CHECK-NEXT:    [[SPEC_SELECT:%.*]] = select i1 [[TOBOOL]], i8 0, i8 4
 ; CHECK-NEXT:    switch i3 [[V1:%.*]], label [[EXIT:%.*]] [
-; CHECK-NEXT:    i3 0, label [[FOR_END:%.*]]
-; CHECK-NEXT:    i3 -1, label [[FOR_END]]
+; CHECK-NEXT:      i3 0, label [[FOR_END:%.*]]
+; CHECK-NEXT:      i3 -1, label [[FOR_END]]
 ; CHECK-NEXT:    ]
 ; CHECK:       for.body3:
 ; CHECK-NEXT:    switch i3 [[V2:%.*]], label [[EXIT]] [
-; CHECK-NEXT:    i3 0, label [[FOR_END]]
-; CHECK-NEXT:    i3 -1, label [[FOR_END]]
+; CHECK-NEXT:      i3 0, label [[FOR_END]]
+; CHECK-NEXT:      i3 -1, label [[FOR_END]]
 ; CHECK-NEXT:    ]
 ; CHECK:       for.end:
 ; CHECK-NEXT:    [[H:%.*]] = phi i8 [ [[SPEC_SELECT]], [[FOR_BODY3_US]] ], [ [[SPEC_SELECT]], [[FOR_BODY3_US]] ], [ 0, [[FOR_BODY3]] ], [ 0, [[FOR_BODY3]] ]
@@ -213,13 +213,13 @@ define void @PR36225(i32 %a, i32 %b, i1 %c1, i3 %v1, i3 %v2) {
 ; DBGINFO-NEXT:    [[SPEC_SELECT:%.*]] = select i1 [[TOBOOL]], i8 0, i8 4, !dbg [[DBG97:![0-9]+]]
 ; DBGINFO-NEXT:    call void @llvm.dbg.value(metadata i8 [[SPEC_SELECT]], metadata [[META90:![0-9]+]], metadata !DIExpression()), !dbg [[DBG97]]
 ; DBGINFO-NEXT:    switch i3 [[V1:%.*]], label [[EXIT:%.*]] [
-; DBGINFO-NEXT:    i3 0, label [[FOR_END:%.*]]
-; DBGINFO-NEXT:    i3 -1, label [[FOR_END]]
+; DBGINFO-NEXT:      i3 0, label [[FOR_END:%.*]]
+; DBGINFO-NEXT:      i3 -1, label [[FOR_END]]
 ; DBGINFO-NEXT:    ], !dbg [[DBG98:![0-9]+]]
 ; DBGINFO:       for.body3:
 ; DBGINFO-NEXT:    switch i3 [[V2:%.*]], label [[EXIT]] [
-; DBGINFO-NEXT:    i3 0, label [[FOR_END]]
-; DBGINFO-NEXT:    i3 -1, label [[FOR_END]]
+; DBGINFO-NEXT:      i3 0, label [[FOR_END]]
+; DBGINFO-NEXT:      i3 -1, label [[FOR_END]]
 ; DBGINFO-NEXT:    ], !dbg [[DBG99:![0-9]+]]
 ; DBGINFO:       for.end:
 ; DBGINFO-NEXT:    [[H:%.*]] = phi i8 [ [[SPEC_SELECT]], [[FOR_BODY3_US]] ], [ [[SPEC_SELECT]], [[FOR_BODY3_US]] ], [ 0, [[FOR_BODY3]] ], [ 0, [[FOR_BODY3]] ], !dbg [[DBG100:![0-9]+]]
diff --git a/llvm/test/Transforms/InstCombine/cast.ll b/llvm/test/Transforms/InstCombine/cast.ll
index 59e488f3f23d52a..afa7ac45e96dcb4 100644
--- a/llvm/test/Transforms/InstCombine/cast.ll
+++ b/llvm/test/Transforms/InstCombine/cast.ll
@@ -124,12 +124,12 @@ define void @test_invoke_vararg_cast(ptr %a, ptr %b) personality ptr @__gxx_pers
 ; ALL-LABEL: @test_invoke_vararg_cast(
 ; ALL-NEXT:  entry:
 ; ALL-NEXT:    invoke void (i32, ...) @varargs(i32 1, ptr [[B:%.*]], ptr [[A:%.*]])
-; ALL-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+; ALL-NEXT:            to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
 ; ALL:       invoke.cont:
 ; ALL-NEXT:    ret void
 ; ALL:       lpad:
 ; ALL-NEXT:    [[TMP0:%.*]] = landingpad { ptr, i32 }
-; ALL-NEXT:    cleanup
+; ALL-NEXT:            cleanup
 ; ALL-NEXT:    ret void
 ;
 entry:
@@ -619,7 +619,7 @@ define <2 x i64> @test46vec(<2 x i64> %A) {
 ; ALL-NEXT:    [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32>
 ; ALL-NEXT:    [[C:%.*]] = shl <2 x i32> [[B]], <i32 8, i32 8>
 ; ALL-NEXT:    [[D:%.*]] = and <2 x i32> [[C]], <i32 10752, i32 10752>
-; ALL-NEXT:    [[E:%.*]] = zext <2 x i32> [[D]] to <2 x i64>
+; ALL-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[D]] to <2 x i64>
 ; ALL-NEXT:    ret <2 x i64> [[E]]
 ;
   %B = trunc <2 x i64> %A to <2 x i32>
@@ -647,7 +647,7 @@ define i64 @test48(i8 %A1, i8 %a2) {
 ; ALL-NEXT:    [[Z2:%.*]] = zext i8 [[A1:%.*]] to i32
 ; ALL-NEXT:    [[C:%.*]] = shl nuw nsw i32 [[Z2]], 8
 ; ALL-NEXT:    [[D:%.*]] = or i32 [[C]], [[Z2]]
-; ALL-NEXT:    [[E:%.*]] = zext i32 [[D]] to i64
+; ALL-NEXT:    [[E:%.*]] = zext nneg i32 [[D]] to i64
 ; ALL-NEXT:    ret i64 [[E]]
 ;
   %Z1 = zext i8 %a2 to i32
@@ -721,7 +721,7 @@ define i64 @test53(i32 %A) {
 ; ALL-LABEL: @test53(
 ; ALL-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 7224
 ; ALL-NEXT:    [[TMP2:%.*]] = or i32 [[TMP1]], 32962
-; ALL-NEXT:    [[D:%.*]] = zext i32 [[TMP2]] to i64
+; ALL-NEXT:    [[D:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; ALL-NEXT:    ret i64 [[D]]
 ;
   %B = trunc i32 %A to i16
@@ -748,7 +748,7 @@ define i32 @test54(i64 %A) {
 define i64 @test55(i32 %A) {
 ; ALL-LABEL: @test55(
 ; ALL-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], 7224
-; ALL-NEXT:    [[C:%.*]] = zext i32 [[TMP1]] to i64
+; ALL-NEXT:    [[C:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; ALL-NEXT:    [[D:%.*]] = or i64 [[C]], -32574
 ; ALL-NEXT:    ret i64 [[D]]
 ;
@@ -776,7 +776,7 @@ define <2 x i64> @test56vec(<2 x i16> %A) {
 ; ALL-LABEL: @test56vec(
 ; ALL-NEXT:    [[P353:%.*]] = sext <2 x i16> [[A:%.*]] to <2 x i32>
 ; ALL-NEXT:    [[P354:%.*]] = lshr <2 x i32> [[P353]], <i32 5, i32 5>
-; ALL-NEXT:    [[P355:%.*]] = zext <2 x i32> [[P354]] to <2 x i64>
+; ALL-NEXT:    [[P355:%.*]] = zext nneg <2 x i32> [[P354]] to <2 x i64>
 ; ALL-NEXT:    ret <2 x i64> [[P355]]
 ;
   %p353 = sext <2 x i16> %A to <2 x i32>
@@ -801,7 +801,7 @@ define <2 x i64> @test57vec(<2 x i64> %A) {
 ; ALL-LABEL: @test57vec(
 ; ALL-NEXT:    [[B:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i32>
 ; ALL-NEXT:    [[C:%.*]] = lshr <2 x i32> [[B]], <i32 8, i32 8>
-; ALL-NEXT:    [[E:%.*]] = zext <2 x i32> [[C]] to <2 x i64>
+; ALL-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[C]] to <2 x i64>
 ; ALL-NEXT:    ret <2 x i64> [[E]]
 ;
   %B = trunc <2 x i64> %A to <2 x i32>
@@ -831,7 +831,7 @@ define i64 @test59(i8 %A, i8 %B) {
 ; ALL-NEXT:    [[D:%.*]] = shl nuw nsw i64 [[C]], 4
 ; ALL-NEXT:    [[E:%.*]] = and i64 [[D]], 48
 ; ALL-NEXT:    [[TMP1:%.*]] = lshr i8 [[B:%.*]], 4
-; ALL-NEXT:    [[G:%.*]] = zext i8 [[TMP1]] to i64
+; ALL-NEXT:    [[G:%.*]] = zext nneg i8 [[TMP1]] to i64
 ; ALL-NEXT:    [[H:%.*]] = or i64 [[E]], [[G]]
 ; ALL-NEXT:    ret i64 [[H]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/ctpop.ll b/llvm/test/Transforms/InstCombine/ctpop.ll
index f3419768bbd0285..dcea5fa87479eb4 100644
--- a/llvm/test/Transforms/InstCombine/ctpop.ll
+++ b/llvm/test/Transforms/InstCombine/ctpop.ll
@@ -347,7 +347,7 @@ define <2 x i32> @sub_ctpop_vec_extra_use(<2 x i32> %a, ptr %p) {
 define i32 @zext_ctpop(i16 %x) {
 ; CHECK-LABEL: @zext_ctpop(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.ctpop.i16(i16 [[X:%.*]]), !range [[RNG4:![0-9]+]]
-; CHECK-NEXT:    [[P:%.*]] = zext i16 [[TMP1]] to i32
+; CHECK-NEXT:    [[P:%.*]] = zext nneg i16 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[P]]
 ;
   %z = zext i16 %x to i32
@@ -358,7 +358,7 @@ define i32 @zext_ctpop(i16 %x) {
 define <2 x i32> @zext_ctpop_vec(<2 x i7> %x) {
 ; CHECK-LABEL: @zext_ctpop_vec(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i7> @llvm.ctpop.v2i7(<2 x i7> [[X:%.*]]), !range [[RNG2]]
-; CHECK-NEXT:    [[P:%.*]] = zext <2 x i7> [[TMP1]] to <2 x i32>
+; CHECK-NEXT:    [[P:%.*]] = zext nneg <2 x i7> [[TMP1]] to <2 x i32>
 ; CHECK-NEXT:    ret <2 x i32> [[P]]
 ;
   %z = zext <2 x i7> %x to <2 x i32>
diff --git a/llvm/test/Transforms/InstCombine/cttz.ll b/llvm/test/Transforms/InstCombine/cttz.ll
index 4ef286afe26e97b..6ea5e5e141b2edf 100644
--- a/llvm/test/Transforms/InstCombine/cttz.ll
+++ b/llvm/test/Transforms/InstCombine/cttz.ll
@@ -9,7 +9,7 @@ declare void @use(i32)
 define i32 @cttz_zext_zero_undef(i16 %x) {
 ; CHECK-LABEL: @cttz_zext_zero_undef(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.cttz.i16(i16 [[X:%.*]], i1 true), !range [[RNG0:![0-9]+]]
-; CHECK-NEXT:    [[TZ:%.*]] = zext i16 [[TMP1]] to i32
+; CHECK-NEXT:    [[TZ:%.*]] = zext nneg i16 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[TZ]]
 ;
   %z = zext i16 %x to i32
@@ -44,7 +44,7 @@ define i32 @cttz_zext_zero_undef_extra_use(i16 %x) {
 define <2 x i64> @cttz_zext_zero_undef_vec(<2 x i32> %x) {
 ; CHECK-LABEL: @cttz_zext_zero_undef_vec(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> [[X:%.*]], i1 true), !range [[RNG1]]
-; CHECK-NEXT:    [[TZ:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[TZ:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[TZ]]
 ;
   %z = zext <2 x i32> %x to <2 x i64>
@@ -66,7 +66,7 @@ define <2 x i64> @cttz_zext_zero_def_vec(<2 x i32> %x) {
 define i32 @cttz_sext_zero_undef(i16 %x) {
 ; CHECK-LABEL: @cttz_sext_zero_undef(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.cttz.i16(i16 [[X:%.*]], i1 true), !range [[RNG0]]
-; CHECK-NEXT:    [[TZ:%.*]] = zext i16 [[TMP1]] to i32
+; CHECK-NEXT:    [[TZ:%.*]] = zext nneg i16 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[TZ]]
 ;
   %s = sext i16 %x to i32
@@ -101,7 +101,7 @@ define i32 @cttz_sext_zero_undef_extra_use(i16 %x) {
 define <2 x i64> @cttz_sext_zero_undef_vec(<2 x i32> %x) {
 ; CHECK-LABEL: @cttz_sext_zero_undef_vec(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> [[X:%.*]], i1 true), !range [[RNG1]]
-; CHECK-NEXT:    [[TZ:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[TZ:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[TZ]]
 ;
   %s = sext <2 x i32> %x to <2 x i64>
diff --git a/llvm/test/Transforms/InstCombine/fmul.ll b/llvm/test/Transforms/InstCombine/fmul.ll
index 5e6db8f964eb406..8ecbb85018428dd 100644
--- a/llvm/test/Transforms/InstCombine/fmul.ll
+++ b/llvm/test/Transforms/InstCombine/fmul.ll
@@ -1059,7 +1059,7 @@ define void @fmul_loop_invariant_fdiv(float* %a, float %x) {
 ; CHECK-NEXT:    ret void
 ; CHECK:       for.body:
 ; CHECK-NEXT:    [[I_08:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[I_08]] to i64
+; CHECK-NEXT:    [[IDXPROM:%.*]] = zext nneg i32 [[I_08]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IDXPROM]]
 ; CHECK-NEXT:    [[F:%.*]] = load float, ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT:    [[M:%.*]] = fdiv fast float [[F]], [[X:%.*]]
diff --git a/llvm/test/Transforms/InstCombine/freeze.ll b/llvm/test/Transforms/InstCombine/freeze.ll
index 3fde49d08481278..dd9272b4b35f193 100644
--- a/llvm/test/Transforms/InstCombine/freeze.ll
+++ b/llvm/test/Transforms/InstCombine/freeze.ll
@@ -240,10 +240,10 @@ define void @freeze_dominated_uses_catchswitch(i1 %c, i32 %x) personality ptr @_
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    invoke void @use_i32(i32 0)
-; CHECK-NEXT:    to label [[CLEANUP:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
+; CHECK-NEXT:            to label [[CLEANUP:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
 ; CHECK:       if.else:
 ; CHECK-NEXT:    invoke void @use_i32(i32 1)
-; CHECK-NEXT:    to label [[CLEANUP]] unwind label [[CATCH_DISPATCH]]
+; CHECK-NEXT:            to label [[CLEANUP]] unwind label [[CATCH_DISPATCH]]
 ; CHECK:       catch.dispatch:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i32 [ 0, [[IF_THEN]] ], [ [[X:%.*]], [[IF_ELSE]] ]
 ; CHECK-NEXT:    [[CS:%.*]] = catchswitch within none [label [[CATCH:%.*]], label %catch2] unwind to caller
@@ -384,7 +384,7 @@ define i32 @freeze_invoke_use_in_phi(i1 %c) personality ptr undef {
 ; CHECK-LABEL: @freeze_invoke_use_in_phi(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[X:%.*]] = invoke i32 @get_i32()
-; CHECK-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[INVOKE_UNWIND:%.*]]
+; CHECK-NEXT:            to label [[INVOKE_CONT:%.*]] unwind label [[INVOKE_UNWIND:%.*]]
 ; CHECK:       invoke.cont:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i32 [ [[X]], [[ENTRY:%.*]] ], [ 0, [[INVOKE_CONT]] ]
 ; CHECK-NEXT:    [[FR:%.*]] = freeze i32 [[X]]
@@ -393,7 +393,7 @@ define i32 @freeze_invoke_use_in_phi(i1 %c) personality ptr undef {
 ; CHECK-NEXT:    br label [[INVOKE_CONT]]
 ; CHECK:       invoke.unwind:
 ; CHECK-NEXT:    [[TMP0:%.*]] = landingpad i8
-; CHECK-NEXT:    cleanup
+; CHECK-NEXT:            cleanup
 ; CHECK-NEXT:    unreachable
 ;
 entry:
@@ -416,7 +416,7 @@ define i32 @freeze_invoke_use_after_phi(i1 %c) personality ptr undef {
 ; CHECK-LABEL: @freeze_invoke_use_after_phi(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[X:%.*]] = invoke i32 @get_i32()
-; CHECK-NEXT:    to label [[INVOKE_CONT:%.*]] unwind label [[INVOKE_UNWIND:%.*]]
+; CHECK-NEXT:            to label [[INVOKE_CONT:%.*]] unwind label [[INVOKE_UNWIND:%.*]]
 ; CHECK:       invoke.cont:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i32 [ [[X]], [[ENTRY:%.*]] ], [ 0, [[INVOKE_CONT]] ]
 ; CHECK-NEXT:    [[FR:%.*]] = freeze i32 [[X]]
@@ -426,7 +426,7 @@ define i32 @freeze_invoke_use_after_phi(i1 %c) personality ptr undef {
 ; CHECK-NEXT:    br label [[INVOKE_CONT]]
 ; CHECK:       invoke.unwind:
 ; CHECK-NEXT:    [[TMP0:%.*]] = landingpad i8
-; CHECK-NEXT:    cleanup
+; CHECK-NEXT:            cleanup
 ; CHECK-NEXT:    unreachable
 ;
 entry:
@@ -450,7 +450,7 @@ define i32 @freeze_callbr_use_after_phi(i1 %c) {
 ; CHECK-LABEL: @freeze_callbr_use_after_phi(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[X:%.*]] = callbr i32 asm sideeffect "", "=r"() #[[ATTR1:[0-9]+]]
-; CHECK-NEXT:    to label [[CALLBR_CONT:%.*]] []
+; CHECK-NEXT:            to label [[CALLBR_CONT:%.*]] []
 ; CHECK:       callbr.cont:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i32 [ [[X]], [[ENTRY:%.*]] ], [ 0, [[CALLBR_CONT]] ]
 ; CHECK-NEXT:    call void @use_i32(i32 [[X]])
@@ -493,10 +493,10 @@ define i1 @fully_propagate_freeze(i32 %0, i32 noundef %1) {
 ; CHECK-LABEL: @fully_propagate_freeze(
 ; CHECK-NEXT:    [[DOTFR:%.*]] = freeze i32 [[TMP0:%.*]]
 ; CHECK-NEXT:    [[DR:%.*]] = lshr i32 [[DOTFR]], 2
-; CHECK-NEXT:    [[IDX1:%.*]] = zext i32 [[DR]] to i64
+; CHECK-NEXT:    [[IDX1:%.*]] = zext nneg i32 [[DR]] to i64
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[DR]], 1
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD]], [[TMP1:%.*]]
-; CHECK-NEXT:    [[IDX2:%.*]] = zext i32 [[DR]] to i64
+; CHECK-NEXT:    [[IDX2:%.*]] = zext nneg i32 [[DR]] to i64
 ; CHECK-NEXT:    [[V:%.*]] = call i1 @mock_use(i64 [[IDX1]], i64 [[IDX2]])
 ; CHECK-NEXT:    [[RET:%.*]] = and i1 [[V]], [[CMP]]
 ; CHECK-NEXT:    ret i1 [[RET]]
@@ -978,7 +978,7 @@ define void @fold_phi_invoke_start_value(i32 %n) personality ptr undef {
 ; CHECK-LABEL: @fold_phi_invoke_start_value(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[INIT:%.*]] = invoke i32 @get_i32()
-; CHECK-NEXT:    to label [[LOOP:%.*]] unwind label [[UNWIND:%.*]]
+; CHECK-NEXT:            to label [[LOOP:%.*]] unwind label [[UNWIND:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[I:%.*]] = phi i32 [ [[INIT]], [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[LOOP]] ]
 ; CHECK-NEXT:    [[I_FR:%.*]] = freeze i32 [[I]]
@@ -987,7 +987,7 @@ define void @fold_phi_invoke_start_value(i32 %n) personality ptr undef {
 ; CHECK-NEXT:    br i1 [[COND]], label [[LOOP]], label [[EXIT:%.*]]
 ; CHECK:       unwind:
 ; CHECK-NEXT:    [[TMP0:%.*]] = landingpad i8
-; CHECK-NEXT:    cleanup
+; CHECK-NEXT:            cleanup
 ; CHECK-NEXT:    unreachable
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret void
@@ -1015,7 +1015,7 @@ define void @fold_phi_invoke_noundef_start_value(i32 %n) personality ptr undef {
 ; CHECK-LABEL: @fold_phi_invoke_noundef_start_value(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[INIT:%.*]] = invoke noundef i32 @get_i32()
-; CHECK-NEXT:    to label [[LOOP:%.*]] unwind label [[UNWIND:%.*]]
+; CHECK-NEXT:            to label [[LOOP:%.*]] unwind label [[UNWIND:%.*]]
 ; CHECK:       loop:
 ; CHECK-NEXT:    [[I:%.*]] = phi i32 [ [[INIT]], [[ENTRY:%.*]] ], [ [[I_NEXT:%.*]], [[LOOP]] ]
 ; CHECK-NEXT:    [[I_NEXT]] = add i32 [[I]], 1
@@ -1023,7 +1023,7 @@ define void @fold_phi_invoke_noundef_start_value(i32 %n) personality ptr undef {
 ; CHECK-NEXT:    br i1 [[COND]], label [[LOOP]], label [[EXIT:%.*]]
 ; CHECK:       unwind:
 ; CHECK-NEXT:    [[TMP0:%.*]] = landingpad i8
-; CHECK-NEXT:    cleanup
+; CHECK-NEXT:            cleanup
 ; CHECK-NEXT:    unreachable
 ; CHECK:       exit:
 ; CHECK-NEXT:    ret void
diff --git a/llvm/test/Transforms/InstCombine/load-bitcast-select.ll b/llvm/test/Transforms/InstCombine/load-bitcast-select.ll
index 85eb17ae3d9d59c..4c5f94d53ada5ab 100644
--- a/llvm/test/Transforms/InstCombine/load-bitcast-select.ll
+++ b/llvm/test/Transforms/InstCombine/load-bitcast-select.ll
@@ -15,7 +15,7 @@ define void @_Z3foov() {
 ; CHECK:       for.cond.cleanup:
 ; CHECK-NEXT:    ret void
 ; CHECK:       for.body:
-; CHECK-NEXT:    [[TMP0:%.*]] = zext i32 [[I_0]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = zext nneg i32 [[I_0]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x float], ptr @a, i64 0, i64 [[TMP0]]
 ; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [1000 x float], ptr @b, i64 0, i64 [[TMP0]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = load float, ptr [[ARRAYIDX]], align 4
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index f3209dbbe445664..56d7cf5f452ce8a 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -290,7 +290,7 @@ define <2 x i8> @smear_sign_and_widen_splat(<2 x i6> %x) {
 define i18 @fake_sext(i3 %x) {
 ; CHECK-LABEL: @fake_sext(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i3 [[X:%.*]], 2
-; CHECK-NEXT:    [[SH:%.*]] = zext i3 [[TMP1]] to i18
+; CHECK-NEXT:    [[SH:%.*]] = zext nneg i3 [[TMP1]] to i18
 ; CHECK-NEXT:    ret i18 [[SH]]
 ;
   %sext = sext i3 %x to i18
@@ -314,7 +314,7 @@ define i32 @fake_sext_but_should_not_change_type(i3 %x) {
 define <2 x i8> @fake_sext_splat(<2 x i3> %x) {
 ; CHECK-LABEL: @fake_sext_splat(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i3> [[X:%.*]], <i3 2, i3 2>
-; CHECK-NEXT:    [[SH:%.*]] = zext <2 x i3> [[TMP1]] to <2 x i8>
+; CHECK-NEXT:    [[SH:%.*]] = zext nneg <2 x i3> [[TMP1]] to <2 x i8>
 ; CHECK-NEXT:    ret <2 x i8> [[SH]]
 ;
   %sext = sext <2 x i3> %x to <2 x i8>
@@ -327,7 +327,7 @@ define <2 x i8> @fake_sext_splat(<2 x i3> %x) {
 define <2 x i32> @narrow_lshr_constant(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-LABEL: @narrow_lshr_constant(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 3, i8 3>
-; CHECK-NEXT:    [[SH:%.*]] = zext <2 x i8> [[TMP1]] to <2 x i32>
+; CHECK-NEXT:    [[SH:%.*]] = zext nneg <2 x i8> [[TMP1]] to <2 x i32>
 ; CHECK-NEXT:    ret <2 x i32> [[SH]]
 ;
   %zx = zext <2 x i8> %x to <2 x i32>
@@ -908,7 +908,7 @@ define <2 x i64> @narrow_bswap_overshift(<2 x i32> %x) {
 ; CHECK-LABEL: @narrow_bswap_overshift(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[X:%.*]])
 ; CHECK-NEXT:    [[TMP2:%.*]] = lshr <2 x i32> [[TMP1]], <i32 16, i32 16>
-; CHECK-NEXT:    [[S:%.*]] = zext <2 x i32> [[TMP2]] to <2 x i64>
+; CHECK-NEXT:    [[S:%.*]] = zext nneg <2 x i32> [[TMP2]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[S]]
 ;
   %z = zext <2 x i32> %x to <2 x i64>
@@ -921,7 +921,7 @@ define i128 @narrow_bswap_overshift2(i96 %x) {
 ; CHECK-LABEL: @narrow_bswap_overshift2(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i96 @llvm.bswap.i96(i96 [[X:%.*]])
 ; CHECK-NEXT:    [[TMP2:%.*]] = lshr i96 [[TMP1]], 29
-; CHECK-NEXT:    [[S:%.*]] = zext i96 [[TMP2]] to i128
+; CHECK-NEXT:    [[S:%.*]] = zext nneg i96 [[TMP2]] to i128
 ; CHECK-NEXT:    ret i128 [[S]]
 ;
   %z = zext i96 %x to i128
diff --git a/llvm/test/Transforms/InstCombine/minmax-fold.ll b/llvm/test/Transforms/InstCombine/minmax-fold.ll
index b67c0f2bb612c50..1f7837c109b3f1f 100644
--- a/llvm/test/Transforms/InstCombine/minmax-fold.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-fold.ll
@@ -57,7 +57,7 @@ define i32 @t4(i64 %a) {
 define i64 @t5(i32 %a) {
 ; CHECK-LABEL: @t5(
 ; CHECK-NEXT:    [[NARROW:%.*]] = call i32 @llvm.smax.i32(i32 [[A:%.*]], i32 5)
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[NARROW]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = zext nneg i32 [[NARROW]] to i64
 ; CHECK-NEXT:    ret i64 [[TMP1]]
 ;
   %1 = icmp slt i32 %a, 5
diff --git a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
index 09003ebacd6ca1c..3802036d2a715ac 100644
--- a/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
+++ b/llvm/test/Transforms/InstCombine/minmax-intrinsics.ll
@@ -346,7 +346,7 @@ define i8 @umin_sext_constant_big(i5 %x) {
 define i8 @umin_zext_constant(i5 %x) {
 ; CHECK-LABEL: @umin_zext_constant(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i5 @llvm.umin.i5(i5 [[X:%.*]], i5 7)
-; CHECK-NEXT:    [[M:%.*]] = zext i5 [[TMP1]] to i8
+; CHECK-NEXT:    [[M:%.*]] = zext nneg i5 [[TMP1]] to i8
 ; CHECK-NEXT:    ret i8 [[M]]
 ;
   %e = zext i5 %x to i8
diff --git a/llvm/test/Transforms/InstCombine/narrow-math.ll b/llvm/test/Transforms/InstCombine/narrow-math.ll
index 6eacb1ca2c018e7..0fcded7b0220a1c 100644
--- a/llvm/test/Transforms/InstCombine/narrow-math.ll
+++ b/llvm/test/Transforms/InstCombine/narrow-math.ll
@@ -28,7 +28,7 @@ define i64 @sext_zext_add_mismatched_exts(i32 %A) {
 ; CHECK-NEXT:    [[B:%.*]] = ashr i32 [[A:%.*]], 7
 ; CHECK-NEXT:    [[C:%.*]] = lshr i32 [[A]], 9
 ; CHECK-NEXT:    [[D:%.*]] = sext i32 [[B]] to i64
-; CHECK-NEXT:    [[E:%.*]] = zext i32 [[C]] to i64
+; CHECK-NEXT:    [[E:%.*]] = zext nneg i32 [[C]] to i64
 ; CHECK-NEXT:    [[F:%.*]] = add nsw i64 [[D]], [[E]]
 ; CHECK-NEXT:    ret i64 [[F]]
 ;
@@ -125,7 +125,7 @@ define i64 @test1(i32 %V) {
 ; CHECK-NEXT:    [[CALL1:%.*]] = call i32 @callee(), !range [[RNG0:![0-9]+]]
 ; CHECK-NEXT:    [[CALL2:%.*]] = call i32 @callee(), !range [[RNG0]]
 ; CHECK-NEXT:    [[NARROW:%.*]] = add nuw nsw i32 [[CALL1]], [[CALL2]]
-; CHECK-NEXT:    [[ADD:%.*]] = zext i32 [[NARROW]] to i64
+; CHECK-NEXT:    [[ADD:%.*]] = zext nneg i32 [[NARROW]] to i64
 ; CHECK-NEXT:    ret i64 [[ADD]]
 ;
   %call1 = call i32 @callee(), !range !0
@@ -156,7 +156,7 @@ define i64 @test3(i32 %V) {
 ; CHECK-NEXT:    [[CALL1:%.*]] = call i32 @callee(), !range [[RNG0]]
 ; CHECK-NEXT:    [[CALL2:%.*]] = call i32 @callee(), !range [[RNG0]]
 ; CHECK-NEXT:    [[NARROW:%.*]] = mul nuw nsw i32 [[CALL1]], [[CALL2]]
-; CHECK-NEXT:    [[ADD:%.*]] = zext i32 [[NARROW]] to i64
+; CHECK-NEXT:    [[ADD:%.*]] = zext nneg i32 [[NARROW]] to i64
 ; CHECK-NEXT:    ret i64 [[ADD]]
 ;
   %call1 = call i32 @callee(), !range !0
@@ -581,8 +581,8 @@ define i64 @test17(i32 %V) {
 ; CHECK-LABEL: @test17(
 ; CHECK-NEXT:    [[CALL1:%.*]] = call i32 @callee(), !range [[RNG0]]
 ; CHECK-NEXT:    [[CALL2:%.*]] = call i32 @callee(), !range [[RNG0]]
-; CHECK-NEXT:    [[SEXT1:%.*]] = zext i32 [[CALL1]] to i64
-; CHECK-NEXT:    [[SEXT2:%.*]] = zext i32 [[CALL2]] to i64
+; CHECK-NEXT:    [[SEXT1:%.*]] = zext nneg i32 [[CALL1]] to i64
+; CHECK-NEXT:    [[SEXT2:%.*]] = zext nneg i32 [[CALL2]] to i64
 ; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[SEXT1]], [[SEXT2]]
 ; CHECK-NEXT:    ret i64 [[SUB]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/negated-bitmask.ll b/llvm/test/Transforms/InstCombine/negated-bitmask.ll
index e1d3a030297b2d7..fe2386bd65c3180 100644
--- a/llvm/test/Transforms/InstCombine/negated-bitmask.ll
+++ b/llvm/test/Transforms/InstCombine/negated-bitmask.ll
@@ -216,7 +216,7 @@ define <2 x i64> @neg_signbit_use1(<2 x i32> %x) {
 define i8 @neg_signbit_use2(i5 %x) {
 ; CHECK-LABEL: @neg_signbit_use2(
 ; CHECK-NEXT:    [[S:%.*]] = lshr i5 [[X:%.*]], 4
-; CHECK-NEXT:    [[Z:%.*]] = zext i5 [[S]] to i8
+; CHECK-NEXT:    [[Z:%.*]] = zext nneg i5 [[S]] to i8
 ; CHECK-NEXT:    call void @usei8(i8 [[Z]])
 ; CHECK-NEXT:    [[R:%.*]] = sub nsw i8 0, [[Z]]
 ; CHECK-NEXT:    ret i8 [[R]]
@@ -248,7 +248,7 @@ define i32 @neg_not_signbit1(i8 %x) {
 define i32 @neg_not_signbit2(i8 %x) {
 ; CHECK-LABEL: @neg_not_signbit2(
 ; CHECK-NEXT:    [[S:%.*]] = lshr i8 [[X:%.*]], 6
-; CHECK-NEXT:    [[Z:%.*]] = zext i8 [[S]] to i32
+; CHECK-NEXT:    [[Z:%.*]] = zext nneg i8 [[S]] to i32
 ; CHECK-NEXT:    [[R:%.*]] = sub nsw i32 0, [[Z]]
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/overflow-mul.ll b/llvm/test/Transforms/InstCombine/overflow-mul.ll
index 9a9b18bde7567a9..6b5a65c03ee102b 100644
--- a/llvm/test/Transforms/InstCombine/overflow-mul.ll
+++ b/llvm/test/Transforms/InstCombine/overflow-mul.ll
@@ -307,7 +307,7 @@ define i32 @extra_and_use_small_mask(i32 %x, i32 %y) {
 ; CHECK-NEXT:    [[UMUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
 ; CHECK-NEXT:    [[UMUL_VALUE:%.*]] = extractvalue { i32, i1 } [[UMUL]], 0
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[UMUL_VALUE]], 268435455
-; CHECK-NEXT:    [[AND:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[AND:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[OVERFLOW:%.*]] = extractvalue { i32, i1 } [[UMUL]], 1
 ; CHECK-NEXT:    call void @use.i64(i64 [[AND]])
 ; CHECK-NEXT:    [[RETVAL:%.*]] = zext i1 [[OVERFLOW]] to i32
diff --git a/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll b/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll
index dbff28487036ff5..bbb8d848be6f4f7 100644
--- a/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll
+++ b/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll
@@ -17,7 +17,7 @@ define i32 @reduce_add_sext(<4 x i1> %x) {
 ; CHECK-LABEL: @reduce_add_sext(
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i1> [[X:%.*]] to i4
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i4 @llvm.ctpop.i4(i4 [[TMP1]]), !range [[RNG1:![0-9]+]]
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i4 [[TMP2]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i4 [[TMP2]] to i32
 ; CHECK-NEXT:    [[RES:%.*]] = sub nsw i32 0, [[TMP3]]
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
@@ -30,7 +30,7 @@ define i64 @reduce_add_zext(<8 x i1> %x) {
 ; CHECK-LABEL: @reduce_add_zext(
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i1> [[X:%.*]] to i8
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i8 @llvm.ctpop.i8(i8 [[TMP1]]), !range [[RNG0]]
-; CHECK-NEXT:    [[RES:%.*]] = zext i8 [[TMP2]] to i64
+; CHECK-NEXT:    [[RES:%.*]] = zext nneg i8 [[TMP2]] to i64
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
   %zext = zext <8 x i1> %x to <8 x i64>
@@ -87,7 +87,7 @@ define i64 @reduce_add_zext_external_use(<8 x i1> %x) {
 ; CHECK-LABEL: @reduce_add_zext_external_use(
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i1> [[X:%.*]] to i8
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i8 @llvm.ctpop.i8(i8 [[TMP1]]), !range [[RNG0]]
-; CHECK-NEXT:    [[RES:%.*]] = zext i8 [[TMP2]] to i64
+; CHECK-NEXT:    [[RES:%.*]] = zext nneg i8 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <8 x i1> [[X]], i64 0
 ; CHECK-NEXT:    [[EXT:%.*]] = zext i1 [[TMP3]] to i64
 ; CHECK-NEXT:    store i64 [[EXT]], ptr @glob1, align 8
diff --git a/llvm/test/Transforms/InstCombine/reduction-xor-sext-zext-i1.ll b/llvm/test/Transforms/InstCombine/reduction-xor-sext-zext-i1.ll
index 40175cab3305962..97b6f7b6d96cdce 100644
--- a/llvm/test/Transforms/InstCombine/reduction-xor-sext-zext-i1.ll
+++ b/llvm/test/Transforms/InstCombine/reduction-xor-sext-zext-i1.ll
@@ -32,7 +32,7 @@ define i64 @reduce_xor_zext(<8 x i1> %x) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i1> [[X:%.*]] to i8
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i8 @llvm.ctpop.i8(i8 [[TMP1]]), !range [[RNG0]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = and i8 [[TMP2]], 1
-; CHECK-NEXT:    [[RES:%.*]] = zext i8 [[TMP3]] to i64
+; CHECK-NEXT:    [[RES:%.*]] = zext nneg i8 [[TMP3]] to i64
 ; CHECK-NEXT:    ret i64 [[RES]]
 ;
   %zext = zext <8 x i1> %x to <8 x i64>
@@ -93,7 +93,7 @@ define i64 @reduce_xor_zext_external_use(<8 x i1> %x) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i1> [[X:%.*]] to i8
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i8 @llvm.ctpop.i8(i8 [[TMP1]]), !range [[RNG0]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = and i8 [[TMP2]], 1
-; CHECK-NEXT:    [[RES:%.*]] = zext i8 [[TMP3]] to i64
+; CHECK-NEXT:    [[RES:%.*]] = zext nneg i8 [[TMP3]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <8 x i1> [[X]], i64 0
 ; CHECK-NEXT:    [[EXT:%.*]] = zext i1 [[TMP4]] to i64
 ; CHECK-NEXT:    store i64 [[EXT]], ptr @glob1, align 8
diff --git a/llvm/test/Transforms/InstCombine/rem.ll b/llvm/test/Transforms/InstCombine/rem.ll
index 878581c3518acb0..364d51f343fb3c5 100644
--- a/llvm/test/Transforms/InstCombine/rem.ll
+++ b/llvm/test/Transforms/InstCombine/rem.ll
@@ -354,7 +354,7 @@ define i64 @test15(i32 %x, i32 %y) {
 ; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[Y:%.*]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[NOTMASK]], -1
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
-; CHECK-NEXT:    [[UREM:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[UREM:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    ret i64 [[UREM]]
 ;
   %shl = shl i32 1, %y
diff --git a/llvm/test/Transforms/InstCombine/select-bitext-bitwise-ops.ll b/llvm/test/Transforms/InstCombine/select-bitext-bitwise-ops.ll
index 27de0f7723685bd..6910aa5b57a2966 100644
--- a/llvm/test/Transforms/InstCombine/select-bitext-bitwise-ops.ll
+++ b/llvm/test/Transforms/InstCombine/select-bitext-bitwise-ops.ll
@@ -5,7 +5,7 @@ define i64 @sel_false_val_is_a_masked_shl_of_true_val1(i32 %x, i64 %y) {
 ; CHECK-LABEL: @sel_false_val_is_a_masked_shl_of_true_val1(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 60
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -22,7 +22,7 @@ define i64 @sel_false_val_is_a_masked_shl_of_true_val2(i32 %x, i64 %y) {
 ; CHECK-LABEL: @sel_false_val_is_a_masked_shl_of_true_val2(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 60
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -39,7 +39,7 @@ define i64 @sel_false_val_is_a_masked_lshr_of_true_val1(i32 %x, i64 %y) {
 ; CHECK-LABEL: @sel_false_val_is_a_masked_lshr_of_true_val1(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 15
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -56,7 +56,7 @@ define i64 @sel_false_val_is_a_masked_lshr_of_true_val2(i32 %x, i64 %y) {
 ; CHECK-LABEL: @sel_false_val_is_a_masked_lshr_of_true_val2(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 15
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/select-bitext.ll b/llvm/test/Transforms/InstCombine/select-bitext.ll
index 75272c643926db5..4c09448aa833910 100644
--- a/llvm/test/Transforms/InstCombine/select-bitext.ll
+++ b/llvm/test/Transforms/InstCombine/select-bitext.ll
@@ -166,7 +166,7 @@ define <2 x i32> @trunc_sel_equal_sext_vec(<2 x i32> %a, <2 x i1> %cmp) {
 define i64 @trunc_sel_larger_zext(i32 %a, i1 %cmp) {
 ; CHECK-LABEL: @trunc_sel_larger_zext(
 ; CHECK-NEXT:    [[TRUNC_MASK:%.*]] = and i32 [[A:%.*]], 65535
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[TRUNC_MASK]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = zext nneg i32 [[TRUNC_MASK]] to i64
 ; CHECK-NEXT:    [[EXT:%.*]] = select i1 [[CMP:%.*]], i64 [[TMP1]], i64 42
 ; CHECK-NEXT:    ret i64 [[EXT]]
 ;
@@ -179,7 +179,7 @@ define i64 @trunc_sel_larger_zext(i32 %a, i1 %cmp) {
 define <2 x i64> @trunc_sel_larger_zext_vec(<2 x i32> %a, <2 x i1> %cmp) {
 ; CHECK-LABEL: @trunc_sel_larger_zext_vec(
 ; CHECK-NEXT:    [[TRUNC_MASK:%.*]] = and <2 x i32> [[A:%.*]], <i32 65535, i32 65535>
-; CHECK-NEXT:    [[TMP1:%.*]] = zext <2 x i32> [[TRUNC_MASK]] to <2 x i64>
+; CHECK-NEXT:    [[TMP1:%.*]] = zext nneg <2 x i32> [[TRUNC_MASK]] to <2 x i64>
 ; CHECK-NEXT:    [[EXT:%.*]] = select <2 x i1> [[CMP:%.*]], <2 x i64> [[TMP1]], <2 x i64> <i64 42, i64 43>
 ; CHECK-NEXT:    ret <2 x i64> [[EXT]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll b/llvm/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll
index 01faae90a65d7b9..03dd6188ac039fc 100644
--- a/llvm/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll
+++ b/llvm/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll
@@ -142,7 +142,7 @@ define i64 @test6b(i64 %x) {
 define i32 @test1c(i16 %x) {
 ; CHECK-LABEL: @test1c(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i16 @llvm.cttz.i16(i16 [[X:%.*]], i1 false), !range [[RNG0]]
-; CHECK-NEXT:    [[CAST2:%.*]] = zext i16 [[CT]] to i32
+; CHECK-NEXT:    [[CAST2:%.*]] = zext nneg i16 [[CT]] to i32
 ; CHECK-NEXT:    ret i32 [[CAST2]]
 ;
   %ct = tail call i16 @llvm.cttz.i16(i16 %x, i1 true)
@@ -155,7 +155,7 @@ define i32 @test1c(i16 %x) {
 define i64 @test2c(i16 %x) {
 ; CHECK-LABEL: @test2c(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i16 @llvm.cttz.i16(i16 [[X:%.*]], i1 false), !range [[RNG0]]
-; CHECK-NEXT:    [[CONV:%.*]] = zext i16 [[CT]] to i64
+; CHECK-NEXT:    [[CONV:%.*]] = zext nneg i16 [[CT]] to i64
 ; CHECK-NEXT:    ret i64 [[CONV]]
 ;
   %ct = tail call i16 @llvm.cttz.i16(i16 %x, i1 true)
@@ -168,7 +168,7 @@ define i64 @test2c(i16 %x) {
 define i64 @test3c(i32 %x) {
 ; CHECK-LABEL: @test3c(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range [[RNG1]]
-; CHECK-NEXT:    [[CONV:%.*]] = zext i32 [[CT]] to i64
+; CHECK-NEXT:    [[CONV:%.*]] = zext nneg i32 [[CT]] to i64
 ; CHECK-NEXT:    ret i64 [[CONV]]
 ;
   %ct = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
@@ -181,7 +181,7 @@ define i64 @test3c(i32 %x) {
 define i32 @test4c(i16 %x) {
 ; CHECK-LABEL: @test4c(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i16 @llvm.ctlz.i16(i16 [[X:%.*]], i1 false), !range [[RNG0]]
-; CHECK-NEXT:    [[CAST:%.*]] = zext i16 [[CT]] to i32
+; CHECK-NEXT:    [[CAST:%.*]] = zext nneg i16 [[CT]] to i32
 ; CHECK-NEXT:    ret i32 [[CAST]]
 ;
   %ct = tail call i16 @llvm.ctlz.i16(i16 %x, i1 true)
@@ -194,7 +194,7 @@ define i32 @test4c(i16 %x) {
 define i64 @test5c(i16 %x) {
 ; CHECK-LABEL: @test5c(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i16 @llvm.ctlz.i16(i16 [[X:%.*]], i1 false), !range [[RNG0]]
-; CHECK-NEXT:    [[CAST:%.*]] = zext i16 [[CT]] to i64
+; CHECK-NEXT:    [[CAST:%.*]] = zext nneg i16 [[CT]] to i64
 ; CHECK-NEXT:    ret i64 [[CAST]]
 ;
   %ct = tail call i16 @llvm.ctlz.i16(i16 %x, i1 true)
@@ -207,7 +207,7 @@ define i64 @test5c(i16 %x) {
 define i64 @test6c(i32 %x) {
 ; CHECK-LABEL: @test6c(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range [[RNG1]]
-; CHECK-NEXT:    [[CAST:%.*]] = zext i32 [[CT]] to i64
+; CHECK-NEXT:    [[CAST:%.*]] = zext nneg i32 [[CT]] to i64
 ; CHECK-NEXT:    ret i64 [[CAST]]
 ;
   %ct = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
@@ -387,7 +387,7 @@ define i16 @test6d(i32 %x) {
 define i64 @select_bug1(i32 %x) {
 ; CHECK-LABEL: @select_bug1(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range [[RNG1]]
-; CHECK-NEXT:    [[CONV:%.*]] = zext i32 [[CT]] to i64
+; CHECK-NEXT:    [[CONV:%.*]] = zext nneg i32 [[CT]] to i64
 ; CHECK-NEXT:    ret i64 [[CONV]]
 ;
   %ct = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
@@ -565,7 +565,7 @@ define i32 @test_multiuse_undef(i32 %x, ptr %p) {
 define i64 @test_multiuse_zext_def(i32 %x, ptr %p) {
 ; CHECK-LABEL: @test_multiuse_zext_def(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range [[RNG1]]
-; CHECK-NEXT:    [[CONV:%.*]] = zext i32 [[CT]] to i64
+; CHECK-NEXT:    [[CONV:%.*]] = zext nneg i32 [[CT]] to i64
 ; CHECK-NEXT:    store i64 [[CONV]], ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret i64 [[CONV]]
 ;
@@ -580,7 +580,7 @@ define i64 @test_multiuse_zext_def(i32 %x, ptr %p) {
 define i64 @test_multiuse_zext_undef(i32 %x, ptr %p) {
 ; CHECK-LABEL: @test_multiuse_zext_undef(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range [[RNG1]]
-; CHECK-NEXT:    [[CONV:%.*]] = zext i32 [[CT]] to i64
+; CHECK-NEXT:    [[CONV:%.*]] = zext nneg i32 [[CT]] to i64
 ; CHECK-NEXT:    store i64 [[CONV]], ptr [[P:%.*]], align 4
 ; CHECK-NEXT:    ret i64 [[CONV]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll b/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll
index f8649e671fa0803..91ef90db72bd18e 100644
--- a/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll
+++ b/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll
@@ -244,7 +244,7 @@ define i32 @select_clz_to_ctz_wrong_constant_for_zero(i32 %a) {
 define i4 @PR45762(i3 %x4) {
 ; CHECK-LABEL: @PR45762(
 ; CHECK-NEXT:    [[T4:%.*]] = call i3 @llvm.cttz.i3(i3 [[X4:%.*]], i1 false), !range [[RNG2:![0-9]+]]
-; CHECK-NEXT:    [[T7:%.*]] = zext i3 [[T4]] to i4
+; CHECK-NEXT:    [[T7:%.*]] = zext nneg i3 [[T4]] to i4
 ; CHECK-NEXT:    [[ONE_HOT_16:%.*]] = shl nuw i4 1, [[T7]]
 ; CHECK-NEXT:    [[OR_69_NOT:%.*]] = icmp eq i3 [[X4]], 0
 ; CHECK-NEXT:    [[UMUL_231:%.*]] = shl i4 [[ONE_HOT_16]], [[T7]]
@@ -273,7 +273,7 @@ define i4 @PR45762(i3 %x4) {
 define i4 @PR45762_logical(i3 %x4) {
 ; CHECK-LABEL: @PR45762_logical(
 ; CHECK-NEXT:    [[T4:%.*]] = call i3 @llvm.cttz.i3(i3 [[X4:%.*]], i1 false), !range [[RNG2]]
-; CHECK-NEXT:    [[T7:%.*]] = zext i3 [[T4]] to i4
+; CHECK-NEXT:    [[T7:%.*]] = zext nneg i3 [[T4]] to i4
 ; CHECK-NEXT:    [[ONE_HOT_16:%.*]] = shl nuw i4 1, [[T7]]
 ; CHECK-NEXT:    [[OR_69_NOT:%.*]] = icmp eq i3 [[X4]], 0
 ; CHECK-NEXT:    [[UMUL_231:%.*]] = shl i4 [[ONE_HOT_16]], [[T7]]
diff --git a/llvm/test/Transforms/InstCombine/select-obo-peo-ops.ll b/llvm/test/Transforms/InstCombine/select-obo-peo-ops.ll
index 5e3950c7fff2821..7c70178e1bd5170 100644
--- a/llvm/test/Transforms/InstCombine/select-obo-peo-ops.ll
+++ b/llvm/test/Transforms/InstCombine/select-obo-peo-ops.ll
@@ -5,7 +5,7 @@ define i64 @test_shl_nuw_nsw__all_are_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl_nuw_nsw__all_are_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 60
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -22,7 +22,7 @@ define i64 @test_shl_nuw__all_are_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl_nuw__all_are_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 60
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -39,7 +39,7 @@ define i64 @test_shl_nsw__all_are_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl_nsw__all_are_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 60
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -56,7 +56,7 @@ define i64 @test_shl__all_are_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl__all_are_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 60
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -270,7 +270,7 @@ define i64 @test_lshr_exact__exact_is_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_lshr_exact__exact_is_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 15
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -287,7 +287,7 @@ define i64 @test_lshr__exact_is_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_lshr__exact_is_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 15
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -304,7 +304,7 @@ define i64 @test_lshr_exact__exact_is_unsafe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_lshr_exact__exact_is_unsafe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 15
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
@@ -321,7 +321,7 @@ define i64 @test_lshr__exact_is_unsafe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_lshr__exact_is_unsafe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 2
 ; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 15
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
 ; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll b/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll
index 6e31f9b20e3a96d..c4a5d9bc5bf79ab 100644
--- a/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll
+++ b/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll
@@ -527,7 +527,7 @@ define i32 @select_icmp_and_8_eq_0_xor_8(i32 %x) {
 define i64 @select_icmp_x_and_8_eq_0_y_xor_8(i32 %x, i64 %y) {
 ; CHECK-LABEL: @select_icmp_x_and_8_eq_0_y_xor_8(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 8
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i32 [[AND]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = zext nneg i32 [[AND]] to i64
 ; CHECK-NEXT:    [[Y_XOR:%.*]] = xor i64 [[TMP1]], [[Y:%.*]]
 ; CHECK-NEXT:    ret i64 [[Y_XOR]]
 ;
@@ -542,7 +542,7 @@ define i64 @select_icmp_x_and_8_ne_0_y_xor_8(i32 %x, i64 %y) {
 ; CHECK-LABEL: @select_icmp_x_and_8_ne_0_y_xor_8(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 8
 ; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[AND]], 8
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[XOR_Y:%.*]] = xor i64 [[TMP2]], [[Y:%.*]]
 ; CHECK-NEXT:    ret i64 [[XOR_Y]]
 ;
@@ -557,7 +557,7 @@ define i64 @select_icmp_x_and_8_ne_0_y_or_8(i32 %x, i64 %y) {
 ; CHECK-LABEL: @select_icmp_x_and_8_ne_0_y_or_8(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 8
 ; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[AND]], 8
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[OR_Y:%.*]] = or i64 [[TMP2]], [[Y:%.*]]
 ; CHECK-NEXT:    ret i64 [[OR_Y]]
 ;
@@ -572,7 +572,7 @@ define <2 x i64> @select_icmp_x_and_8_ne_0_y_or_8_vec(<2 x i32> %x, <2 x i64> %y
 ; CHECK-LABEL: @select_icmp_x_and_8_ne_0_y_or_8_vec(
 ; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[X:%.*]], <i32 8, i32 8>
 ; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i32> [[AND]], <i32 8, i32 8>
-; CHECK-NEXT:    [[TMP2:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[OR_Y:%.*]] = or <2 x i64> [[TMP2]], [[Y:%.*]]
 ; CHECK-NEXT:    ret <2 x i64> [[OR_Y]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll
index 8d36b1f3cb22170..e49fea4936217c8 100644
--- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll
+++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll
@@ -16,7 +16,7 @@
 define i1 @t0_const_after_fold_lshr_shl_ne(i32 %x, i64 %y, i32 %len) {
 ; CHECK-LABEL: @t0_const_after_fold_lshr_shl_ne(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[TMP3:%.*]] = and i64 [[TMP2]], [[Y:%.*]]
 ; CHECK-NEXT:    [[T5:%.*]] = icmp ne i64 [[TMP3]], 0
 ; CHECK-NEXT:    ret i1 [[T5]]
@@ -39,7 +39,7 @@ define i1 @t0_const_after_fold_lshr_shl_ne(i32 %x, i64 %y, i32 %len) {
 define <2 x i1> @t1_vec_splat(<2 x i32> %x, <2 x i64> %y, <2 x i32> %len) {
 ; CHECK-LABEL: @t1_vec_splat(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 31, i32 31>
-; CHECK-NEXT:    [[TMP2:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP3:%.*]] = and <2 x i64> [[TMP2]], [[Y:%.*]]
 ; CHECK-NEXT:    [[T5:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[T5]]
@@ -211,7 +211,7 @@ define i1 @t6_oneuse3(i32 %x, i64 %y, i32 %len) {
 ; CHECK-NEXT:    [[T3:%.*]] = shl i64 [[Y:%.*]], [[T2_WIDE]]
 ; CHECK-NEXT:    call void @use64(i64 [[T3]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[TMP3:%.*]] = and i64 [[TMP2]], [[Y]]
 ; CHECK-NEXT:    [[T5:%.*]] = icmp ne i64 [[TMP3]], 0
 ; CHECK-NEXT:    ret i1 [[T5]]
@@ -243,7 +243,7 @@ define i1 @t7_oneuse4(i32 %x, i64 %y, i32 %len) {
 ; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
 ; CHECK-NEXT:    call void @use32(i32 [[T3_TRUNC]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[TMP3:%.*]] = and i64 [[TMP2]], [[Y]]
 ; CHECK-NEXT:    [[T5:%.*]] = icmp ne i64 [[TMP3]], 0
 ; CHECK-NEXT:    ret i1 [[T5]]
diff --git a/llvm/test/Transforms/InstCombine/shift.ll b/llvm/test/Transforms/InstCombine/shift.ll
index 913ef2a74aebd9a..f7135971cf9eebd 100644
--- a/llvm/test/Transforms/InstCombine/shift.ll
+++ b/llvm/test/Transforms/InstCombine/shift.ll
@@ -1258,7 +1258,7 @@ define i64 @shl_zext(i32 %t) {
 define i64 @shl_zext_extra_use(i32 %t) {
 ; CHECK-LABEL: @shl_zext_extra_use(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[T:%.*]], 16777215
-; CHECK-NEXT:    [[EXT:%.*]] = zext i32 [[AND]] to i64
+; CHECK-NEXT:    [[EXT:%.*]] = zext nneg i32 [[AND]] to i64
 ; CHECK-NEXT:    call void @use(i64 [[EXT]])
 ; CHECK-NEXT:    [[SHL:%.*]] = shl nuw nsw i64 [[EXT]], 8
 ; CHECK-NEXT:    ret i64 [[SHL]]
diff --git a/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll b/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll
index b5dcb9b67d676ed..12d1e847ef3c082 100644
--- a/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll
@@ -243,7 +243,7 @@ define <2 x i32> @trunc_ashr_vec(<2 x i32> %X) {
 define i92 @test7(i64 %A) {
 ; CHECK-LABEL: @test7(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[A:%.*]], 32
-; CHECK-NEXT:    [[D:%.*]] = zext i64 [[TMP1]] to i92
+; CHECK-NEXT:    [[D:%.*]] = zext nneg i64 [[TMP1]] to i92
 ; CHECK-NEXT:    ret i92 [[D]]
 ;
   %B = zext i64 %A to i128
@@ -344,7 +344,7 @@ define i64 @test11(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test11(
 ; CHECK-NEXT:    [[C:%.*]] = zext i32 [[A:%.*]] to i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[B:%.*]], 31
-; CHECK-NEXT:    [[E:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[E:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw i64 [[C]], [[E]]
 ; CHECK-NEXT:    ret i64 [[F]]
 ;
@@ -360,7 +360,7 @@ define <2 x i64> @test11_vec(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: @test11_vec(
 ; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 31>
-; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
@@ -376,7 +376,7 @@ define <2 x i64> @test11_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: @test11_vec_nonuniform(
 ; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 15>
-; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
@@ -409,7 +409,7 @@ define i64 @test12(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test12(
 ; CHECK-NEXT:    [[C:%.*]] = zext i32 [[A:%.*]] to i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[B:%.*]], 31
-; CHECK-NEXT:    [[E:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[E:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[F:%.*]] = lshr i64 [[C]], [[E]]
 ; CHECK-NEXT:    ret i64 [[F]]
 ;
@@ -425,7 +425,7 @@ define <2 x i64> @test12_vec(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: @test12_vec(
 ; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 31>
-; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[F:%.*]] = lshr <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
@@ -441,7 +441,7 @@ define <2 x i64> @test12_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: @test12_vec_nonuniform(
 ; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 15>
-; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[F:%.*]] = lshr <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
@@ -474,7 +474,7 @@ define i64 @test13(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test13(
 ; CHECK-NEXT:    [[C:%.*]] = sext i32 [[A:%.*]] to i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[B:%.*]], 31
-; CHECK-NEXT:    [[E:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[E:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[F:%.*]] = ashr i64 [[C]], [[E]]
 ; CHECK-NEXT:    ret i64 [[F]]
 ;
@@ -490,7 +490,7 @@ define <2 x i64> @test13_vec(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: @test13_vec(
 ; CHECK-NEXT:    [[C:%.*]] = sext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 31>
-; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[F:%.*]] = ashr <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
@@ -506,7 +506,7 @@ define <2 x i64> @test13_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: @test13_vec_nonuniform(
 ; CHECK-NEXT:    [[C:%.*]] = sext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 15>
-; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[F:%.*]] = ashr <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/trunc.ll b/llvm/test/Transforms/InstCombine/trunc.ll
index 33baee858493a63..22c0ffa81916e1e 100644
--- a/llvm/test/Transforms/InstCombine/trunc.ll
+++ b/llvm/test/Transforms/InstCombine/trunc.ll
@@ -243,7 +243,7 @@ define <2 x i32> @trunc_ashr_vec(<2 x i32> %X) {
 define i92 @test7(i64 %A) {
 ; CHECK-LABEL: @test7(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[A:%.*]], 32
-; CHECK-NEXT:    [[D:%.*]] = zext i64 [[TMP1]] to i92
+; CHECK-NEXT:    [[D:%.*]] = zext nneg i64 [[TMP1]] to i92
 ; CHECK-NEXT:    ret i92 [[D]]
 ;
   %B = zext i64 %A to i128
@@ -344,7 +344,7 @@ define i64 @test11(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test11(
 ; CHECK-NEXT:    [[C:%.*]] = zext i32 [[A:%.*]] to i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[B:%.*]], 31
-; CHECK-NEXT:    [[E:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[E:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw i64 [[C]], [[E]]
 ; CHECK-NEXT:    ret i64 [[F]]
 ;
@@ -360,7 +360,7 @@ define <2 x i64> @test11_vec(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: @test11_vec(
 ; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 31>
-; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
@@ -376,7 +376,7 @@ define <2 x i64> @test11_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: @test11_vec_nonuniform(
 ; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 15>
-; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[F:%.*]] = shl nuw nsw <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
@@ -409,7 +409,7 @@ define i64 @test12(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test12(
 ; CHECK-NEXT:    [[C:%.*]] = zext i32 [[A:%.*]] to i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[B:%.*]], 31
-; CHECK-NEXT:    [[E:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[E:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[F:%.*]] = lshr i64 [[C]], [[E]]
 ; CHECK-NEXT:    ret i64 [[F]]
 ;
@@ -425,7 +425,7 @@ define <2 x i64> @test12_vec(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: @test12_vec(
 ; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 31>
-; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[F:%.*]] = lshr <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
@@ -441,7 +441,7 @@ define <2 x i64> @test12_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: @test12_vec_nonuniform(
 ; CHECK-NEXT:    [[C:%.*]] = zext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 15>
-; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[F:%.*]] = lshr <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
@@ -474,7 +474,7 @@ define i64 @test13(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test13(
 ; CHECK-NEXT:    [[C:%.*]] = sext i32 [[A:%.*]] to i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[B:%.*]], 31
-; CHECK-NEXT:    [[E:%.*]] = zext i32 [[TMP1]] to i64
+; CHECK-NEXT:    [[E:%.*]] = zext nneg i32 [[TMP1]] to i64
 ; CHECK-NEXT:    [[F:%.*]] = ashr i64 [[C]], [[E]]
 ; CHECK-NEXT:    ret i64 [[F]]
 ;
@@ -490,7 +490,7 @@ define <2 x i64> @test13_vec(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: @test13_vec(
 ; CHECK-NEXT:    [[C:%.*]] = sext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 31>
-; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[F:%.*]] = ashr <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
@@ -506,7 +506,7 @@ define <2 x i64> @test13_vec_nonuniform(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: @test13_vec_nonuniform(
 ; CHECK-NEXT:    [[C:%.*]] = sext <2 x i32> [[A:%.*]] to <2 x i64>
 ; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[B:%.*]], <i32 31, i32 15>
-; CHECK-NEXT:    [[E:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
+; CHECK-NEXT:    [[E:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT:    [[F:%.*]] = ashr <2 x i64> [[C]], [[E]]
 ; CHECK-NEXT:    ret <2 x i64> [[F]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/udiv-simplify.ll b/llvm/test/Transforms/InstCombine/udiv-simplify.ll
index c55f64e65922e03..dd2dc28d4bcc5e9 100644
--- a/llvm/test/Transforms/InstCombine/udiv-simplify.ll
+++ b/llvm/test/Transforms/InstCombine/udiv-simplify.ll
@@ -55,7 +55,7 @@ define i64 @test2_PR2274(i32 %x, i32 %v) nounwind {
 define i32 @PR30366(i1 %a) {
 ; CHECK-LABEL: @PR30366(
 ; CHECK-NEXT:    [[Z:%.*]] = zext i1 [[A:%.*]] to i32
-; CHECK-NEXT:    [[Z2:%.*]] = zext i16 shl (i16 1, i16 ptrtoint (ptr @b to i16)) to i32
+; CHECK-NEXT:    [[Z2:%.*]] = zext nneg i16 shl (i16 1, i16 ptrtoint (ptr @b to i16)) to i32
 ; CHECK-NEXT:    [[D:%.*]] = udiv i32 [[Z]], [[Z2]]
 ; CHECK-NEXT:    ret i32 [[D]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/udivrem-change-width.ll b/llvm/test/Transforms/InstCombine/udivrem-change-width.ll
index f4094cf5008adb0..f45f1354329d3c7 100644
--- a/llvm/test/Transforms/InstCombine/udivrem-change-width.ll
+++ b/llvm/test/Transforms/InstCombine/udivrem-change-width.ll
@@ -161,7 +161,7 @@ define i32 @urem_illegal_type(i9 %a, i9 %b) {
 define i32 @udiv_i32_c(i8 %a) {
 ; CHECK-LABEL: @udiv_i32_c(
 ; CHECK-NEXT:    [[TMP1:%.*]] = udiv i8 [[A:%.*]], 10
-; CHECK-NEXT:    [[UDIV:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT:    [[UDIV:%.*]] = zext nneg i8 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[UDIV]]
 ;
   %za = zext i8 %a to i32
@@ -196,7 +196,7 @@ define i32 @udiv_i32_c_multiuse(i8 %a) {
 define i32 @udiv_illegal_type_c(i9 %a) {
 ; CHECK-LABEL: @udiv_illegal_type_c(
 ; CHECK-NEXT:    [[TMP1:%.*]] = udiv i9 [[A:%.*]], 10
-; CHECK-NEXT:    [[UDIV:%.*]] = zext i9 [[TMP1]] to i32
+; CHECK-NEXT:    [[UDIV:%.*]] = zext nneg i9 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[UDIV]]
 ;
   %za = zext i9 %a to i32
@@ -207,7 +207,7 @@ define i32 @udiv_illegal_type_c(i9 %a) {
 define i32 @urem_i32_c(i8 %a) {
 ; CHECK-LABEL: @urem_i32_c(
 ; CHECK-NEXT:    [[TMP1:%.*]] = urem i8 [[A:%.*]], 10
-; CHECK-NEXT:    [[UREM:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT:    [[UREM:%.*]] = zext nneg i8 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[UREM]]
 ;
   %za = zext i8 %a to i32
@@ -218,7 +218,7 @@ define i32 @urem_i32_c(i8 %a) {
 define <2 x i32> @urem_i32_c_vec(<2 x i8> %a) {
 ; CHECK-LABEL: @urem_i32_c_vec(
 ; CHECK-NEXT:    [[TMP1:%.*]] = urem <2 x i8> [[A:%.*]], <i8 10, i8 17>
-; CHECK-NEXT:    [[UREM:%.*]] = zext <2 x i8> [[TMP1]] to <2 x i32>
+; CHECK-NEXT:    [[UREM:%.*]] = zext nneg <2 x i8> [[TMP1]] to <2 x i32>
 ; CHECK-NEXT:    ret <2 x i32> [[UREM]]
 ;
   %za = zext <2 x i8> %a to <2 x i32>
@@ -242,7 +242,7 @@ define i32 @urem_i32_c_multiuse(i8 %a) {
 define i32 @urem_illegal_type_c(i9 %a) {
 ; CHECK-LABEL: @urem_illegal_type_c(
 ; CHECK-NEXT:    [[TMP1:%.*]] = urem i9 [[A:%.*]], 10
-; CHECK-NEXT:    [[UREM:%.*]] = zext i9 [[TMP1]] to i32
+; CHECK-NEXT:    [[UREM:%.*]] = zext nneg i9 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[UREM]]
 ;
   %za = zext i9 %a to i32
@@ -253,7 +253,7 @@ define i32 @urem_illegal_type_c(i9 %a) {
 define i32 @udiv_c_i32(i8 %a) {
 ; CHECK-LABEL: @udiv_c_i32(
 ; CHECK-NEXT:    [[TMP1:%.*]] = udiv i8 10, [[A:%.*]]
-; CHECK-NEXT:    [[UDIV:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT:    [[UDIV:%.*]] = zext nneg i8 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[UDIV]]
 ;
   %za = zext i8 %a to i32
@@ -264,7 +264,7 @@ define i32 @udiv_c_i32(i8 %a) {
 define i32 @urem_c_i32(i8 %a) {
 ; CHECK-LABEL: @urem_c_i32(
 ; CHECK-NEXT:    [[TMP1:%.*]] = urem i8 10, [[A:%.*]]
-; CHECK-NEXT:    [[UREM:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT:    [[UREM:%.*]] = zext nneg i8 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[UREM]]
 ;
   %za = zext i8 %a to i32
@@ -295,7 +295,7 @@ define i32 @udiv_constexpr(i8 %a) {
 define i32 @udiv_const_constexpr(i8 %a) {
 ; CHECK-LABEL: @udiv_const_constexpr(
 ; CHECK-NEXT:    [[TMP1:%.*]] = udiv i8 42, ptrtoint (ptr @g1 to i8)
-; CHECK-NEXT:    [[D:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT:    [[D:%.*]] = zext nneg i8 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[D]]
 ;
   %z = zext i8 ptrtoint (ptr @g1 to i8) to i32
@@ -310,7 +310,7 @@ define i32 @udiv_const_constexpr(i8 %a) {
 define i32 @urem_const_constexpr(i8 %a) {
 ; CHECK-LABEL: @urem_const_constexpr(
 ; CHECK-NEXT:    [[TMP1:%.*]] = urem i8 42, ptrtoint (ptr @g2 to i8)
-; CHECK-NEXT:    [[D:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT:    [[D:%.*]] = zext nneg i8 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[D]]
 ;
   %z = zext i8 ptrtoint (ptr @g2 to i8) to i32
@@ -323,7 +323,7 @@ define i32 @urem_const_constexpr(i8 %a) {
 define i32 @udiv_constexpr_const(i8 %a) {
 ; CHECK-LABEL: @udiv_constexpr_const(
 ; CHECK-NEXT:    [[TMP1:%.*]] = udiv i8 ptrtoint (ptr @g3 to i8), 42
-; CHECK-NEXT:    [[D:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT:    [[D:%.*]] = zext nneg i8 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[D]]
 ;
   %z = zext i8 ptrtoint (ptr @g3 to i8) to i32
@@ -336,7 +336,7 @@ define i32 @udiv_constexpr_const(i8 %a) {
 define i32 @urem_constexpr_const(i8 %a) {
 ; CHECK-LABEL: @urem_constexpr_const(
 ; CHECK-NEXT:    [[TMP1:%.*]] = urem i8 ptrtoint (ptr @g4 to i8), 42
-; CHECK-NEXT:    [[D:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT:    [[D:%.*]] = zext nneg i8 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[D]]
 ;
   %z = zext i8 ptrtoint (ptr @g4 to i8) to i32
diff --git a/llvm/test/Transforms/InstCombine/vector-casts-inseltpoison.ll b/llvm/test/Transforms/InstCombine/vector-casts-inseltpoison.ll
index b2409e59777a899..67ef38763753725 100644
--- a/llvm/test/Transforms/InstCombine/vector-casts-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/vector-casts-inseltpoison.ll
@@ -164,7 +164,7 @@ define void @convert(ptr %dst.addr, <2 x i64> %src) {
 define <2 x i65> @foo(<2 x i64> %t) {
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT:    [[A_MASK:%.*]] = and <2 x i64> [[T:%.*]], <i64 4294967295, i64 4294967295>
-; CHECK-NEXT:    [[B:%.*]] = zext <2 x i64> [[A_MASK]] to <2 x i65>
+; CHECK-NEXT:    [[B:%.*]] = zext nneg <2 x i64> [[A_MASK]] to <2 x i65>
 ; CHECK-NEXT:    ret <2 x i65> [[B]]
 ;
   %a = trunc <2 x i64> %t to <2 x i32>
diff --git a/llvm/test/Transforms/InstCombine/vector-casts.ll b/llvm/test/Transforms/InstCombine/vector-casts.ll
index 8ad81831277b664..21e0818fa001e64 100644
--- a/llvm/test/Transforms/InstCombine/vector-casts.ll
+++ b/llvm/test/Transforms/InstCombine/vector-casts.ll
@@ -164,7 +164,7 @@ define void @convert(ptr %dst.addr, <2 x i64> %src) {
 define <2 x i65> @foo(<2 x i64> %t) {
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT:    [[A_MASK:%.*]] = and <2 x i64> [[T:%.*]], <i64 4294967295, i64 4294967295>
-; CHECK-NEXT:    [[B:%.*]] = zext <2 x i64> [[A_MASK]] to <2 x i65>
+; CHECK-NEXT:    [[B:%.*]] = zext nneg <2 x i64> [[A_MASK]] to <2 x i65>
 ; CHECK-NEXT:    ret <2 x i65> [[B]]
 ;
   %a = trunc <2 x i64> %t to <2 x i32>
diff --git a/llvm/test/Transforms/InstCombine/wcslen-1.ll b/llvm/test/Transforms/InstCombine/wcslen-1.ll
index 4a9a4b926320274..138b3ff585c54d1 100644
--- a/llvm/test/Transforms/InstCombine/wcslen-1.ll
+++ b/llvm/test/Transforms/InstCombine/wcslen-1.ll
@@ -124,7 +124,7 @@ define i64 @test_simplify11(i32 %x) {
 ; CHECK-LABEL: @test_simplify11(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 7
 ; CHECK-NEXT:    [[NARROW:%.*]] = sub nuw nsw i32 9, [[AND]]
-; CHECK-NEXT:    [[HELLO_L:%.*]] = zext i32 [[NARROW]] to i64
+; CHECK-NEXT:    [[HELLO_L:%.*]] = zext nneg i32 [[NARROW]] to i64
 ; CHECK-NEXT:    ret i64 [[HELLO_L]]
 ;
   %and = and i32 %x, 7
diff --git a/llvm/test/Transforms/InstCombine/wcslen-3.ll b/llvm/test/Transforms/InstCombine/wcslen-3.ll
index 6dc9534c4986e90..6cabe0570cfe49d 100644
--- a/llvm/test/Transforms/InstCombine/wcslen-3.ll
+++ b/llvm/test/Transforms/InstCombine/wcslen-3.ll
@@ -125,7 +125,7 @@ define i64 @test_simplify11(i16 %x) {
 ; CHECK-LABEL: @test_simplify11(
 ; CHECK-NEXT:    [[AND:%.*]] = and i16 [[X:%.*]], 7
 ; CHECK-NEXT:    [[NARROW:%.*]] = sub nuw nsw i16 9, [[AND]]
-; CHECK-NEXT:    [[HELLO_L:%.*]] = zext i16 [[NARROW]] to i64
+; CHECK-NEXT:    [[HELLO_L:%.*]] = zext nneg i16 [[NARROW]] to i64
 ; CHECK-NEXT:    ret i64 [[HELLO_L]]
 ;
   %and = and i16 %x, 7
diff --git a/llvm/test/Transforms/InstCombine/zeroext-and-reduce.ll b/llvm/test/Transforms/InstCombine/zeroext-and-reduce.ll
index 1a8a59ee15c7bef..93f7e368378f5f9 100644
--- a/llvm/test/Transforms/InstCombine/zeroext-and-reduce.ll
+++ b/llvm/test/Transforms/InstCombine/zeroext-and-reduce.ll
@@ -4,7 +4,7 @@
 define i32 @test1(i8 %X) {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X:%.*]], 8
-; CHECK-NEXT:    [[Z:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT:    [[Z:%.*]] = zext nneg i8 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[Z]]
 ;
   %Y = zext i8 %X to i32
diff --git a/llvm/test/Transforms/InstCombine/zext-or-icmp.ll b/llvm/test/Transforms/InstCombine/zext-or-icmp.ll
index dcb8c081f154aa0..dada32d1b744983 100644
--- a/llvm/test/Transforms/InstCombine/zext-or-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/zext-or-icmp.ll
@@ -119,9 +119,9 @@ block2:
 
 define i32 @zext_or_eq_ult_add(i32 %i) {
 ; CHECK-LABEL: @zext_or_eq_ult_add(
-; CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[I:%.*]], -3
-; CHECK-NEXT:    [[O:%.*]] = icmp ult i32 [[TMP1]], 3
-; CHECK-NEXT:    [[R:%.*]] = zext i1 [[O]] to i32
+; CHECK-NEXT:    [[A:%.*]] = add i32 [[I:%.*]], -3
+; CHECK-NEXT:    [[C1:%.*]] = icmp ult i32 [[A]], 3
+; CHECK-NEXT:    [[R:%.*]] = zext i1 [[C1]] to i32
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
   %a = add i32 %i, -3
@@ -175,7 +175,7 @@ define i8 @PR49475_infloop(i32 %t0, i16 %insert, i64 %e, i8 %i162) {
 ; CHECK-NEXT:    [[T1:%.*]] = or i1 [[B]], [[B2]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[T0]], 1
 ; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[TMP1]], 140
-; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = zext nneg i32 [[TMP2]] to i64
 ; CHECK-NEXT:    [[XOR1:%.*]] = select i1 [[T1]], i64 [[TMP3]], i64 140
 ; CHECK-NEXT:    [[CONV16:%.*]] = sext i8 [[I162:%.*]] to i64
 ; CHECK-NEXT:    [[SUB17:%.*]] = sub i64 [[CONV16]], [[E:%.*]]
@@ -242,7 +242,7 @@ define i1 @PR51762(ptr %i, i32 %t0, i16 %t1, ptr %p, ptr %d, ptr %f, i32 %p2, i1
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[T3]], [[CONV17]]
 ; CHECK-NEXT:    store i32 [[ADD]], ptr [[F]], align 4
 ; CHECK-NEXT:    [[REM18:%.*]] = srem i32 [[LOR_EXT]], [[ADD]]
-; CHECK-NEXT:    [[CONV19:%.*]] = zext i32 [[REM18]] to i64
+; CHECK-NEXT:    [[CONV19:%.*]] = zext nneg i32 [[REM18]] to i64
 ; CHECK-NEXT:    store i32 0, ptr [[D]], align 8
 ; CHECK-NEXT:    [[R:%.*]] = icmp ult i64 [[INSERT_INSERT41]], [[CONV19]]
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[R]])
diff --git a/llvm/test/Transforms/InstCombine/zext.ll b/llvm/test/Transforms/InstCombine/zext.ll
index 9b083a6d9156c1d..edbd4850fb1190b 100644
--- a/llvm/test/Transforms/InstCombine/zext.ll
+++ b/llvm/test/Transforms/InstCombine/zext.ll
@@ -757,7 +757,7 @@ define i32  @zext_icmp_eq0_no_shift(ptr %ptr ) {
 ; CHECK-LABEL: @zext_icmp_eq0_no_shift(
 ; CHECK-NEXT:    [[X:%.*]] = load i8, ptr [[PTR:%.*]], align 1, !range [[RNG1:![0-9]+]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X]], 1
-; CHECK-NEXT:    [[RES:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT:    [[RES:%.*]] = zext nneg i8 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[RES]]
 ;
   %X = load i8, ptr %ptr,align 1, !range !{i8 0, i8 2} ; range [0, 2)
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
index 280a3fbba297dce..da508ab5f727695 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll
@@ -731,7 +731,7 @@ define i64 @mla_i8_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i32
 ; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD1]] to <8 x i32>
 ; CHECK-NEXT:    [[TMP4:%.*]] = mul nuw nsw <8 x i32> [[TMP3]], [[TMP1]]
-; CHECK-NEXT:    [[TMP5:%.*]] = zext <8 x i32> [[TMP4]] to <8 x i64>
+; CHECK-NEXT:    [[TMP5:%.*]] = zext nneg <8 x i32> [[TMP4]] to <8 x i64>
 ; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> [[TMP5]])
 ; CHECK-NEXT:    [[TMP7]] = add i64 [[TMP6]], [[VEC_PHI]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
@@ -754,7 +754,7 @@ define i64 @mla_i8_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i32
 ; CHECK-NEXT:    [[TMP10:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
 ; CHECK-NEXT:    [[CONV2:%.*]] = zext i8 [[TMP10]] to i32
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[CONV2]], [[CONV]]
-; CHECK-NEXT:    [[CONV3:%.*]] = zext i32 [[MUL]] to i64
+; CHECK-NEXT:    [[CONV3:%.*]] = zext nneg i32 [[MUL]] to i64
 ; CHECK-NEXT:    [[ADD]] = add nuw nsw i64 [[R_011]], [[CONV3]]
 ; CHECK-NEXT:    [[INC]] = add nuw nsw i32 [[I_012]], 1
 ; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
index 18b05c05d9b9d21..372b9444a91cfb2 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
@@ -1125,7 +1125,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h
 ; CHECK-NEXT:    [[TMP2:%.*]] = udiv <4 x i8> [[WIDE_LOAD]], <i8 31, i8 31, i8 31, i8 31>
 ; CHECK-NEXT:    [[TMP3:%.*]] = shl nuw nsw <4 x i8> [[TMP2]], <i8 3, i8 3, i8 3, i8 3>
 ; CHECK-NEXT:    [[TMP4:%.*]] = udiv <4 x i8> [[TMP3]], <i8 31, i8 31, i8 31, i8 31>
-; CHECK-NEXT:    [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
+; CHECK-NEXT:    [[TMP5:%.*]] = zext nneg <4 x i8> [[TMP4]] to <4 x i32>
 ; CHECK-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP5]])
 ; CHECK-NEXT:    [[TMP7]] = add i32 [[TMP6]], [[VEC_PHI]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
@@ -1150,7 +1150,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h
 ; CHECK-NEXT:    [[TMP11:%.*]] = udiv i8 [[TMP10]], 31
 ; CHECK-NEXT:    [[TMP12:%.*]] = shl nuw nsw i8 [[TMP11]], 3
 ; CHECK-NEXT:    [[TMP13:%.*]] = udiv i8 [[TMP12]], 31
-; CHECK-NEXT:    [[DIV4:%.*]] = zext i8 [[TMP13]] to i32
+; CHECK-NEXT:    [[DIV4:%.*]] = zext nneg i8 [[TMP13]] to i32
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[G_016]], [[DIV4]]
 ; CHECK-NEXT:    br label [[FOR_INC5]]
 ; CHECK:       for.inc5:
@@ -1210,7 +1210,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read
 ; CHECK-NEXT:    [[TMP2:%.*]] = udiv <4 x i8> [[WIDE_LOAD]], <i8 31, i8 31, i8 31, i8 31>
 ; CHECK-NEXT:    [[TMP3:%.*]] = shl nuw nsw <4 x i8> [[TMP2]], <i8 3, i8 3, i8 3, i8 3>
 ; CHECK-NEXT:    [[TMP4:%.*]] = udiv <4 x i8> [[TMP3]], <i8 31, i8 31, i8 31, i8 31>
-; CHECK-NEXT:    [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
+; CHECK-NEXT:    [[TMP5:%.*]] = zext nneg <4 x i8> [[TMP4]] to <4 x i32>
 ; CHECK-NEXT:    [[TMP6:%.*]] = select <4 x i1> [[DOTNOT]], <4 x i32> zeroinitializer, <4 x i32> [[TMP5]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP6]])
 ; CHECK-NEXT:    [[TMP8:%.*]] = add i32 [[TMP7]], [[VEC_PHI]]
@@ -1240,7 +1240,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read
 ; CHECK-NEXT:    [[TMP16:%.*]] = shl nuw nsw i8 [[TMP15]], 3
 ; CHECK-NEXT:    [[TMP17:%.*]] = udiv i8 [[TMP16]], 31
 ; CHECK-NEXT:    [[TMP18:%.*]] = shl nuw nsw i8 [[TMP17]], 1
-; CHECK-NEXT:    [[REASS_ADD:%.*]] = zext i8 [[TMP18]] to i32
+; CHECK-NEXT:    [[REASS_ADD:%.*]] = zext nneg i8 [[TMP18]] to i32
 ; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[G_016]], [[REASS_ADD]]
 ; CHECK-NEXT:    br label [[FOR_INC5]]
 ; CHECK:       for.inc5:
@@ -1359,7 +1359,7 @@ define i32 @predicated_or_dominates_reduction(ptr %b) {
 ; CHECK-NEXT:    [[TMP47:%.*]] = or <4 x i1> [[TMP45]], [[TMP46]]
 ; CHECK-NEXT:    [[TMP48:%.*]] = bitcast <4 x i1> [[TMP47]] to i4
 ; CHECK-NEXT:    [[TMP49:%.*]] = call i4 @llvm.ctpop.i4(i4 [[TMP48]]), !range [[RNG42:![0-9]+]]
-; CHECK-NEXT:    [[TMP50:%.*]] = zext i4 [[TMP49]] to i32
+; CHECK-NEXT:    [[TMP50:%.*]] = zext nneg i4 [[TMP49]] to i32
 ; CHECK-NEXT:    [[TMP51]] = add i32 [[VEC_PHI]], [[TMP50]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
 ; CHECK-NEXT:    [[TMP52:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
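
For reference, a minimal standalone IR sketch (hypothetical, not taken from the patch itself) of the pattern these regression tests now check: the mask clears the high bits, so KnownBits proves the zext operand is non-negative and InstCombine can tag the cast with the nneg flag.

  define i64 @nneg_inference_example(i32 %x) {
    %m = and i32 %x, 15       ; high bits are zero, so %m is known non-negative
    %e = zext i32 %m to i64   ; with this patch, InstCombine emits: zext nneg i32 %m to i64
    ret i64 %e
  }

The cttz/ctlz tests above follow the same reasoning: the intrinsic's result range keeps the sign bit clear, so the widening zext is likewise marked nneg.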


