[clang] b109477 - [InstCombine] Infer nsw/nuw for trunc (#87910)

via cfe-commits cfe-commits at lists.llvm.org
Thu Apr 11 04:10:57 PDT 2024


Author: Yingwei Zheng
Date: 2024-04-11T19:10:53+08:00
New Revision: b1094776152b68efa05f69b7b833f9cbc0727efc

URL: https://github.com/llvm/llvm-project/commit/b1094776152b68efa05f69b7b833f9cbc0727efc
DIFF: https://github.com/llvm/llvm-project/commit/b1094776152b68efa05f69b7b833f9cbc0727efc.diff

LOG: [InstCombine] Infer nsw/nuw for trunc (#87910)

This patch adds support for inferring trunc's nsw/nuw flags: nsw is inferred when the
source's significant bits fit within the destination width, and nuw when the bits
removed by the truncation are known to be zero.
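
As a quick illustration of the new flags (a hypothetical function, not one of the
tests touched below), consider truncating a value whose dropped high bits are
provably zero:

    ; hypothetical example, not part of this patch
    define i32 @hi_half(i64 %x) {
      %hi = lshr i64 %x, 32        ; bits 32..63 of %hi are known zero
      %t = trunc i64 %hi to i32    ; can now be emitted as: trunc nuw i64 %hi to i32
      ret i32 %t
    }

Per the LangRef, "trunc nuw" asserts that zero-extending the result reproduces the
original value, and "trunc nsw" asserts the same for sign-extension; keeping these
facts on the trunc lets later folds reason about the narrow value without
recomputing known bits.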

Added: 
    

Modified: 
    clang/test/CodeGen/ms-intrinsics-other.c
    clang/test/CodeGen/ms-intrinsics.c
    clang/test/CodeGenOpenCL/builtins-amdgcn.cl
    clang/test/Headers/__clang_hip_math.hip
    llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
    llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
    llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvlimax-knownbits.ll
    llvm/test/Transforms/InstCombine/add.ll
    llvm/test/Transforms/InstCombine/binop-itofp.ll
    llvm/test/Transforms/InstCombine/bswap-fold.ll
    llvm/test/Transforms/InstCombine/bswap.ll
    llvm/test/Transforms/InstCombine/cast.ll
    llvm/test/Transforms/InstCombine/cmp-intrinsic.ll
    llvm/test/Transforms/InstCombine/compare-signs.ll
    llvm/test/Transforms/InstCombine/ctpop.ll
    llvm/test/Transforms/InstCombine/extractelement-inseltpoison.ll
    llvm/test/Transforms/InstCombine/extractelement.ll
    llvm/test/Transforms/InstCombine/ffs-1.ll
    llvm/test/Transforms/InstCombine/fls.ll
    llvm/test/Transforms/InstCombine/fold-log2-ceil-idiom.ll
    llvm/test/Transforms/InstCombine/high-bit-signmask-with-trunc.ll
    llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
    llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
    llvm/test/Transforms/InstCombine/icmp-topbitssame.ll
    llvm/test/Transforms/InstCombine/insert-trunc.ll
    llvm/test/Transforms/InstCombine/insertelt-trunc.ll
    llvm/test/Transforms/InstCombine/known-bits.ll
    llvm/test/Transforms/InstCombine/known-non-zero.ll
    llvm/test/Transforms/InstCombine/known-phi-recurse.ll
    llvm/test/Transforms/InstCombine/logical-select-inseltpoison.ll
    llvm/test/Transforms/InstCombine/logical-select.ll
    llvm/test/Transforms/InstCombine/lshr-trunc-sext-to-ashr-sext.ll
    llvm/test/Transforms/InstCombine/lshr.ll
    llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll
    llvm/test/Transforms/InstCombine/narrow.ll
    llvm/test/Transforms/InstCombine/negated-bitmask.ll
    llvm/test/Transforms/InstCombine/pr34349.ll
    llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll
    llvm/test/Transforms/InstCombine/sadd_sat.ll
    llvm/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll
    llvm/test/Transforms/InstCombine/select-imm-canon.ll
    llvm/test/Transforms/InstCombine/select.ll
    llvm/test/Transforms/InstCombine/sext-of-trunc-nsw.ll
    llvm/test/Transforms/InstCombine/sext.ll
    llvm/test/Transforms/InstCombine/shift-add.ll
    llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll
    llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-ashr.ll
    llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-lshr.ll
    llvm/test/Transforms/InstCombine/shift-shift.ll
    llvm/test/Transforms/InstCombine/shift.ll
    llvm/test/Transforms/InstCombine/shl-demand.ll
    llvm/test/Transforms/InstCombine/sign-bit-test-via-right-shifting-all-other-bits.ll
    llvm/test/Transforms/InstCombine/trunc-demand.ll
    llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll
    llvm/test/Transforms/InstCombine/trunc-shift-trunc.ll
    llvm/test/Transforms/InstCombine/trunc.ll
    llvm/test/Transforms/InstCombine/truncating-saturate.ll
    llvm/test/Transforms/InstCombine/vector-trunc.ll
    llvm/test/Transforms/InstCombine/xor-ashr.ll
    llvm/test/Transforms/InstCombine/zext-ctlz-trunc-to-ctlz-add.ll
    llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll
    llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll
    llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll
    llvm/test/Transforms/LoopVectorize/reduction.ll
    llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
    llvm/test/Transforms/SLPVectorizer/X86/pr46983.ll

Removed: 
    


################################################################################
diff  --git a/clang/test/CodeGen/ms-intrinsics-other.c b/clang/test/CodeGen/ms-intrinsics-other.c
index 36c40dddcbb4f5..0e9dfe34b84cc7 100644
--- a/clang/test/CodeGen/ms-intrinsics-other.c
+++ b/clang/test/CodeGen/ms-intrinsics-other.c
@@ -87,7 +87,7 @@ unsigned char test_BitScanForward64(unsigned LONG *Index, unsigned __int64 Mask)
 // CHECK:   ret i8 [[RESULT]]
 // CHECK:   [[ISNOTZERO_LABEL]]:
 // CHECK:   [[INDEX:%[0-9]+]] = tail call i64 @llvm.cttz.i64(i64 %Mask, i1 true)
-// CHECK:   [[TRUNC_INDEX:%[0-9]+]] = trunc i64 [[INDEX]] to i32
+// CHECK:   [[TRUNC_INDEX:%[0-9]+]] = trunc nuw nsw i64 [[INDEX]] to i32
 // CHECK:   store i32 [[TRUNC_INDEX]], ptr %Index, align 4
 // CHECK:   br label %[[END_LABEL]]
 
@@ -102,7 +102,7 @@ unsigned char test_BitScanReverse64(unsigned LONG *Index, unsigned __int64 Mask)
 // CHECK:   ret i8 [[RESULT]]
 // CHECK:   [[ISNOTZERO_LABEL]]:
 // CHECK:   [[REVINDEX:%[0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %Mask, i1 true)
-// CHECK:   [[TRUNC_REVINDEX:%[0-9]+]] = trunc i64 [[REVINDEX]] to i32
+// CHECK:   [[TRUNC_REVINDEX:%[0-9]+]] = trunc nuw nsw i64 [[REVINDEX]] to i32
 // CHECK:   [[INDEX:%[0-9]+]] = xor i32 [[TRUNC_REVINDEX]], 63
 // CHECK:   store i32 [[INDEX]], ptr %Index, align 4
 // CHECK:   br label %[[END_LABEL]]

diff  --git a/clang/test/CodeGen/ms-intrinsics.c b/clang/test/CodeGen/ms-intrinsics.c
index 5bb003d1f91fc0..6eabd725e2f7c6 100644
--- a/clang/test/CodeGen/ms-intrinsics.c
+++ b/clang/test/CodeGen/ms-intrinsics.c
@@ -189,7 +189,7 @@ unsigned char test_BitScanForward64(unsigned long *Index, unsigned __int64 Mask)
 // CHECK-ARM-X64:   ret i8 [[RESULT]]
 // CHECK-ARM-X64:   [[ISNOTZERO_LABEL]]:
 // CHECK-ARM-X64:   [[INDEX:%[0-9]+]] = tail call i64 @llvm.cttz.i64(i64 %Mask, i1 true)
-// CHECK-ARM-X64:   [[TRUNC_INDEX:%[0-9]+]] = trunc i64 [[INDEX]] to i32
+// CHECK-ARM-X64:   [[TRUNC_INDEX:%[0-9]+]] = trunc nuw nsw i64 [[INDEX]] to i32
 // CHECK-ARM-X64:   store i32 [[TRUNC_INDEX]], ptr %Index, align 4
 // CHECK-ARM-X64:   br label %[[END_LABEL]]
 
@@ -204,7 +204,7 @@ unsigned char test_BitScanReverse64(unsigned long *Index, unsigned __int64 Mask)
 // CHECK-ARM-X64:   ret i8 [[RESULT]]
 // CHECK-ARM-X64:   [[ISNOTZERO_LABEL]]:
 // CHECK-ARM-X64:   [[REVINDEX:%[0-9]+]] = tail call i64 @llvm.ctlz.i64(i64 %Mask, i1 true)
-// CHECK-ARM-X64:   [[TRUNC_REVINDEX:%[0-9]+]] = trunc i64 [[REVINDEX]] to i32
+// CHECK-ARM-X64:   [[TRUNC_REVINDEX:%[0-9]+]] = trunc nuw nsw i64 [[REVINDEX]] to i32
 // CHECK-ARM-X64:   [[INDEX:%[0-9]+]] = xor i32 [[TRUNC_REVINDEX]], 63
 // CHECK-ARM-X64:   store i32 [[INDEX]], ptr %Index, align 4
 // CHECK-ARM-X64:   br label %[[END_LABEL]]

diff  --git a/clang/test/CodeGenOpenCL/builtins-amdgcn.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
index 8a4533633706b2..bdca97c8878670 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn.cl
@@ -528,7 +528,7 @@ void test_read_exec_lo(global uint* out) {
 // CHECK-LABEL: @test_read_exec_hi(
 // CHECK: call i64 @llvm.amdgcn.ballot.i64(i1 true)
 // CHECK: lshr i64 [[A:%.*]], 32
-// CHECK: trunc i64 [[B:%.*]] to i32
+// CHECK: trunc nuw i64 [[B:%.*]] to i32
 void test_read_exec_hi(global uint* out) {
   *out = __builtin_amdgcn_read_exec_hi();
 }

diff  --git a/clang/test/Headers/__clang_hip_math.hip b/clang/test/Headers/__clang_hip_math.hip
index 37099de74fb8ec..2e5f521a5feaed 100644
--- a/clang/test/Headers/__clang_hip_math.hip
+++ b/clang/test/Headers/__clang_hip_math.hip
@@ -3703,7 +3703,7 @@ extern "C" __device__ BOOL_TYPE test___signbitf(float x) {
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast double [[X:%.*]] to i64
 // CHECK-NEXT:    [[DOTLOBIT:%.*]] = lshr i64 [[TMP0]], 63
-// CHECK-NEXT:    [[CONV:%.*]] = trunc i64 [[DOTLOBIT]] to i32
+// CHECK-NEXT:    [[CONV:%.*]] = trunc nuw nsw i64 [[DOTLOBIT]] to i32
 // CHECK-NEXT:    ret i32 [[CONV]]
 //
 extern "C" __device__ BOOL_TYPE test___signbit(double x) {

diff  --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 0652a8ba80b3fe..437e9b92c7032f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -897,7 +897,20 @@ Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
     }
   }
 
-  return nullptr;
+  bool Changed = false;
+  if (!Trunc.hasNoSignedWrap() &&
+      ComputeMaxSignificantBits(Src, /*Depth=*/0, &Trunc) <= DestWidth) {
+    Trunc.setHasNoSignedWrap(true);
+    Changed = true;
+  }
+  if (!Trunc.hasNoUnsignedWrap() &&
+      MaskedValueIsZero(Src, APInt::getBitsSetFrom(SrcWidth, DestWidth),
+                        /*Depth=*/0, &Trunc)) {
+    Trunc.setHasNoUnsignedWrap(true);
+    Changed = true;
+  }
+
+  return Changed ? &Trunc : nullptr;
 }
 
 Instruction *InstCombinerImpl::transformZExtICmp(ICmpInst *Cmp,

diff  --git a/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll b/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
index 1afae6565fe26b..6e0acfd6851165 100644
--- a/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
+++ b/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
@@ -45,7 +45,7 @@ entry:
 define signext i32 @vsetvl_sext() nounwind #0 {
 ; CHECK-LABEL: @vsetvl_sext(
 ; CHECK-NEXT:    [[A:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 1, i64 1, i64 1)
-; CHECK-NEXT:    [[B:%.*]] = trunc i64 [[A]] to i32
+; CHECK-NEXT:    [[B:%.*]] = trunc nuw nsw i64 [[A]] to i32
 ; CHECK-NEXT:    ret i32 [[B]]
 ;
   %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1)
@@ -56,7 +56,7 @@ define signext i32 @vsetvl_sext() nounwind #0 {
 define zeroext i32 @vsetvl_zext() nounwind #0 {
 ; CHECK-LABEL: @vsetvl_zext(
 ; CHECK-NEXT:    [[A:%.*]] = call i64 @llvm.riscv.vsetvli.i64(i64 1, i64 1, i64 1)
-; CHECK-NEXT:    [[B:%.*]] = trunc i64 [[A]] to i32
+; CHECK-NEXT:    [[B:%.*]] = trunc nuw nsw i64 [[A]] to i32
 ; CHECK-NEXT:    ret i32 [[B]]
 ;
   %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1)

diff  --git a/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvlimax-knownbits.ll b/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvlimax-knownbits.ll
index 093ba75e87b5a7..811a29c7e56248 100644
--- a/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvlimax-knownbits.ll
+++ b/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvlimax-knownbits.ll
@@ -45,7 +45,7 @@ entry:
 define signext i32 @vsetvlmax_sext() nounwind #0 {
 ; CHECK-LABEL: @vsetvlmax_sext(
 ; CHECK-NEXT:    [[A:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 1)
-; CHECK-NEXT:    [[B:%.*]] = trunc i64 [[A]] to i32
+; CHECK-NEXT:    [[B:%.*]] = trunc nuw nsw i64 [[A]] to i32
 ; CHECK-NEXT:    ret i32 [[B]]
 ;
   %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 1)
@@ -56,7 +56,7 @@ define signext i32 @vsetvlmax_sext() nounwind #0 {
 define zeroext i32 @vsetvlmax_zext() nounwind #0 {
 ; CHECK-LABEL: @vsetvlmax_zext(
 ; CHECK-NEXT:    [[A:%.*]] = call i64 @llvm.riscv.vsetvlimax.i64(i64 1, i64 1)
-; CHECK-NEXT:    [[B:%.*]] = trunc i64 [[A]] to i32
+; CHECK-NEXT:    [[B:%.*]] = trunc nuw nsw i64 [[A]] to i32
 ; CHECK-NEXT:    ret i32 [[B]]
 ;
   %a = call i64 @llvm.riscv.vsetvlimax(i64 1, i64 1)

diff  --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll
index ec3aca26514caf..23eee8547597e4 100644
--- a/llvm/test/Transforms/InstCombine/add.ll
+++ b/llvm/test/Transforms/InstCombine/add.ll
@@ -2375,7 +2375,7 @@ define { i64, i64 } @PR57576(i64 noundef %x, i64 noundef %y, i64 noundef %z, i64
 ; CHECK-NEXT:    [[SUB:%.*]] = sub i128 [[XY]], [[ZZ]]
 ; CHECK-NEXT:    [[T:%.*]] = trunc i128 [[SUB]] to i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i128 [[SUB]], 64
-; CHECK-NEXT:    [[DOTTR:%.*]] = trunc i128 [[TMP1]] to i64
+; CHECK-NEXT:    [[DOTTR:%.*]] = trunc nuw i128 [[TMP1]] to i64
 ; CHECK-NEXT:    [[DOTNARROW:%.*]] = sub i64 [[DOTTR]], [[W:%.*]]
 ; CHECK-NEXT:    [[R1:%.*]] = insertvalue { i64, i64 } poison, i64 [[T]], 0
 ; CHECK-NEXT:    [[R2:%.*]] = insertvalue { i64, i64 } [[R1]], i64 [[DOTNARROW]], 1

diff  --git a/llvm/test/Transforms/InstCombine/binop-itofp.ll b/llvm/test/Transforms/InstCombine/binop-itofp.ll
index cd9ec1e59203ff..d72a54e8babc9f 100644
--- a/llvm/test/Transforms/InstCombine/binop-itofp.ll
+++ b/llvm/test/Transforms/InstCombine/binop-itofp.ll
@@ -1010,7 +1010,7 @@ define float @test_ui_add_with_signed_constant(i32 %shr.i) {
 define float @missed_nonzero_check_on_constant_for_si_fmul(i1 %c, i1 %.b, ptr %g_2345) {
 ; CHECK-LABEL: @missed_nonzero_check_on_constant_for_si_fmul(
 ; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
-; CHECK-NEXT:    [[CONV_I:%.*]] = trunc i32 [[SEL]] to i16
+; CHECK-NEXT:    [[CONV_I:%.*]] = trunc nuw i32 [[SEL]] to i16
 ; CHECK-NEXT:    [[CONV1_I:%.*]] = sitofp i16 [[CONV_I]] to float
 ; CHECK-NEXT:    [[MUL3_I_I:%.*]] = call float @llvm.copysign.f32(float 0.000000e+00, float [[CONV1_I]])
 ; CHECK-NEXT:    store i32 [[SEL]], ptr [[G_2345:%.*]], align 4
@@ -1027,7 +1027,7 @@ define float @missed_nonzero_check_on_constant_for_si_fmul(i1 %c, i1 %.b, ptr %g
 define <2 x float> @missed_nonzero_check_on_constant_for_si_fmul_vec(i1 %c, i1 %.b, ptr %g_2345) {
 ; CHECK-LABEL: @missed_nonzero_check_on_constant_for_si_fmul_vec(
 ; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
-; CHECK-NEXT:    [[CONV_I_S:%.*]] = trunc i32 [[SEL]] to i16
+; CHECK-NEXT:    [[CONV_I_S:%.*]] = trunc nuw i32 [[SEL]] to i16
 ; CHECK-NEXT:    [[CONV_I_V:%.*]] = insertelement <2 x i16> poison, i16 [[CONV_I_S]], i64 0
 ; CHECK-NEXT:    [[CONV_I:%.*]] = shufflevector <2 x i16> [[CONV_I_V]], <2 x i16> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    [[CONV1_I:%.*]] = sitofp <2 x i16> [[CONV_I]] to <2 x float>
@@ -1048,7 +1048,7 @@ define <2 x float> @missed_nonzero_check_on_constant_for_si_fmul_vec(i1 %c, i1 %
 define float @negzero_check_on_constant_for_si_fmul(i1 %c, i1 %.b, ptr %g_2345) {
 ; CHECK-LABEL: @negzero_check_on_constant_for_si_fmul(
 ; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
-; CHECK-NEXT:    [[CONV_I:%.*]] = trunc i32 [[SEL]] to i16
+; CHECK-NEXT:    [[CONV_I:%.*]] = trunc nuw i32 [[SEL]] to i16
 ; CHECK-NEXT:    [[CONV1_I:%.*]] = sitofp i16 [[CONV_I]] to float
 ; CHECK-NEXT:    [[TMP1:%.*]] = fneg float [[CONV1_I]]
 ; CHECK-NEXT:    [[MUL3_I_I:%.*]] = call float @llvm.copysign.f32(float 0.000000e+00, float [[TMP1]])
@@ -1066,7 +1066,7 @@ define float @negzero_check_on_constant_for_si_fmul(i1 %c, i1 %.b, ptr %g_2345)
 define <2 x float> @nonzero_check_on_constant_for_si_fmul_vec_w_undef(i1 %c, i1 %.b, ptr %g_2345) {
 ; CHECK-LABEL: @nonzero_check_on_constant_for_si_fmul_vec_w_undef(
 ; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
-; CHECK-NEXT:    [[CONV_I_S:%.*]] = trunc i32 [[SEL]] to i16
+; CHECK-NEXT:    [[CONV_I_S:%.*]] = trunc nuw i32 [[SEL]] to i16
 ; CHECK-NEXT:    [[CONV_I_V:%.*]] = insertelement <2 x i16> poison, i16 [[CONV_I_S]], i64 0
 ; CHECK-NEXT:    [[CONV_I:%.*]] = shufflevector <2 x i16> [[CONV_I_V]], <2 x i16> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    [[CONV1_I:%.*]] = sitofp <2 x i16> [[CONV_I]] to <2 x float>
@@ -1087,7 +1087,7 @@ define <2 x float> @nonzero_check_on_constant_for_si_fmul_vec_w_undef(i1 %c, i1
 define <2 x float> @nonzero_check_on_constant_for_si_fmul_nz_vec_w_undef(i1 %c, i1 %.b, ptr %g_2345) {
 ; CHECK-LABEL: @nonzero_check_on_constant_for_si_fmul_nz_vec_w_undef(
 ; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
-; CHECK-NEXT:    [[CONV_I_S:%.*]] = trunc i32 [[SEL]] to i16
+; CHECK-NEXT:    [[CONV_I_S:%.*]] = trunc nuw i32 [[SEL]] to i16
 ; CHECK-NEXT:    [[CONV_I_V:%.*]] = insertelement <2 x i16> poison, i16 [[CONV_I_S]], i64 0
 ; CHECK-NEXT:    [[CONV_I:%.*]] = shufflevector <2 x i16> [[CONV_I_V]], <2 x i16> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    [[CONV1_I:%.*]] = sitofp <2 x i16> [[CONV_I]] to <2 x float>
@@ -1108,7 +1108,7 @@ define <2 x float> @nonzero_check_on_constant_for_si_fmul_nz_vec_w_undef(i1 %c,
 define <2 x float> @nonzero_check_on_constant_for_si_fmul_negz_vec_w_undef(i1 %c, i1 %.b, ptr %g_2345) {
 ; CHECK-LABEL: @nonzero_check_on_constant_for_si_fmul_negz_vec_w_undef(
 ; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
-; CHECK-NEXT:    [[CONV_I_S:%.*]] = trunc i32 [[SEL]] to i16
+; CHECK-NEXT:    [[CONV_I_S:%.*]] = trunc nuw i32 [[SEL]] to i16
 ; CHECK-NEXT:    [[CONV_I_V:%.*]] = insertelement <2 x i16> poison, i16 [[CONV_I_S]], i64 0
 ; CHECK-NEXT:    [[CONV_I:%.*]] = shufflevector <2 x i16> [[CONV_I_V]], <2 x i16> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    [[CONV1_I:%.*]] = sitofp <2 x i16> [[CONV_I]] to <2 x float>

diff  --git a/llvm/test/Transforms/InstCombine/bswap-fold.ll b/llvm/test/Transforms/InstCombine/bswap-fold.ll
index 05933d37057cce..19522168beaf5e 100644
--- a/llvm/test/Transforms/InstCombine/bswap-fold.ll
+++ b/llvm/test/Transforms/InstCombine/bswap-fold.ll
@@ -211,7 +211,7 @@ define i64 @variable_shl_not_masked_enough_i64(i64 %x, i64 %n) {
 define i16 @test7(i32 %A) {
 ; CHECK-LABEL: @test7(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[A:%.*]], 16
-; CHECK-NEXT:    [[D:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[D:%.*]] = trunc nuw i32 [[TMP1]] to i16
 ; CHECK-NEXT:    ret i16 [[D]]
 ;
   %B = tail call i32 @llvm.bswap.i32(i32 %A) nounwind
@@ -223,7 +223,7 @@ define i16 @test7(i32 %A) {
 define <2 x i16> @test7_vector(<2 x i32> %A) {
 ; CHECK-LABEL: @test7_vector(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i32> [[A:%.*]], <i32 16, i32 16>
-; CHECK-NEXT:    [[D:%.*]] = trunc <2 x i32> [[TMP1]] to <2 x i16>
+; CHECK-NEXT:    [[D:%.*]] = trunc nuw <2 x i32> [[TMP1]] to <2 x i16>
 ; CHECK-NEXT:    ret <2 x i16> [[D]]
 ;
   %B = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %A) nounwind
@@ -235,7 +235,7 @@ define <2 x i16> @test7_vector(<2 x i32> %A) {
 define i16 @test8(i64 %A) {
 ; CHECK-LABEL: @test8(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[A:%.*]], 48
-; CHECK-NEXT:    [[D:%.*]] = trunc i64 [[TMP1]] to i16
+; CHECK-NEXT:    [[D:%.*]] = trunc nuw i64 [[TMP1]] to i16
 ; CHECK-NEXT:    ret i16 [[D]]
 ;
   %B = tail call i64 @llvm.bswap.i64(i64 %A) nounwind
@@ -247,7 +247,7 @@ define i16 @test8(i64 %A) {
 define <2 x i16> @test8_vector(<2 x i64> %A) {
 ; CHECK-LABEL: @test8_vector(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i64> [[A:%.*]], <i64 48, i64 48>
-; CHECK-NEXT:    [[D:%.*]] = trunc <2 x i64> [[TMP1]] to <2 x i16>
+; CHECK-NEXT:    [[D:%.*]] = trunc nuw <2 x i64> [[TMP1]] to <2 x i16>
 ; CHECK-NEXT:    ret <2 x i16> [[D]]
 ;
   %B = tail call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %A) nounwind

diff  --git a/llvm/test/Transforms/InstCombine/bswap.ll b/llvm/test/Transforms/InstCombine/bswap.ll
index 21eb170b8c58d3..d42583bb5699b7 100644
--- a/llvm/test/Transforms/InstCombine/bswap.ll
+++ b/llvm/test/Transforms/InstCombine/bswap.ll
@@ -43,7 +43,7 @@ define i16 @test1_trunc(i32 %i) {
 ; CHECK-NEXT:    [[T3:%.*]] = lshr i32 [[I]], 8
 ; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T3]], 65280
 ; CHECK-NEXT:    [[T5:%.*]] = or disjoint i32 [[T1]], [[T4]]
-; CHECK-NEXT:    [[T13:%.*]] = trunc i32 [[T5]] to i16
+; CHECK-NEXT:    [[T13:%.*]] = trunc nuw i32 [[T5]] to i16
 ; CHECK-NEXT:    ret i16 [[T13]]
 ;
   %t1 = lshr i32 %i, 24
@@ -61,7 +61,7 @@ define i16 @test1_trunc_extra_use(i32 %i) {
 ; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T3]], 65280
 ; CHECK-NEXT:    [[T5:%.*]] = or disjoint i32 [[T1]], [[T4]]
 ; CHECK-NEXT:    call void @extra_use(i32 [[T5]])
-; CHECK-NEXT:    [[T13:%.*]] = trunc i32 [[T5]] to i16
+; CHECK-NEXT:    [[T13:%.*]] = trunc nuw i32 [[T5]] to i16
 ; CHECK-NEXT:    ret i16 [[T13]]
 ;
   %t1 = lshr i32 %i, 24

diff  --git a/llvm/test/Transforms/InstCombine/cast.ll b/llvm/test/Transforms/InstCombine/cast.ll
index 97554e9462043c..d9c93ba277295c 100644
--- a/llvm/test/Transforms/InstCombine/cast.ll
+++ b/llvm/test/Transforms/InstCombine/cast.ll
@@ -1471,7 +1471,7 @@ define i64 @test91(i64 %A) {
 ; ALL-LABEL: @test91(
 ; ALL-NEXT:    [[B:%.*]] = sext i64 [[A:%.*]] to i96
 ; ALL-NEXT:    [[C:%.*]] = lshr i96 [[B]], 48
-; ALL-NEXT:    [[D:%.*]] = trunc i96 [[C]] to i64
+; ALL-NEXT:    [[D:%.*]] = trunc nuw nsw i96 [[C]] to i64
 ; ALL-NEXT:    ret i64 [[D]]
 ;
   %B = sext i64 %A to i96
@@ -1676,7 +1676,7 @@ define i8 @trunc_lshr_overshift_sext_uses3(i8 %A) {
 define i8 @trunc_lshr_sext_wide_input(i16 %A) {
 ; ALL-LABEL: @trunc_lshr_sext_wide_input(
 ; ALL-NEXT:    [[TMP1:%.*]] = ashr i16 [[A:%.*]], 9
-; ALL-NEXT:    [[D:%.*]] = trunc i16 [[TMP1]] to i8
+; ALL-NEXT:    [[D:%.*]] = trunc nsw i16 [[TMP1]] to i8
 ; ALL-NEXT:    ret i8 [[D]]
 ;
   %B = sext i16 %A to i32
@@ -1688,7 +1688,7 @@ define i8 @trunc_lshr_sext_wide_input(i16 %A) {
 define i8 @trunc_lshr_sext_wide_input_exact(i16 %A) {
 ; ALL-LABEL: @trunc_lshr_sext_wide_input_exact(
 ; ALL-NEXT:    [[TMP1:%.*]] = ashr exact i16 [[A:%.*]], 9
-; ALL-NEXT:    [[D:%.*]] = trunc i16 [[TMP1]] to i8
+; ALL-NEXT:    [[D:%.*]] = trunc nsw i16 [[TMP1]] to i8
 ; ALL-NEXT:    ret i8 [[D]]
 ;
   %B = sext i16 %A to i32
@@ -1702,7 +1702,7 @@ define <2 x i8> @trunc_lshr_sext_wide_input_uses1(<2 x i16> %A) {
 ; ALL-NEXT:    [[B:%.*]] = sext <2 x i16> [[A:%.*]] to <2 x i32>
 ; ALL-NEXT:    call void @use_v2i32(<2 x i32> [[B]])
 ; ALL-NEXT:    [[TMP1:%.*]] = ashr <2 x i16> [[A]], <i16 9, i16 9>
-; ALL-NEXT:    [[D:%.*]] = trunc <2 x i16> [[TMP1]] to <2 x i8>
+; ALL-NEXT:    [[D:%.*]] = trunc nsw <2 x i16> [[TMP1]] to <2 x i8>
 ; ALL-NEXT:    ret <2 x i8> [[D]]
 ;
   %B = sext <2 x i16> %A to <2 x i32>
@@ -1747,7 +1747,7 @@ define <2 x i8> @trunc_lshr_sext_wide_input_uses3(<2 x i16> %A) {
 define <2 x i8> @trunc_lshr_overshift_wide_input_sext(<2 x i16> %A) {
 ; ALL-LABEL: @trunc_lshr_overshift_wide_input_sext(
 ; ALL-NEXT:    [[TMP1:%.*]] = ashr <2 x i16> [[A:%.*]], <i16 15, i16 15>
-; ALL-NEXT:    [[D:%.*]] = trunc <2 x i16> [[TMP1]] to <2 x i8>
+; ALL-NEXT:    [[D:%.*]] = trunc nsw <2 x i16> [[TMP1]] to <2 x i8>
 ; ALL-NEXT:    ret <2 x i8> [[D]]
 ;
   %B = sext <2 x i16> %A to <2 x i32>
@@ -1761,7 +1761,7 @@ define i8 @trunc_lshr_overshift_sext_wide_input_uses1(i16 %A) {
 ; ALL-NEXT:    [[B:%.*]] = sext i16 [[A:%.*]] to i32
 ; ALL-NEXT:    call void @use_i32(i32 [[B]])
 ; ALL-NEXT:    [[TMP1:%.*]] = ashr i16 [[A]], 15
-; ALL-NEXT:    [[D:%.*]] = trunc i16 [[TMP1]] to i8
+; ALL-NEXT:    [[D:%.*]] = trunc nsw i16 [[TMP1]] to i8
 ; ALL-NEXT:    ret i8 [[D]]
 ;
   %B = sext i16 %A to i32
@@ -1776,7 +1776,7 @@ define <2 x i8> @trunc_lshr_overshift_sext_wide_input_uses2(<2 x i16> %A) {
 ; ALL-NEXT:    [[TMP1:%.*]] = ashr <2 x i16> [[A:%.*]], <i16 15, i16 15>
 ; ALL-NEXT:    [[C:%.*]] = zext <2 x i16> [[TMP1]] to <2 x i32>
 ; ALL-NEXT:    call void @use_v2i32(<2 x i32> [[C]])
-; ALL-NEXT:    [[D:%.*]] = trunc <2 x i16> [[TMP1]] to <2 x i8>
+; ALL-NEXT:    [[D:%.*]] = trunc nsw <2 x i16> [[TMP1]] to <2 x i8>
 ; ALL-NEXT:    ret <2 x i8> [[D]]
 ;
   %B = sext <2 x i16> %A to <2 x i32>
@@ -1925,7 +1925,7 @@ define <2 x i8> @trunc_lshr_overshift2_sext(<2 x i8> %A) {
 ; ALL-LABEL: @trunc_lshr_overshift2_sext(
 ; ALL-NEXT:    [[B:%.*]] = sext <2 x i8> [[A:%.*]] to <2 x i32>
 ; ALL-NEXT:    [[C:%.*]] = lshr <2 x i32> [[B]], <i32 25, i32 25>
-; ALL-NEXT:    [[D:%.*]] = trunc <2 x i32> [[C]] to <2 x i8>
+; ALL-NEXT:    [[D:%.*]] = trunc nuw nsw <2 x i32> [[C]] to <2 x i8>
 ; ALL-NEXT:    ret <2 x i8> [[D]]
 ;
   %B = sext <2 x i8> %A to <2 x i32>
@@ -1939,7 +1939,7 @@ define i8 @trunc_lshr_overshift2_sext_uses1(i8 %A) {
 ; ALL-NEXT:    [[B:%.*]] = sext i8 [[A:%.*]] to i32
 ; ALL-NEXT:    call void @use_i32(i32 [[B]])
 ; ALL-NEXT:    [[C:%.*]] = lshr i32 [[B]], 25
-; ALL-NEXT:    [[D:%.*]] = trunc i32 [[C]] to i8
+; ALL-NEXT:    [[D:%.*]] = trunc nuw nsw i32 [[C]] to i8
 ; ALL-NEXT:    ret i8 [[D]]
 ;
   %B = sext i8 %A to i32
@@ -1954,7 +1954,7 @@ define <2 x i8> @trunc_lshr_overshift2_sext_uses2(<2 x i8> %A) {
 ; ALL-NEXT:    [[B:%.*]] = sext <2 x i8> [[A:%.*]] to <2 x i32>
 ; ALL-NEXT:    [[C:%.*]] = lshr <2 x i32> [[B]], <i32 25, i32 25>
 ; ALL-NEXT:    call void @use_v2i32(<2 x i32> [[C]])
-; ALL-NEXT:    [[D:%.*]] = trunc <2 x i32> [[C]] to <2 x i8>
+; ALL-NEXT:    [[D:%.*]] = trunc nuw nsw <2 x i32> [[C]] to <2 x i8>
 ; ALL-NEXT:    ret <2 x i8> [[D]]
 ;
   %B = sext <2 x i8> %A to <2 x i32>
@@ -1970,7 +1970,7 @@ define i8 @trunc_lshr_overshift2_sext_uses3(i8 %A) {
 ; ALL-NEXT:    call void @use_i32(i32 [[B]])
 ; ALL-NEXT:    [[C:%.*]] = lshr i32 [[B]], 25
 ; ALL-NEXT:    call void @use_i32(i32 [[C]])
-; ALL-NEXT:    [[D:%.*]] = trunc i32 [[C]] to i8
+; ALL-NEXT:    [[D:%.*]] = trunc nuw nsw i32 [[C]] to i8
 ; ALL-NEXT:    ret i8 [[D]]
 ;
   %B = sext i8 %A to i32
@@ -2018,7 +2018,7 @@ define <2 x i8> @trunc_lshr_zext_uniform_undef(<2 x i8> %A) {
 ; ALL-LABEL: @trunc_lshr_zext_uniform_undef(
 ; ALL-NEXT:    [[B:%.*]] = zext <2 x i8> [[A:%.*]] to <2 x i32>
 ; ALL-NEXT:    [[C:%.*]] = lshr <2 x i32> [[B]], <i32 6, i32 undef>
-; ALL-NEXT:    [[D:%.*]] = trunc <2 x i32> [[C]] to <2 x i8>
+; ALL-NEXT:    [[D:%.*]] = trunc nuw <2 x i32> [[C]] to <2 x i8>
 ; ALL-NEXT:    ret <2 x i8> [[D]]
 ;
   %B = zext <2 x i8> %A to <2 x i32>
@@ -2042,7 +2042,7 @@ define <3 x i8> @trunc_lshr_zext_nonuniform_undef(<3 x i8> %A) {
 ; ALL-LABEL: @trunc_lshr_zext_nonuniform_undef(
 ; ALL-NEXT:    [[B:%.*]] = zext <3 x i8> [[A:%.*]] to <3 x i32>
 ; ALL-NEXT:    [[C:%.*]] = lshr <3 x i32> [[B]], <i32 6, i32 2, i32 undef>
-; ALL-NEXT:    [[D:%.*]] = trunc <3 x i32> [[C]] to <3 x i8>
+; ALL-NEXT:    [[D:%.*]] = trunc nuw <3 x i32> [[C]] to <3 x i8>
 ; ALL-NEXT:    ret <3 x i8> [[D]]
 ;
   %B = zext <3 x i8> %A to <3 x i32>
@@ -2095,7 +2095,7 @@ define i4 @pr33078_3(i8 %A) {
 ; ALL-LABEL: @pr33078_3(
 ; ALL-NEXT:    [[B:%.*]] = sext i8 [[A:%.*]] to i16
 ; ALL-NEXT:    [[C:%.*]] = lshr i16 [[B]], 12
-; ALL-NEXT:    [[D:%.*]] = trunc i16 [[C]] to i4
+; ALL-NEXT:    [[D:%.*]] = trunc nuw i16 [[C]] to i4
 ; ALL-NEXT:    ret i4 [[D]]
 ;
   %B = sext i8 %A to i16
@@ -2109,7 +2109,7 @@ define i8 @pr33078_4(i3 %x) {
 ; ALL-LABEL: @pr33078_4(
 ; ALL-NEXT:    [[B:%.*]] = sext i3 [[X:%.*]] to i16
 ; ALL-NEXT:    [[C:%.*]] = lshr i16 [[B]], 13
-; ALL-NEXT:    [[D:%.*]] = trunc i16 [[C]] to i8
+; ALL-NEXT:    [[D:%.*]] = trunc nuw nsw i16 [[C]] to i8
 ; ALL-NEXT:    ret i8 [[D]]
 ;
   %B = sext i3 %x to i16

diff  --git a/llvm/test/Transforms/InstCombine/cmp-intrinsic.ll b/llvm/test/Transforms/InstCombine/cmp-intrinsic.ll
index 5955650167c21a..66cbb2636cbc2b 100644
--- a/llvm/test/Transforms/InstCombine/cmp-intrinsic.ll
+++ b/llvm/test/Transforms/InstCombine/cmp-intrinsic.ll
@@ -618,7 +618,7 @@ define i1 @trunc_cttz_false_ult_other_i32_i6(i32 %x) {
 define i1 @trunc_cttz_false_ult_other_i32_i6_extra_use(i32 %x) {
 ; CHECK-LABEL: @trunc_cttz_false_ult_other_i32_i6_extra_use(
 ; CHECK-NEXT:    [[TZ:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range [[RNG0]]
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[TZ]] to i6
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc nuw i32 [[TZ]] to i6
 ; CHECK-NEXT:    call void @use6(i6 [[TRUNC]])
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i6 [[TRUNC]], 7
 ; CHECK-NEXT:    ret i1 [[CMP]]
@@ -720,7 +720,7 @@ define i1 @trunc_ctlz_false_ugt_other_i32_i6(i32 %x) {
 define i1 @trunc_ctlz_false_ugt_other_i32_i6_extra_use(i32 %x) {
 ; CHECK-LABEL: @trunc_ctlz_false_ugt_other_i32_i6_extra_use(
 ; CHECK-NEXT:    [[LZ:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range [[RNG0]]
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[LZ]] to i6
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc nuw i32 [[LZ]] to i6
 ; CHECK-NEXT:    call void @use6(i6 [[TRUNC]])
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i6 [[TRUNC]], 4
 ; CHECK-NEXT:    ret i1 [[CMP]]

diff  --git a/llvm/test/Transforms/InstCombine/compare-signs.ll b/llvm/test/Transforms/InstCombine/compare-signs.ll
index d7aa710e1ef03c..3730d46d5f0f4b 100644
--- a/llvm/test/Transforms/InstCombine/compare-signs.ll
+++ b/llvm/test/Transforms/InstCombine/compare-signs.ll
@@ -223,7 +223,7 @@ define <2 x i1> @shift_trunc_signbit_test_vec_uses(<2 x i17> %x, ptr %p1, ptr %p
 ; CHECK-LABEL: @shift_trunc_signbit_test_vec_uses(
 ; CHECK-NEXT:    [[SH:%.*]] = lshr <2 x i17> [[X:%.*]], <i17 4, i17 4>
 ; CHECK-NEXT:    store <2 x i17> [[SH]], ptr [[P1:%.*]], align 8
-; CHECK-NEXT:    [[TR:%.*]] = trunc <2 x i17> [[SH]] to <2 x i13>
+; CHECK-NEXT:    [[TR:%.*]] = trunc nuw <2 x i17> [[SH]] to <2 x i13>
 ; CHECK-NEXT:    store <2 x i13> [[TR]], ptr [[P2:%.*]], align 4
 ; CHECK-NEXT:    [[R:%.*]] = icmp sgt <2 x i17> [[X]], <i17 -1, i17 -1>
 ; CHECK-NEXT:    ret <2 x i1> [[R]]
@@ -255,7 +255,7 @@ define i1 @shift_trunc_wrong_shift(i32 %x) {
 define i1 @shift_trunc_wrong_cmp(i32 %x) {
 ; CHECK-LABEL: @shift_trunc_wrong_cmp(
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i32 [[X:%.*]], 24
-; CHECK-NEXT:    [[TR:%.*]] = trunc i32 [[SH]] to i8
+; CHECK-NEXT:    [[TR:%.*]] = trunc nuw i32 [[SH]] to i8
 ; CHECK-NEXT:    [[R:%.*]] = icmp slt i8 [[TR]], 1
 ; CHECK-NEXT:    ret i1 [[R]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/ctpop.ll b/llvm/test/Transforms/InstCombine/ctpop.ll
index dcea5fa87479eb..27194724b7d83b 100644
--- a/llvm/test/Transforms/InstCombine/ctpop.ll
+++ b/llvm/test/Transforms/InstCombine/ctpop.ll
@@ -397,7 +397,7 @@ define i32 @parity_xor_trunc(i64 %arg, i64 %arg1) {
 ; CHECK-LABEL: @parity_xor_trunc(
 ; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[ARG1:%.*]], [[ARG:%.*]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP1]]), !range [[RNG5:![0-9]+]]
-; CHECK-NEXT:    [[I4:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[I4:%.*]] = trunc nuw nsw i64 [[TMP2]] to i32
 ; CHECK-NEXT:    [[I5:%.*]] = and i32 [[I4]], 1
 ; CHECK-NEXT:    ret i32 [[I5]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/extractelement-inseltpoison.ll b/llvm/test/Transforms/InstCombine/extractelement-inseltpoison.ll
index 877aa2e523a311..57e81d2da89895 100644
--- a/llvm/test/Transforms/InstCombine/extractelement-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/extractelement-inseltpoison.ll
@@ -48,7 +48,7 @@ define i32 @bitcasted_inselt_wide_source_zero_elt(i64 %x) {
 ;
 ; BE-LABEL: @bitcasted_inselt_wide_source_zero_elt(
 ; BE-NEXT:    [[TMP1:%.*]] = lshr i64 [[X:%.*]], 32
-; BE-NEXT:    [[R:%.*]] = trunc i64 [[TMP1]] to i32
+; BE-NEXT:    [[R:%.*]] = trunc nuw i64 [[TMP1]] to i32
 ; BE-NEXT:    ret i32 [[R]]
 ;
   %i = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
@@ -64,7 +64,7 @@ define i16 @bitcasted_inselt_wide_source_modulo_elt(i64 %x) {
 ;
 ; BE-LABEL: @bitcasted_inselt_wide_source_modulo_elt(
 ; BE-NEXT:    [[TMP1:%.*]] = lshr i64 [[X:%.*]], 48
-; BE-NEXT:    [[R:%.*]] = trunc i64 [[TMP1]] to i16
+; BE-NEXT:    [[R:%.*]] = trunc nuw i64 [[TMP1]] to i16
 ; BE-NEXT:    ret i16 [[R]]
 ;
   %i = insertelement <2 x i64> poison, i64 %x, i32 1
@@ -76,7 +76,7 @@ define i16 @bitcasted_inselt_wide_source_modulo_elt(i64 %x) {
 define i32 @bitcasted_inselt_wide_source_not_modulo_elt(i64 %x) {
 ; LE-LABEL: @bitcasted_inselt_wide_source_not_modulo_elt(
 ; LE-NEXT:    [[TMP1:%.*]] = lshr i64 [[X:%.*]], 32
-; LE-NEXT:    [[R:%.*]] = trunc i64 [[TMP1]] to i32
+; LE-NEXT:    [[R:%.*]] = trunc nuw i64 [[TMP1]] to i32
 ; LE-NEXT:    ret i32 [[R]]
 ;
 ; BE-LABEL: @bitcasted_inselt_wide_source_not_modulo_elt(
@@ -166,7 +166,7 @@ define i8 @bitcasted_inselt_wide_source_uses(i32 %x) {
 define float @bitcasted_inselt_to_FP(i64 %x) {
 ; LE-LABEL: @bitcasted_inselt_to_FP(
 ; LE-NEXT:    [[TMP1:%.*]] = lshr i64 [[X:%.*]], 32
-; LE-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
+; LE-NEXT:    [[TMP2:%.*]] = trunc nuw i64 [[TMP1]] to i32
 ; LE-NEXT:    [[R:%.*]] = bitcast i32 [[TMP2]] to float
 ; LE-NEXT:    ret float [[R]]
 ;
@@ -218,7 +218,7 @@ define i32 @bitcasted_inselt_from_FP(double %x) {
 ; LE-LABEL: @bitcasted_inselt_from_FP(
 ; LE-NEXT:    [[TMP1:%.*]] = bitcast double [[X:%.*]] to i64
 ; LE-NEXT:    [[TMP2:%.*]] = lshr i64 [[TMP1]], 32
-; LE-NEXT:    [[R:%.*]] = trunc i64 [[TMP2]] to i32
+; LE-NEXT:    [[R:%.*]] = trunc nuw i64 [[TMP2]] to i32
 ; LE-NEXT:    ret i32 [[R]]
 ;
 ; BE-LABEL: @bitcasted_inselt_from_FP(

diff  --git a/llvm/test/Transforms/InstCombine/extractelement.ll b/llvm/test/Transforms/InstCombine/extractelement.ll
index bc5dd060a540ae..28a4702559c46c 100644
--- a/llvm/test/Transforms/InstCombine/extractelement.ll
+++ b/llvm/test/Transforms/InstCombine/extractelement.ll
@@ -50,7 +50,7 @@ define i32 @bitcasted_inselt_wide_source_zero_elt(i64 %x) {
 ;
 ; ANYBE-LABEL: @bitcasted_inselt_wide_source_zero_elt(
 ; ANYBE-NEXT:    [[TMP1:%.*]] = lshr i64 [[X:%.*]], 32
-; ANYBE-NEXT:    [[R:%.*]] = trunc i64 [[TMP1]] to i32
+; ANYBE-NEXT:    [[R:%.*]] = trunc nuw i64 [[TMP1]] to i32
 ; ANYBE-NEXT:    ret i32 [[R]]
 ;
   %i = insertelement <2 x i64> zeroinitializer, i64 %x, i32 0
@@ -66,7 +66,7 @@ define i16 @bitcasted_inselt_wide_source_modulo_elt(i64 %x) {
 ;
 ; ANYBE-LABEL: @bitcasted_inselt_wide_source_modulo_elt(
 ; ANYBE-NEXT:    [[TMP1:%.*]] = lshr i64 [[X:%.*]], 48
-; ANYBE-NEXT:    [[R:%.*]] = trunc i64 [[TMP1]] to i16
+; ANYBE-NEXT:    [[R:%.*]] = trunc nuw i64 [[TMP1]] to i16
 ; ANYBE-NEXT:    ret i16 [[R]]
 ;
   %i = insertelement <2 x i64> undef, i64 %x, i32 1
@@ -78,7 +78,7 @@ define i16 @bitcasted_inselt_wide_source_modulo_elt(i64 %x) {
 define i32 @bitcasted_inselt_wide_source_not_modulo_elt(i64 %x) {
 ; ANYLE-LABEL: @bitcasted_inselt_wide_source_not_modulo_elt(
 ; ANYLE-NEXT:    [[TMP1:%.*]] = lshr i64 [[X:%.*]], 32
-; ANYLE-NEXT:    [[R:%.*]] = trunc i64 [[TMP1]] to i32
+; ANYLE-NEXT:    [[R:%.*]] = trunc nuw i64 [[TMP1]] to i32
 ; ANYLE-NEXT:    ret i32 [[R]]
 ;
 ; ANYBE-LABEL: @bitcasted_inselt_wide_source_not_modulo_elt(
@@ -168,7 +168,7 @@ define i8 @bitcasted_inselt_wide_source_uses(i32 %x) {
 define float @bitcasted_inselt_to_FP(i64 %x) {
 ; ANYLE-LABEL: @bitcasted_inselt_to_FP(
 ; ANYLE-NEXT:    [[TMP1:%.*]] = lshr i64 [[X:%.*]], 32
-; ANYLE-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
+; ANYLE-NEXT:    [[TMP2:%.*]] = trunc nuw i64 [[TMP1]] to i32
 ; ANYLE-NEXT:    [[R:%.*]] = bitcast i32 [[TMP2]] to float
 ; ANYLE-NEXT:    ret float [[R]]
 ;
@@ -220,7 +220,7 @@ define i32 @bitcasted_inselt_from_FP(double %x) {
 ; ANYLE-LABEL: @bitcasted_inselt_from_FP(
 ; ANYLE-NEXT:    [[TMP1:%.*]] = bitcast double [[X:%.*]] to i64
 ; ANYLE-NEXT:    [[TMP2:%.*]] = lshr i64 [[TMP1]], 32
-; ANYLE-NEXT:    [[R:%.*]] = trunc i64 [[TMP2]] to i32
+; ANYLE-NEXT:    [[R:%.*]] = trunc nuw i64 [[TMP2]] to i32
 ; ANYLE-NEXT:    ret i32 [[R]]
 ;
 ; ANYBE-LABEL: @bitcasted_inselt_from_FP(
@@ -341,7 +341,7 @@ define i8 @bitcast_scalar_supported_type_index0(i32 %x) {
 ;
 ; ANYBE-LABEL: @bitcast_scalar_supported_type_index0(
 ; ANYBE-NEXT:    [[EXTELT_OFFSET:%.*]] = lshr i32 [[X:%.*]], 24
-; ANYBE-NEXT:    [[R:%.*]] = trunc i32 [[EXTELT_OFFSET]] to i8
+; ANYBE-NEXT:    [[R:%.*]] = trunc nuw i32 [[EXTELT_OFFSET]] to i8
 ; ANYBE-NEXT:    ret i8 [[R]]
 ;
   %v = bitcast i32 %x to <4 x i8>
@@ -443,7 +443,7 @@ define half @bitcast_fp16vec_index0(i32 %x) {
 ;
 ; ANYBE-LABEL: @bitcast_fp16vec_index0(
 ; ANYBE-NEXT:    [[EXTELT_OFFSET:%.*]] = lshr i32 [[X:%.*]], 16
-; ANYBE-NEXT:    [[TMP1:%.*]] = trunc i32 [[EXTELT_OFFSET]] to i16
+; ANYBE-NEXT:    [[TMP1:%.*]] = trunc nuw i32 [[EXTELT_OFFSET]] to i16
 ; ANYBE-NEXT:    [[R:%.*]] = bitcast i16 [[TMP1]] to half
 ; ANYBE-NEXT:    ret half [[R]]
 ;
@@ -455,7 +455,7 @@ define half @bitcast_fp16vec_index0(i32 %x) {
 define half @bitcast_fp16vec_index1(i32 %x) {
 ; ANYLE-LABEL: @bitcast_fp16vec_index1(
 ; ANYLE-NEXT:    [[EXTELT_OFFSET:%.*]] = lshr i32 [[X:%.*]], 16
-; ANYLE-NEXT:    [[TMP1:%.*]] = trunc i32 [[EXTELT_OFFSET]] to i16
+; ANYLE-NEXT:    [[TMP1:%.*]] = trunc nuw i32 [[EXTELT_OFFSET]] to i16
 ; ANYLE-NEXT:    [[R:%.*]] = bitcast i16 [[TMP1]] to half
 ; ANYLE-NEXT:    ret half [[R]]
 ;
@@ -477,7 +477,7 @@ define bfloat @bitcast_bfp16vec_index0(i32 %x) {
 ;
 ; ANYBE-LABEL: @bitcast_bfp16vec_index0(
 ; ANYBE-NEXT:    [[EXTELT_OFFSET:%.*]] = lshr i32 [[X:%.*]], 16
-; ANYBE-NEXT:    [[TMP1:%.*]] = trunc i32 [[EXTELT_OFFSET]] to i16
+; ANYBE-NEXT:    [[TMP1:%.*]] = trunc nuw i32 [[EXTELT_OFFSET]] to i16
 ; ANYBE-NEXT:    [[R:%.*]] = bitcast i16 [[TMP1]] to bfloat
 ; ANYBE-NEXT:    ret bfloat [[R]]
 ;
@@ -489,7 +489,7 @@ define bfloat @bitcast_bfp16vec_index0(i32 %x) {
 define bfloat @bitcast_bfp16vec_index1(i32 %x) {
 ; ANYLE-LABEL: @bitcast_bfp16vec_index1(
 ; ANYLE-NEXT:    [[EXTELT_OFFSET:%.*]] = lshr i32 [[X:%.*]], 16
-; ANYLE-NEXT:    [[TMP1:%.*]] = trunc i32 [[EXTELT_OFFSET]] to i16
+; ANYLE-NEXT:    [[TMP1:%.*]] = trunc nuw i32 [[EXTELT_OFFSET]] to i16
 ; ANYLE-NEXT:    [[R:%.*]] = bitcast i16 [[TMP1]] to bfloat
 ; ANYLE-NEXT:    ret bfloat [[R]]
 ;
@@ -511,7 +511,7 @@ define float @bitcast_fp32vec_index0(i64 %x) {
 ;
 ; BE64-LABEL: @bitcast_fp32vec_index0(
 ; BE64-NEXT:    [[EXTELT_OFFSET:%.*]] = lshr i64 [[X:%.*]], 32
-; BE64-NEXT:    [[TMP1:%.*]] = trunc i64 [[EXTELT_OFFSET]] to i32
+; BE64-NEXT:    [[TMP1:%.*]] = trunc nuw i64 [[EXTELT_OFFSET]] to i32
 ; BE64-NEXT:    [[R:%.*]] = bitcast i32 [[TMP1]] to float
 ; BE64-NEXT:    ret float [[R]]
 ;
@@ -528,7 +528,7 @@ define float @bitcast_fp32vec_index0(i64 %x) {
 define float @bitcast_fp32vec_index1(i64 %x) {
 ; LE64-LABEL: @bitcast_fp32vec_index1(
 ; LE64-NEXT:    [[EXTELT_OFFSET:%.*]] = lshr i64 [[X:%.*]], 32
-; LE64-NEXT:    [[TMP1:%.*]] = trunc i64 [[EXTELT_OFFSET]] to i32
+; LE64-NEXT:    [[TMP1:%.*]] = trunc nuw i64 [[EXTELT_OFFSET]] to i32
 ; LE64-NEXT:    [[R:%.*]] = bitcast i32 [[TMP1]] to float
 ; LE64-NEXT:    ret float [[R]]
 ;
@@ -570,7 +570,7 @@ define double @bitcast_fp64vec_index0(i128 %x) {
 ;
 ; BE128-LABEL: @bitcast_fp64vec_index0(
 ; BE128-NEXT:    [[EXTELT_OFFSET:%.*]] = lshr i128 [[X:%.*]], 64
-; BE128-NEXT:    [[TMP1:%.*]] = trunc i128 [[EXTELT_OFFSET]] to i64
+; BE128-NEXT:    [[TMP1:%.*]] = trunc nuw i128 [[EXTELT_OFFSET]] to i64
 ; BE128-NEXT:    [[R:%.*]] = bitcast i64 [[TMP1]] to double
 ; BE128-NEXT:    ret double [[R]]
 ;
@@ -587,7 +587,7 @@ define double @bitcast_fp64vec_index1(i128 %x) {
 ;
 ; LE128-LABEL: @bitcast_fp64vec_index1(
 ; LE128-NEXT:    [[EXTELT_OFFSET:%.*]] = lshr i128 [[X:%.*]], 64
-; LE128-NEXT:    [[TMP1:%.*]] = trunc i128 [[EXTELT_OFFSET]] to i64
+; LE128-NEXT:    [[TMP1:%.*]] = trunc nuw i128 [[EXTELT_OFFSET]] to i64
 ; LE128-NEXT:    [[R:%.*]] = bitcast i64 [[TMP1]] to double
 ; LE128-NEXT:    ret double [[R]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/ffs-1.ll b/llvm/test/Transforms/InstCombine/ffs-1.ll
index a610376da8b591..7cf080765bb1b6 100644
--- a/llvm/test/Transforms/InstCombine/ffs-1.ll
+++ b/llvm/test/Transforms/InstCombine/ffs-1.ll
@@ -181,7 +181,7 @@ define i32 @test_simplify15(i64 %x) {
 ;
 ; TARGET-LABEL: @test_simplify15(
 ; TARGET-NEXT:    [[CTTZ:%.*]] = call i64 @llvm.cttz.i64(i64 %x, i1 true), !range !1
-; TARGET-NEXT:    [[TMP1:%.*]] = trunc i64 [[CTTZ]] to i32
+; TARGET-NEXT:    [[TMP1:%.*]] = trunc nuw nsw i64 [[CTTZ]] to i32
 ; TARGET-NEXT:    [[TMP2:%.*]] = add nuw nsw i32 [[TMP1]], 1
 ; TARGET-NEXT:    [[TMP3:%.*]] = icmp eq i64 %x, 0
 ; TARGET-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i32 0, i32 [[TMP2]]

diff  --git a/llvm/test/Transforms/InstCombine/fls.ll b/llvm/test/Transforms/InstCombine/fls.ll
index 8b25a313b6b822..7710093e195a11 100644
--- a/llvm/test/Transforms/InstCombine/fls.ll
+++ b/llvm/test/Transforms/InstCombine/fls.ll
@@ -32,7 +32,7 @@ define i32 @myflsll() {
 define i32 @flsnotconst(i64 %z) {
 ; CHECK-LABEL: @flsnotconst(
 ; CHECK-NEXT:    [[CTLZ:%.*]] = call i64 @llvm.ctlz.i64(i64 [[Z:%.*]], i1 false), !range [[RNG0:![0-9]+]]
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[CTLZ]] to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc nuw nsw i64 [[CTLZ]] to i32
 ; CHECK-NEXT:    [[GOO:%.*]] = sub nsw i32 64, [[TMP1]]
 ; CHECK-NEXT:    ret i32 [[GOO]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/fold-log2-ceil-idiom.ll b/llvm/test/Transforms/InstCombine/fold-log2-ceil-idiom.ll
index 434d98449f99c4..a631aacd97ff94 100644
--- a/llvm/test/Transforms/InstCombine/fold-log2-ceil-idiom.ll
+++ b/llvm/test/Transforms/InstCombine/fold-log2-ceil-idiom.ll
@@ -282,7 +282,7 @@ define i5 @log2_ceil_idiom_trunc_multiuse4(i32 %x) {
 ; CHECK-LABEL: define i5 @log2_ceil_idiom_trunc_multiuse4(
 ; CHECK-SAME: i32 [[X:%.*]]) {
 ; CHECK-NEXT:    [[CTLZ:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X]], i1 true), !range [[RNG0]]
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i32 [[CTLZ]] to i5
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc nuw i32 [[CTLZ]] to i5
 ; CHECK-NEXT:    call void @use5(i5 [[TRUNC]])
 ; CHECK-NEXT:    [[XOR:%.*]] = xor i5 [[TRUNC]], -1
 ; CHECK-NEXT:    [[CTPOP:%.*]] = tail call i32 @llvm.ctpop.i32(i32 [[X]]), !range [[RNG0]]

diff  --git a/llvm/test/Transforms/InstCombine/high-bit-signmask-with-trunc.ll b/llvm/test/Transforms/InstCombine/high-bit-signmask-with-trunc.ll
index e87d90909e84ae..3ebab115f65439 100644
--- a/llvm/test/Transforms/InstCombine/high-bit-signmask-with-trunc.ll
+++ b/llvm/test/Transforms/InstCombine/high-bit-signmask-with-trunc.ll
@@ -4,7 +4,7 @@
 define i32 @t0(i64 %x) {
 ; CHECK-LABEL: @t0(
 ; CHECK-NEXT:    [[T0_NEG:%.*]] = ashr i64 [[X:%.*]], 63
-; CHECK-NEXT:    [[T1_NEG:%.*]] = trunc i64 [[T0_NEG]] to i32
+; CHECK-NEXT:    [[T1_NEG:%.*]] = trunc nsw i64 [[T0_NEG]] to i32
 ; CHECK-NEXT:    ret i32 [[T1_NEG]]
 ;
   %t0 = lshr i64 %x, 63
@@ -15,7 +15,7 @@ define i32 @t0(i64 %x) {
 define i32 @t1_exact(i64 %x) {
 ; CHECK-LABEL: @t1_exact(
 ; CHECK-NEXT:    [[T0_NEG:%.*]] = ashr exact i64 [[X:%.*]], 63
-; CHECK-NEXT:    [[T1_NEG:%.*]] = trunc i64 [[T0_NEG]] to i32
+; CHECK-NEXT:    [[T1_NEG:%.*]] = trunc nsw i64 [[T0_NEG]] to i32
 ; CHECK-NEXT:    ret i32 [[T1_NEG]]
 ;
   %t0 = lshr exact i64 %x, 63
@@ -26,7 +26,7 @@ define i32 @t1_exact(i64 %x) {
 define i32 @t2(i64 %x) {
 ; CHECK-LABEL: @t2(
 ; CHECK-NEXT:    [[T0_NEG:%.*]] = lshr i64 [[X:%.*]], 63
-; CHECK-NEXT:    [[T1_NEG:%.*]] = trunc i64 [[T0_NEG]] to i32
+; CHECK-NEXT:    [[T1_NEG:%.*]] = trunc nuw nsw i64 [[T0_NEG]] to i32
 ; CHECK-NEXT:    ret i32 [[T1_NEG]]
 ;
   %t0 = ashr i64 %x, 63
@@ -37,7 +37,7 @@ define i32 @t2(i64 %x) {
 define i32 @t3_exact(i64 %x) {
 ; CHECK-LABEL: @t3_exact(
 ; CHECK-NEXT:    [[T0_NEG:%.*]] = lshr exact i64 [[X:%.*]], 63
-; CHECK-NEXT:    [[T1_NEG:%.*]] = trunc i64 [[T0_NEG]] to i32
+; CHECK-NEXT:    [[T1_NEG:%.*]] = trunc nuw nsw i64 [[T0_NEG]] to i32
 ; CHECK-NEXT:    ret i32 [[T1_NEG]]
 ;
   %t0 = ashr exact i64 %x, 63
@@ -49,7 +49,7 @@ define i32 @t3_exact(i64 %x) {
 define <2 x i32> @t4(<2 x i64> %x) {
 ; CHECK-LABEL: @t4(
 ; CHECK-NEXT:    [[T0_NEG:%.*]] = ashr <2 x i64> [[X:%.*]], <i64 63, i64 63>
-; CHECK-NEXT:    [[T1_NEG:%.*]] = trunc <2 x i64> [[T0_NEG]] to <2 x i32>
+; CHECK-NEXT:    [[T1_NEG:%.*]] = trunc nsw <2 x i64> [[T0_NEG]] to <2 x i32>
 ; CHECK-NEXT:    ret <2 x i32> [[T1_NEG]]
 ;
   %t0 = lshr <2 x i64> %x, <i64 63, i64 63>
@@ -79,7 +79,7 @@ define i32 @t6(i64 %x) {
 ; CHECK-NEXT:    [[T0_NEG:%.*]] = ashr i64 [[X:%.*]], 63
 ; CHECK-NEXT:    [[T0:%.*]] = lshr i64 [[X]], 63
 ; CHECK-NEXT:    call void @use64(i64 [[T0]])
-; CHECK-NEXT:    [[T1_NEG:%.*]] = trunc i64 [[T0_NEG]] to i32
+; CHECK-NEXT:    [[T1_NEG:%.*]] = trunc nsw i64 [[T0_NEG]] to i32
 ; CHECK-NEXT:    ret i32 [[T1_NEG]]
 ;
   %t0 = lshr i64 %x, 63
@@ -92,7 +92,7 @@ define i32 @t6(i64 %x) {
 define i32 @n7(i64 %x) {
 ; CHECK-LABEL: @n7(
 ; CHECK-NEXT:    [[T0:%.*]] = lshr i64 [[X:%.*]], 63
-; CHECK-NEXT:    [[T1:%.*]] = trunc i64 [[T0]] to i32
+; CHECK-NEXT:    [[T1:%.*]] = trunc nuw nsw i64 [[T0]] to i32
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
 ; CHECK-NEXT:    [[R:%.*]] = sub nsw i32 0, [[T1]]
 ; CHECK-NEXT:    ret i32 [[R]]
@@ -108,7 +108,7 @@ define i32 @n8(i64 %x) {
 ; CHECK-LABEL: @n8(
 ; CHECK-NEXT:    [[T0:%.*]] = lshr i64 [[X:%.*]], 63
 ; CHECK-NEXT:    call void @use64(i64 [[T0]])
-; CHECK-NEXT:    [[T1:%.*]] = trunc i64 [[T0]] to i32
+; CHECK-NEXT:    [[T1:%.*]] = trunc nuw nsw i64 [[T0]] to i32
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
 ; CHECK-NEXT:    [[R:%.*]] = sub nsw i32 0, [[T1]]
 ; CHECK-NEXT:    ret i32 [[R]]
@@ -124,7 +124,7 @@ define i32 @n8(i64 %x) {
 define i32 @n9(i64 %x) {
 ; CHECK-LABEL: @n9(
 ; CHECK-NEXT:    [[T0:%.*]] = lshr i64 [[X:%.*]], 62
-; CHECK-NEXT:    [[T1:%.*]] = trunc i64 [[T0]] to i32
+; CHECK-NEXT:    [[T1:%.*]] = trunc nuw nsw i64 [[T0]] to i32
 ; CHECK-NEXT:    [[R:%.*]] = sub nsw i32 0, [[T1]]
 ; CHECK-NEXT:    ret i32 [[R]]
 ;
@@ -137,7 +137,7 @@ define i32 @n9(i64 %x) {
 define i32 @n10(i64 %x) {
 ; CHECK-LABEL: @n10(
 ; CHECK-NEXT:    [[T0_NEG:%.*]] = ashr i64 [[X:%.*]], 63
-; CHECK-NEXT:    [[T1_NEG:%.*]] = trunc i64 [[T0_NEG]] to i32
+; CHECK-NEXT:    [[T1_NEG:%.*]] = trunc nsw i64 [[T0_NEG]] to i32
 ; CHECK-NEXT:    [[R:%.*]] = add nsw i32 [[T1_NEG]], 1
 ; CHECK-NEXT:    ret i32 [[R]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll b/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
index d858c91becb570..aa23a6d27f69b7 100644
--- a/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
@@ -60,7 +60,7 @@ define void @PR33765(i8 %beth) {
 ; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[BETH:%.*]] to i32
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[CONV]], [[CONV]]
 ; CHECK-NEXT:    [[TINKY:%.*]] = load i16, ptr @glob, align 2
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[MUL]] to i16
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc nuw i32 [[MUL]] to i16
 ; CHECK-NEXT:    [[CONV14:%.*]] = and i16 [[TINKY]], [[TMP1]]
 ; CHECK-NEXT:    store i16 [[CONV14]], ptr @glob, align 2
 ; CHECK-NEXT:    ret void

diff  --git a/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll b/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
index 85f67bfa335bb8..7f616bbb2a8379 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
@@ -28,8 +28,8 @@ define i1 @icmp_trunc_x_trunc_y_fail_from_illegal1(i256 %x, i256 %y) {
 ; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i256 [[Y:%.*]], 65536
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
-; CHECK-NEXT:    [[X16:%.*]] = trunc i256 [[X]] to i16
-; CHECK-NEXT:    [[Y16:%.*]] = trunc i256 [[Y]] to i16
+; CHECK-NEXT:    [[X16:%.*]] = trunc nuw i256 [[X]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = trunc nuw i256 [[Y]] to i16
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i16 [[X16]], [[Y16]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -49,7 +49,7 @@ define i1 @icmp_trunc_x_trunc_y_illegal_trunc_to_legal_anyways(i123 %x, i32 %y)
 ; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i123 [[X]] to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc nuw nsw i123 [[X]] to i32
 ; CHECK-NEXT:    [[R:%.*]] = icmp eq i32 [[TMP1]], [[Y]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -89,7 +89,7 @@ define i1 @icmp_trunc_x_trunc_y_3(i64 %x, i32 %y) {
 ; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 256
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X]] to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc nuw nsw i64 [[X]] to i32
 ; CHECK-NEXT:    [[R:%.*]] = icmp uge i32 [[TMP1]], [[Y]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -109,7 +109,7 @@ define i1 @icmp_trunc_x_trunc_y_fail_maybe_dirty_upper(i32 %x, i32 %y) {
 ; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65537
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
-; CHECK-NEXT:    [[X16:%.*]] = trunc i32 [[X]] to i16
+; CHECK-NEXT:    [[X16:%.*]] = trunc nuw i32 [[X]] to i16
 ; CHECK-NEXT:    [[Y16:%.*]] = trunc i32 [[Y]] to i16
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[X16]], [[Y16]]
 ; CHECK-NEXT:    ret i1 [[R]]
@@ -131,7 +131,7 @@ define i1 @icmp_trunc_x_trunc_y_fail_maybe_dirty_upper_2(i32 %x, i32 %y) {
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
 ; CHECK-NEXT:    [[X16:%.*]] = trunc i32 [[X]] to i16
-; CHECK-NEXT:    [[Y16:%.*]] = trunc i32 [[Y]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = trunc nuw i32 [[Y]] to i16
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[X16]], [[Y16]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -151,7 +151,7 @@ define i1 @icmp_trunc_x_trunc_y_swap0(i33 %x, i32 %y) {
 ; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i33 [[X]] to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc nuw nsw i33 [[X]] to i32
 ; CHECK-NEXT:    [[R:%.*]] = icmp ule i32 [[TMP1]], [[Y]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -171,7 +171,7 @@ define i1 @icmp_trunc_x_trunc_y_swap1(i33 %x, i32 %y) {
 ; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i33 [[X]] to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc nuw nsw i33 [[X]] to i32
 ; CHECK-NEXT:    [[R:%.*]] = icmp uge i32 [[TMP1]], [[Y]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -238,7 +238,7 @@ define i1 @icmp_trunc_x_zext_y_3_fail_illegal(i6 %x, i45 %y) {
 ; CHECK-NEXT:    [[Y_LB_ONLY:%.*]] = icmp ult i45 [[Y:%.*]], 65536
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[Y_LB_ONLY]])
 ; CHECK-NEXT:    [[X16:%.*]] = zext i6 [[X:%.*]] to i16
-; CHECK-NEXT:    [[Y16:%.*]] = trunc i45 [[Y]] to i16
+; CHECK-NEXT:    [[Y16:%.*]] = trunc nuw i45 [[Y]] to i16
 ; CHECK-NEXT:    [[R:%.*]] = icmp ne i16 [[Y16]], [[X16]]
 ; CHECK-NEXT:    ret i1 [[R]]
 ;
@@ -254,7 +254,7 @@ define i1 @icmp_trunc_x_zext_y_fail_multiuse(i32 %x, i8 %y) {
 ; CHECK-LABEL: @icmp_trunc_x_zext_y_fail_multiuse(
 ; CHECK-NEXT:    [[X_LB_ONLY:%.*]] = icmp ult i32 [[X:%.*]], 65536
 ; CHECK-NEXT:    call void @llvm.assume(i1 [[X_LB_ONLY]])
-; CHECK-NEXT:    [[X16:%.*]] = trunc i32 [[X]] to i16
+; CHECK-NEXT:    [[X16:%.*]] = trunc nuw i32 [[X]] to i16
 ; CHECK-NEXT:    [[Y16:%.*]] = zext i8 [[Y:%.*]] to i16
 ; CHECK-NEXT:    call void @use(i16 [[Y16]])
 ; CHECK-NEXT:    [[R:%.*]] = icmp ule i16 [[X16]], [[Y16]]

diff  --git a/llvm/test/Transforms/InstCombine/icmp-topbitssame.ll b/llvm/test/Transforms/InstCombine/icmp-topbitssame.ll
index 284dc036d11d51..4e11ecbcb88972 100644
--- a/llvm/test/Transforms/InstCombine/icmp-topbitssame.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-topbitssame.ll
@@ -128,7 +128,7 @@ define i1 @wrongimm1(i16 %add) {
 define i1 @wrongimm2(i16 %add) {
 ; CHECK-LABEL: @wrongimm2(
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i16 [[ADD:%.*]], 8
-; CHECK-NEXT:    [[CONV_I:%.*]] = trunc i16 [[SH]] to i8
+; CHECK-NEXT:    [[CONV_I:%.*]] = trunc nuw i16 [[SH]] to i8
 ; CHECK-NEXT:    [[CONV1_I:%.*]] = trunc i16 [[ADD]] to i8
 ; CHECK-NEXT:    [[SHR2_I:%.*]] = ashr i8 [[CONV1_I]], 6
 ; CHECK-NEXT:    [[CMP_NOT_I:%.*]] = icmp eq i8 [[SHR2_I]], [[CONV_I]]
@@ -145,7 +145,7 @@ define i1 @wrongimm2(i16 %add) {
 define i1 @slt(i64 %add) {
 ; CHECK-LABEL: @slt(
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i64 [[ADD:%.*]], 32
-; CHECK-NEXT:    [[CONV_I:%.*]] = trunc i64 [[SH]] to i32
+; CHECK-NEXT:    [[CONV_I:%.*]] = trunc nuw i64 [[SH]] to i32
 ; CHECK-NEXT:    [[CONV1_I:%.*]] = trunc i64 [[ADD]] to i32
 ; CHECK-NEXT:    [[SHR2_I:%.*]] = ashr i32 [[CONV1_I]], 31
 ; CHECK-NEXT:    [[CMP_NOT_I:%.*]] = icmp slt i32 [[SHR2_I]], [[CONV_I]]
@@ -182,7 +182,7 @@ define i1 @extrause_a(i16 %add) {
 define i1 @extrause_l(i16 %add) {
 ; CHECK-LABEL: @extrause_l(
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i16 [[ADD:%.*]], 8
-; CHECK-NEXT:    [[CONV_I:%.*]] = trunc i16 [[SH]] to i8
+; CHECK-NEXT:    [[CONV_I:%.*]] = trunc nuw i16 [[SH]] to i8
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i16 [[ADD]], 128
 ; CHECK-NEXT:    [[CMP_NOT_I:%.*]] = icmp ult i16 [[TMP1]], 256
 ; CHECK-NEXT:    call void @use(i8 [[CONV_I]])
@@ -200,7 +200,7 @@ define i1 @extrause_l(i16 %add) {
 define i1 @extrause_la(i16 %add) {
 ; CHECK-LABEL: @extrause_la(
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i16 [[ADD:%.*]], 8
-; CHECK-NEXT:    [[CONV_I:%.*]] = trunc i16 [[SH]] to i8
+; CHECK-NEXT:    [[CONV_I:%.*]] = trunc nuw i16 [[SH]] to i8
 ; CHECK-NEXT:    [[CONV1_I:%.*]] = trunc i16 [[ADD]] to i8
 ; CHECK-NEXT:    [[SHR2_I:%.*]] = ashr i8 [[CONV1_I]], 7
 ; CHECK-NEXT:    [[CMP_NOT_I:%.*]] = icmp eq i8 [[SHR2_I]], [[CONV_I]]

diff  --git a/llvm/test/Transforms/InstCombine/insert-trunc.ll b/llvm/test/Transforms/InstCombine/insert-trunc.ll
index 3ae128e55b43b9..3a160513ccb19d 100644
--- a/llvm/test/Transforms/InstCombine/insert-trunc.ll
+++ b/llvm/test/Transforms/InstCombine/insert-trunc.ll
@@ -146,7 +146,7 @@ define <4 x i16> @lshr_same_length_poison_basevec_be(i64 %x) {
 define <4 x i16> @lshr_same_length_poison_basevec_both_endian(i64 %x) {
 ; ALL-LABEL: @lshr_same_length_poison_basevec_both_endian(
 ; ALL-NEXT:    [[S:%.*]] = lshr i64 [[X:%.*]], 48
-; ALL-NEXT:    [[T:%.*]] = trunc i64 [[S]] to i16
+; ALL-NEXT:    [[T:%.*]] = trunc nuw i64 [[S]] to i16
 ; ALL-NEXT:    [[R:%.*]] = insertelement <4 x i16> poison, i16 [[T]], i64 0
 ; ALL-NEXT:    ret <4 x i16> [[R]]
 ;
@@ -159,7 +159,7 @@ define <4 x i16> @lshr_same_length_poison_basevec_both_endian(i64 %x) {
 define <4 x i16> @lshr_wrong_index_same_length_poison_basevec(i64 %x) {
 ; ALL-LABEL: @lshr_wrong_index_same_length_poison_basevec(
 ; ALL-NEXT:    [[S:%.*]] = lshr i64 [[X:%.*]], 48
-; ALL-NEXT:    [[T:%.*]] = trunc i64 [[S]] to i16
+; ALL-NEXT:    [[T:%.*]] = trunc nuw i64 [[S]] to i16
 ; ALL-NEXT:    [[R:%.*]] = insertelement <4 x i16> poison, i16 [[T]], i64 1
 ; ALL-NEXT:    ret <4 x i16> [[R]]
 ;
@@ -172,7 +172,7 @@ define <4 x i16> @lshr_wrong_index_same_length_poison_basevec(i64 %x) {
 define <8 x i16> @lshr_longer_length_poison_basevec_le(i64 %x) {
 ; ALL-LABEL: @lshr_longer_length_poison_basevec_le(
 ; ALL-NEXT:    [[S:%.*]] = lshr i64 [[X:%.*]], 48
-; ALL-NEXT:    [[T:%.*]] = trunc i64 [[S]] to i16
+; ALL-NEXT:    [[T:%.*]] = trunc nuw i64 [[S]] to i16
 ; ALL-NEXT:    [[R:%.*]] = insertelement <8 x i16> poison, i16 [[T]], i64 3
 ; ALL-NEXT:    ret <8 x i16> [[R]]
 ;
@@ -250,7 +250,7 @@ define <4 x i8> @lshr_wrong_index_shorter_length_poison_basevec(i64 %x) {
 define <4 x i8> @lshr_wrong_shift_shorter_length_poison_basevec(i64 %x) {
 ; ALL-LABEL: @lshr_wrong_shift_shorter_length_poison_basevec(
 ; ALL-NEXT:    [[S:%.*]] = lshr i64 [[X:%.*]], 57
-; ALL-NEXT:    [[T:%.*]] = trunc i64 [[S]] to i8
+; ALL-NEXT:    [[T:%.*]] = trunc nuw nsw i64 [[S]] to i8
 ; ALL-NEXT:    [[R:%.*]] = insertelement <4 x i8> poison, i8 [[T]], i64 0
 ; ALL-NEXT:    ret <4 x i8> [[R]]
 ;
@@ -392,7 +392,7 @@ define <4 x i16> @lshr_same_length_basevec_be(i64 %x, <4 x i16> %v) {
 define <4 x i16> @lshr_same_length_basevec_both_endian(i64 %x, <4 x i16> %v) {
 ; ALL-LABEL: @lshr_same_length_basevec_both_endian(
 ; ALL-NEXT:    [[S:%.*]] = lshr i64 [[X:%.*]], 48
-; ALL-NEXT:    [[T:%.*]] = trunc i64 [[S]] to i16
+; ALL-NEXT:    [[T:%.*]] = trunc nuw i64 [[S]] to i16
 ; ALL-NEXT:    [[R:%.*]] = insertelement <4 x i16> [[V:%.*]], i16 [[T]], i64 3
 ; ALL-NEXT:    ret <4 x i16> [[R]]
 ;
@@ -405,7 +405,7 @@ define <4 x i16> @lshr_same_length_basevec_both_endian(i64 %x, <4 x i16> %v) {
 define <4 x i16> @lshr_wrong_index_same_length_basevec(i64 %x, <4 x i16> %v) {
 ; ALL-LABEL: @lshr_wrong_index_same_length_basevec(
 ; ALL-NEXT:    [[S:%.*]] = lshr i64 [[X:%.*]], 48
-; ALL-NEXT:    [[T:%.*]] = trunc i64 [[S]] to i16
+; ALL-NEXT:    [[T:%.*]] = trunc nuw i64 [[S]] to i16
 ; ALL-NEXT:    [[R:%.*]] = insertelement <4 x i16> [[V:%.*]], i16 [[T]], i64 1
 ; ALL-NEXT:    ret <4 x i16> [[R]]
 ;
@@ -418,7 +418,7 @@ define <4 x i16> @lshr_wrong_index_same_length_basevec(i64 %x, <4 x i16> %v) {
 define <8 x i16> @lshr_longer_length_basevec_le(i64 %x, <8 x i16> %v) {
 ; ALL-LABEL: @lshr_longer_length_basevec_le(
 ; ALL-NEXT:    [[S:%.*]] = lshr i64 [[X:%.*]], 48
-; ALL-NEXT:    [[T:%.*]] = trunc i64 [[S]] to i16
+; ALL-NEXT:    [[T:%.*]] = trunc nuw i64 [[S]] to i16
 ; ALL-NEXT:    [[R:%.*]] = insertelement <8 x i16> [[V:%.*]], i16 [[T]], i64 3
 ; ALL-NEXT:    ret <8 x i16> [[R]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/insertelt-trunc.ll b/llvm/test/Transforms/InstCombine/insertelt-trunc.ll
index a2721bf13e7433..f5f1051ea2014a 100644
--- a/llvm/test/Transforms/InstCombine/insertelt-trunc.ll
+++ b/llvm/test/Transforms/InstCombine/insertelt-trunc.ll
@@ -9,7 +9,7 @@ declare void @use_vec(<8 x i16>)
 define <4 x i16> @insert_01_poison_v4i16(i32 %x) {
 ; BE-LABEL: @insert_01_poison_v4i16(
 ; BE-NEXT:    [[HI32:%.*]] = lshr i32 [[X:%.*]], 16
-; BE-NEXT:    [[HI16:%.*]] = trunc i32 [[HI32]] to i16
+; BE-NEXT:    [[HI16:%.*]] = trunc nuw i32 [[HI32]] to i16
 ; BE-NEXT:    [[LO16:%.*]] = trunc i32 [[X]] to i16
 ; BE-NEXT:    [[INS0:%.*]] = insertelement <4 x i16> poison, i16 [[LO16]], i64 0
 ; BE-NEXT:    [[INS1:%.*]] = insertelement <4 x i16> [[INS0]], i16 [[HI16]], i64 1
@@ -36,7 +36,7 @@ define <8 x i16> @insert_10_poison_v8i16(i32 %x) {
 ;
 ; LE-LABEL: @insert_10_poison_v8i16(
 ; LE-NEXT:    [[HI32:%.*]] = lshr i32 [[X:%.*]], 16
-; LE-NEXT:    [[HI16:%.*]] = trunc i32 [[HI32]] to i16
+; LE-NEXT:    [[HI16:%.*]] = trunc nuw i32 [[HI32]] to i16
 ; LE-NEXT:    [[LO16:%.*]] = trunc i32 [[X]] to i16
 ; LE-NEXT:    [[TMP1:%.*]] = insertelement <8 x i16> poison, i16 [[HI16]], i64 0
 ; LE-NEXT:    [[INS1:%.*]] = insertelement <8 x i16> [[TMP1]], i16 [[LO16]], i64 1
@@ -55,7 +55,7 @@ define <8 x i16> @insert_10_poison_v8i16(i32 %x) {
 define <4 x i32> @insert_12_poison_v4i32(i64 %x) {
 ; ALL-LABEL: @insert_12_poison_v4i32(
 ; ALL-NEXT:    [[HI64:%.*]] = lshr i64 [[X:%.*]], 32
-; ALL-NEXT:    [[HI32:%.*]] = trunc i64 [[HI64]] to i32
+; ALL-NEXT:    [[HI32:%.*]] = trunc nuw i64 [[HI64]] to i32
 ; ALL-NEXT:    [[LO32:%.*]] = trunc i64 [[X]] to i32
 ; ALL-NEXT:    [[INS0:%.*]] = insertelement <4 x i32> poison, i32 [[LO32]], i64 1
 ; ALL-NEXT:    [[INS1:%.*]] = insertelement <4 x i32> [[INS0]], i32 [[HI32]], i64 2
@@ -74,7 +74,7 @@ define <4 x i32> @insert_12_poison_v4i32(i64 %x) {
 define <4 x i16> @insert_21_poison_v4i16(i32 %x) {
 ; ALL-LABEL: @insert_21_poison_v4i16(
 ; ALL-NEXT:    [[HI32:%.*]] = lshr i32 [[X:%.*]], 16
-; ALL-NEXT:    [[HI16:%.*]] = trunc i32 [[HI32]] to i16
+; ALL-NEXT:    [[HI16:%.*]] = trunc nuw i32 [[HI32]] to i16
 ; ALL-NEXT:    [[LO16:%.*]] = trunc i32 [[X]] to i16
 ; ALL-NEXT:    [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 [[HI16]], i64 1
 ; ALL-NEXT:    [[INS1:%.*]] = insertelement <4 x i16> [[TMP1]], i16 [[LO16]], i64 2
@@ -91,7 +91,7 @@ define <4 x i16> @insert_21_poison_v4i16(i32 %x) {
 define <4 x i32> @insert_23_poison_v4i32(i64 %x) {
 ; BE-LABEL: @insert_23_poison_v4i32(
 ; BE-NEXT:    [[HI64:%.*]] = lshr i64 [[X:%.*]], 32
-; BE-NEXT:    [[HI32:%.*]] = trunc i64 [[HI64]] to i32
+; BE-NEXT:    [[HI32:%.*]] = trunc nuw i64 [[HI64]] to i32
 ; BE-NEXT:    [[LO32:%.*]] = trunc i64 [[X]] to i32
 ; BE-NEXT:    [[INS0:%.*]] = insertelement <4 x i32> poison, i32 [[LO32]], i64 2
 ; BE-NEXT:    [[INS1:%.*]] = insertelement <4 x i32> [[INS0]], i32 [[HI32]], i64 3
@@ -118,7 +118,7 @@ define <4 x i16> @insert_32_poison_v4i16(i32 %x) {
 ;
 ; LE-LABEL: @insert_32_poison_v4i16(
 ; LE-NEXT:    [[HI32:%.*]] = lshr i32 [[X:%.*]], 16
-; LE-NEXT:    [[HI16:%.*]] = trunc i32 [[HI32]] to i16
+; LE-NEXT:    [[HI16:%.*]] = trunc nuw i32 [[HI32]] to i16
 ; LE-NEXT:    [[LO16:%.*]] = trunc i32 [[X]] to i16
 ; LE-NEXT:    [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 [[HI16]], i64 2
 ; LE-NEXT:    [[INS1:%.*]] = insertelement <4 x i16> [[TMP1]], i16 [[LO16]], i64 3
@@ -140,7 +140,7 @@ define <4 x i16> @insert_32_poison_v4i16(i32 %x) {
 define <2 x i16> @insert_01_v2i16(i32 %x, <2 x i16> %v) {
 ; BE-LABEL: @insert_01_v2i16(
 ; BE-NEXT:    [[HI32:%.*]] = lshr i32 [[X:%.*]], 16
-; BE-NEXT:    [[HI16:%.*]] = trunc i32 [[HI32]] to i16
+; BE-NEXT:    [[HI16:%.*]] = trunc nuw i32 [[HI32]] to i16
 ; BE-NEXT:    [[LO16:%.*]] = trunc i32 [[X]] to i16
 ; BE-NEXT:    [[INS0:%.*]] = insertelement <2 x i16> poison, i16 [[LO16]], i64 0
 ; BE-NEXT:    [[INS1:%.*]] = insertelement <2 x i16> [[INS0]], i16 [[HI16]], i64 1
@@ -163,7 +163,7 @@ define <2 x i16> @insert_01_v2i16(i32 %x, <2 x i16> %v) {
 define <8 x i16> @insert_10_v8i16(i32 %x, <8 x i16> %v) {
 ; ALL-LABEL: @insert_10_v8i16(
 ; ALL-NEXT:    [[HI32:%.*]] = lshr i32 [[X:%.*]], 16
-; ALL-NEXT:    [[HI16:%.*]] = trunc i32 [[HI32]] to i16
+; ALL-NEXT:    [[HI16:%.*]] = trunc nuw i32 [[HI32]] to i16
 ; ALL-NEXT:    [[LO16:%.*]] = trunc i32 [[X]] to i16
 ; ALL-NEXT:    [[TMP1:%.*]] = insertelement <8 x i16> [[V:%.*]], i16 [[HI16]], i64 0
 ; ALL-NEXT:    [[INS1:%.*]] = insertelement <8 x i16> [[TMP1]], i16 [[LO16]], i64 1
@@ -182,7 +182,7 @@ define <8 x i16> @insert_10_v8i16(i32 %x, <8 x i16> %v) {
 define <4 x i32> @insert_12_v4i32(i64 %x, <4 x i32> %v) {
 ; ALL-LABEL: @insert_12_v4i32(
 ; ALL-NEXT:    [[HI64:%.*]] = lshr i64 [[X:%.*]], 32
-; ALL-NEXT:    [[HI32:%.*]] = trunc i64 [[HI64]] to i32
+; ALL-NEXT:    [[HI32:%.*]] = trunc nuw i64 [[HI64]] to i32
 ; ALL-NEXT:    [[LO32:%.*]] = trunc i64 [[X]] to i32
 ; ALL-NEXT:    [[INS0:%.*]] = insertelement <4 x i32> [[V:%.*]], i32 [[LO32]], i64 1
 ; ALL-NEXT:    [[INS1:%.*]] = insertelement <4 x i32> [[INS0]], i32 [[HI32]], i64 2
@@ -201,7 +201,7 @@ define <4 x i32> @insert_12_v4i32(i64 %x, <4 x i32> %v) {
 define <4 x i16> @insert_21_v4i16(i32 %x, <4 x i16> %v) {
 ; ALL-LABEL: @insert_21_v4i16(
 ; ALL-NEXT:    [[HI32:%.*]] = lshr i32 [[X:%.*]], 16
-; ALL-NEXT:    [[HI16:%.*]] = trunc i32 [[HI32]] to i16
+; ALL-NEXT:    [[HI16:%.*]] = trunc nuw i32 [[HI32]] to i16
 ; ALL-NEXT:    [[LO16:%.*]] = trunc i32 [[X]] to i16
 ; ALL-NEXT:    [[TMP1:%.*]] = insertelement <4 x i16> [[V:%.*]], i16 [[HI16]], i64 1
 ; ALL-NEXT:    [[INS1:%.*]] = insertelement <4 x i16> [[TMP1]], i16 [[LO16]], i64 2
@@ -220,7 +220,7 @@ define <4 x i16> @insert_21_v4i16(i32 %x, <4 x i16> %v) {
 define <4 x i32> @insert_23_v4i32(i64 %x, <4 x i32> %v) {
 ; ALL-LABEL: @insert_23_v4i32(
 ; ALL-NEXT:    [[HI64:%.*]] = lshr i64 [[X:%.*]], 32
-; ALL-NEXT:    [[HI32:%.*]] = trunc i64 [[HI64]] to i32
+; ALL-NEXT:    [[HI32:%.*]] = trunc nuw i64 [[HI64]] to i32
 ; ALL-NEXT:    [[LO32:%.*]] = trunc i64 [[X]] to i32
 ; ALL-NEXT:    [[INS0:%.*]] = insertelement <4 x i32> [[V:%.*]], i32 [[LO32]], i64 2
 ; ALL-NEXT:    [[INS1:%.*]] = insertelement <4 x i32> [[INS0]], i32 [[HI32]], i64 3
@@ -239,7 +239,7 @@ define <4 x i32> @insert_23_v4i32(i64 %x, <4 x i32> %v) {
 define <4 x i16> @insert_32_v4i16(i32 %x, <4 x i16> %v) {
 ; ALL-LABEL: @insert_32_v4i16(
 ; ALL-NEXT:    [[HI32:%.*]] = lshr i32 [[X:%.*]], 16
-; ALL-NEXT:    [[HI16:%.*]] = trunc i32 [[HI32]] to i16
+; ALL-NEXT:    [[HI16:%.*]] = trunc nuw i32 [[HI32]] to i16
 ; ALL-NEXT:    [[LO16:%.*]] = trunc i32 [[X]] to i16
 ; ALL-NEXT:    [[TMP1:%.*]] = insertelement <4 x i16> [[V:%.*]], i16 [[HI16]], i64 2
 ; ALL-NEXT:    [[INS1:%.*]] = insertelement <4 x i16> [[TMP1]], i16 [[LO16]], i64 3
@@ -277,7 +277,7 @@ define <4 x i16> @insert_01_v4i16_wrong_shift1(i32 %x) {
 define <4 x i16> @insert_01_v4i16_wrong_op(i32 %x, i32 %y) {
 ; ALL-LABEL: @insert_01_v4i16_wrong_op(
 ; ALL-NEXT:    [[HI32:%.*]] = lshr i32 [[X:%.*]], 16
-; ALL-NEXT:    [[HI16:%.*]] = trunc i32 [[HI32]] to i16
+; ALL-NEXT:    [[HI16:%.*]] = trunc nuw i32 [[HI32]] to i16
 ; ALL-NEXT:    [[LO16:%.*]] = trunc i32 [[Y:%.*]] to i16
 ; ALL-NEXT:    [[INS0:%.*]] = insertelement <4 x i16> poison, i16 [[LO16]], i64 0
 ; ALL-NEXT:    [[INS1:%.*]] = insertelement <4 x i16> [[INS0]], i16 [[HI16]], i64 1
@@ -296,7 +296,7 @@ define <4 x i16> @insert_01_v4i16_wrong_op(i32 %x, i32 %y) {
 define <8 x i16> @insert_67_v4i16_uses1(i32 %x, <8 x i16> %v) {
 ; ALL-LABEL: @insert_67_v4i16_uses1(
 ; ALL-NEXT:    [[HI32:%.*]] = lshr i32 [[X:%.*]], 16
-; ALL-NEXT:    [[HI16:%.*]] = trunc i32 [[HI32]] to i16
+; ALL-NEXT:    [[HI16:%.*]] = trunc nuw i32 [[HI32]] to i16
 ; ALL-NEXT:    call void @use(i16 [[HI16]])
 ; ALL-NEXT:    [[LO16:%.*]] = trunc i32 [[X]] to i16
 ; ALL-NEXT:    [[INS0:%.*]] = insertelement <8 x i16> [[V:%.*]], i16 [[LO16]], i64 6
@@ -318,7 +318,7 @@ define <8 x i16> @insert_67_v4i16_uses1(i32 %x, <8 x i16> %v) {
 define <8 x i16> @insert_76_v4i16_uses2(i32 %x, <8 x i16> %v) {
 ; ALL-LABEL: @insert_76_v4i16_uses2(
 ; ALL-NEXT:    [[HI32:%.*]] = lshr i32 [[X:%.*]], 16
-; ALL-NEXT:    [[HI16:%.*]] = trunc i32 [[HI32]] to i16
+; ALL-NEXT:    [[HI16:%.*]] = trunc nuw i32 [[HI32]] to i16
 ; ALL-NEXT:    [[LO16:%.*]] = trunc i32 [[X]] to i16
 ; ALL-NEXT:    call void @use(i16 [[LO16]])
 ; ALL-NEXT:    [[TMP1:%.*]] = insertelement <8 x i16> [[V:%.*]], i16 [[HI16]], i64 6
@@ -339,7 +339,7 @@ define <8 x i16> @insert_76_v4i16_uses2(i32 %x, <8 x i16> %v) {
 define <8 x i16> @insert_67_v4i16_uses3(i32 %x, <8 x i16> %v) {
 ; ALL-LABEL: @insert_67_v4i16_uses3(
 ; ALL-NEXT:    [[HI32:%.*]] = lshr i32 [[X:%.*]], 16
-; ALL-NEXT:    [[HI16:%.*]] = trunc i32 [[HI32]] to i16
+; ALL-NEXT:    [[HI16:%.*]] = trunc nuw i32 [[HI32]] to i16
 ; ALL-NEXT:    [[LO16:%.*]] = trunc i32 [[X]] to i16
 ; ALL-NEXT:    [[INS0:%.*]] = insertelement <8 x i16> [[V:%.*]], i16 [[LO16]], i64 6
 ; ALL-NEXT:    call void @use_vec(<8 x i16> [[INS0]])
@@ -360,7 +360,7 @@ define <8 x i16> @insert_67_v4i16_uses3(i32 %x, <8 x i16> %v) {
 define <4 x i16> @insert_01_poison_v4i16_high_first(i32 %x) {
 ; BE-LABEL: @insert_01_poison_v4i16_high_first(
 ; BE-NEXT:    [[HI32:%.*]] = lshr i32 [[X:%.*]], 16
-; BE-NEXT:    [[HI16:%.*]] = trunc i32 [[HI32]] to i16
+; BE-NEXT:    [[HI16:%.*]] = trunc nuw i32 [[HI32]] to i16
 ; BE-NEXT:    [[LO16:%.*]] = trunc i32 [[X]] to i16
 ; BE-NEXT:    [[TMP1:%.*]] = insertelement <4 x i16> poison, i16 [[LO16]], i64 0
 ; BE-NEXT:    [[INS0:%.*]] = insertelement <4 x i16> [[TMP1]], i16 [[HI16]], i64 1

diff --git a/llvm/test/Transforms/InstCombine/known-bits.ll b/llvm/test/Transforms/InstCombine/known-bits.ll
index e27e4a3eddfbb1..d210b19bb7faf2 100644
--- a/llvm/test/Transforms/InstCombine/known-bits.ll
+++ b/llvm/test/Transforms/InstCombine/known-bits.ll
@@ -455,7 +455,7 @@ define i64 @test_icmp_trunc5(i64 %n) {
 ; CHECK-LABEL: @test_icmp_trunc5(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[SHR:%.*]] = ashr i64 [[N:%.*]], 47
-; CHECK-NEXT:    [[CONV1:%.*]] = trunc i64 [[SHR]] to i32
+; CHECK-NEXT:    [[CONV1:%.*]] = trunc nsw i64 [[SHR]] to i32
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i32 [[CONV1]], -13
 ; CHECK-NEXT:    br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
 ; CHECK:       if.then:

diff --git a/llvm/test/Transforms/InstCombine/known-non-zero.ll b/llvm/test/Transforms/InstCombine/known-non-zero.ll
index 7965b47911c41d..f1c757cafefb0f 100644
--- a/llvm/test/Transforms/InstCombine/known-non-zero.ll
+++ b/llvm/test/Transforms/InstCombine/known-non-zero.ll
@@ -14,7 +14,7 @@ define i32 @test0(i64 %x) {
 ; CHECK-NEXT:    br i1 [[C]], label [[EXIT:%.*]], label [[NON_ZERO:%.*]]
 ; CHECK:       non_zero:
 ; CHECK-NEXT:    [[CTZ:%.*]] = call i64 @llvm.cttz.i64(i64 [[X]], i1 true), !range [[RNG0:![0-9]+]]
-; CHECK-NEXT:    [[CTZ32:%.*]] = trunc i64 [[CTZ]] to i32
+; CHECK-NEXT:    [[CTZ32:%.*]] = trunc nuw nsw i64 [[CTZ]] to i32
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[RES:%.*]] = phi i32 [ [[CTZ32]], [[NON_ZERO]] ], [ 0, [[START:%.*]] ]
@@ -41,7 +41,7 @@ define i32 @test1(i64 %x) {
 ; CHECK-NEXT:    br i1 [[C]], label [[EXIT:%.*]], label [[NON_ZERO:%.*]]
 ; CHECK:       non_zero:
 ; CHECK-NEXT:    [[CTZ:%.*]] = call i64 @llvm.ctlz.i64(i64 [[X]], i1 true), !range [[RNG0]]
-; CHECK-NEXT:    [[CTZ32:%.*]] = trunc i64 [[CTZ]] to i32
+; CHECK-NEXT:    [[CTZ32:%.*]] = trunc nuw nsw i64 [[CTZ]] to i32
 ; CHECK-NEXT:    br label [[EXIT]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[RES:%.*]] = phi i32 [ [[CTZ32]], [[NON_ZERO]] ], [ 0, [[START:%.*]] ]

diff --git a/llvm/test/Transforms/InstCombine/known-phi-recurse.ll b/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
index 78654155c36cd6..d33e08ffaf9b77 100644
--- a/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
+++ b/llvm/test/Transforms/InstCombine/known-phi-recurse.ll
@@ -17,7 +17,7 @@ define i32 @single_entry_phi(i64 %x, i1 %c) {
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[END:%.*]], label [[BODY]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[Y:%.*]] = call i64 @llvm.ctpop.i64(i64 [[X:%.*]]), !range [[RNG0:![0-9]+]]
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[Y]] to i32
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc nuw nsw i64 [[Y]] to i32
 ; CHECK-NEXT:    ret i32 [[TRUNC]]
 ;
 entry:
@@ -37,7 +37,7 @@ define i32 @two_entry_phi_with_constant(i64 %x, i1 %c) {
 ; CHECK-LABEL: @two_entry_phi_with_constant(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[Y:%.*]] = call i64 @llvm.ctpop.i64(i64 [[X:%.*]]), !range [[RNG0]]
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[Y]] to i32
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc nuw nsw i64 [[Y]] to i32
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[END:%.*]], label [[BODY:%.*]]
 ; CHECK:       body:
 ; CHECK-NEXT:    br label [[END]]
@@ -62,11 +62,11 @@ define i32 @two_entry_phi_non_constant(i64 %x, i64 %x2, i1 %c) {
 ; CHECK-LABEL: @two_entry_phi_non_constant(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[Y:%.*]] = call i64 @llvm.ctpop.i64(i64 [[X:%.*]]), !range [[RNG0]]
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[Y]] to i32
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc nuw nsw i64 [[Y]] to i32
 ; CHECK-NEXT:    br i1 [[C:%.*]], label [[END:%.*]], label [[BODY:%.*]]
 ; CHECK:       body:
 ; CHECK-NEXT:    [[Y2:%.*]] = call i64 @llvm.ctpop.i64(i64 [[X2:%.*]]), !range [[RNG0]]
-; CHECK-NEXT:    [[TRUNC2:%.*]] = trunc i64 [[Y2]] to i32
+; CHECK-NEXT:    [[TRUNC2:%.*]] = trunc nuw nsw i64 [[Y2]] to i32
 ; CHECK-NEXT:    br label [[END]]
 ; CHECK:       end:
 ; CHECK-NEXT:    [[PHI:%.*]] = phi i32 [ [[TRUNC]], [[ENTRY:%.*]] ], [ [[TRUNC2]], [[BODY]] ]
@@ -91,12 +91,12 @@ define i32 @neg_many_branches(i64 %x) {
 ; CHECK-LABEL: @neg_many_branches(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[Y:%.*]] = call i64 @llvm.ctpop.i64(i64 [[X:%.*]]), !range [[RNG0]]
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[Y]] to i32
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc nuw nsw i64 [[Y]] to i32
 ; CHECK-NEXT:    switch i32 [[TRUNC]], label [[END:%.*]] [
-; CHECK-NEXT:    i32 1, label [[ONE:%.*]]
-; CHECK-NEXT:    i32 2, label [[TWO:%.*]]
-; CHECK-NEXT:    i32 3, label [[THREE:%.*]]
-; CHECK-NEXT:    i32 4, label [[FOUR:%.*]]
+; CHECK-NEXT:      i32 1, label [[ONE:%.*]]
+; CHECK-NEXT:      i32 2, label [[TWO:%.*]]
+; CHECK-NEXT:      i32 3, label [[THREE:%.*]]
+; CHECK-NEXT:      i32 4, label [[FOUR:%.*]]
 ; CHECK-NEXT:    ]
 ; CHECK:       one:
 ; CHECK-NEXT:    [[A:%.*]] = add nuw nsw i32 [[TRUNC]], 1

diff --git a/llvm/test/Transforms/InstCombine/logical-select-inseltpoison.ll b/llvm/test/Transforms/InstCombine/logical-select-inseltpoison.ll
index b3d147621b59e5..20d60206ebcdff 100644
--- a/llvm/test/Transforms/InstCombine/logical-select-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/logical-select-inseltpoison.ll
@@ -647,7 +647,7 @@ define <4 x i32> @computesignbits_through_shuffles(<4 x float> %x, <4 x float> %
 ; CHECK-NEXT:    [[S3:%.*]] = shufflevector <4 x i32> [[SHUF_OR1]], <4 x i32> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
 ; CHECK-NEXT:    [[S4:%.*]] = shufflevector <4 x i32> [[SHUF_OR1]], <4 x i32> poison, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
 ; CHECK-NEXT:    [[SHUF_OR2:%.*]] = or <4 x i32> [[S3]], [[S4]]
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc <4 x i32> [[SHUF_OR2]] to <4 x i1>
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc nsw <4 x i32> [[SHUF_OR2]] to <4 x i1>
 ; CHECK-NEXT:    [[SEL_V:%.*]] = select <4 x i1> [[TMP1]], <4 x float> [[Z:%.*]], <4 x float> [[X]]
 ; CHECK-NEXT:    [[SEL:%.*]] = bitcast <4 x float> [[SEL_V]] to <4 x i32>
 ; CHECK-NEXT:    ret <4 x i32> [[SEL]]

diff --git a/llvm/test/Transforms/InstCombine/logical-select.ll b/llvm/test/Transforms/InstCombine/logical-select.ll
index c850b87bb2dd4c..6e2ed6bf796d08 100644
--- a/llvm/test/Transforms/InstCombine/logical-select.ll
+++ b/llvm/test/Transforms/InstCombine/logical-select.ll
@@ -683,7 +683,7 @@ define <4 x i32> @computesignbits_through_shuffles(<4 x float> %x, <4 x float> %
 ; CHECK-NEXT:    [[S3:%.*]] = shufflevector <4 x i32> [[SHUF_OR1]], <4 x i32> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
 ; CHECK-NEXT:    [[S4:%.*]] = shufflevector <4 x i32> [[SHUF_OR1]], <4 x i32> poison, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
 ; CHECK-NEXT:    [[SHUF_OR2:%.*]] = or <4 x i32> [[S3]], [[S4]]
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc <4 x i32> [[SHUF_OR2]] to <4 x i1>
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc nsw <4 x i32> [[SHUF_OR2]] to <4 x i1>
 ; CHECK-NEXT:    [[SEL_V:%.*]] = select <4 x i1> [[TMP1]], <4 x float> [[Z:%.*]], <4 x float> [[X]]
 ; CHECK-NEXT:    [[SEL:%.*]] = bitcast <4 x float> [[SEL_V]] to <4 x i32>
 ; CHECK-NEXT:    ret <4 x i32> [[SEL]]

diff --git a/llvm/test/Transforms/InstCombine/lshr-trunc-sext-to-ashr-sext.ll b/llvm/test/Transforms/InstCombine/lshr-trunc-sext-to-ashr-sext.ll
index 8d82213d8022f7..8e7491ee403701 100644
--- a/llvm/test/Transforms/InstCombine/lshr-trunc-sext-to-ashr-sext.ll
+++ b/llvm/test/Transforms/InstCombine/lshr-trunc-sext-to-ashr-sext.ll
@@ -91,7 +91,7 @@ define <2 x i16> @t5_vec_undef(<2 x i8> %x) {
 define i16 @t6_extrause0(i8 %x) {
 ; CHECK-LABEL: @t6_extrause0(
 ; CHECK-NEXT:    [[A:%.*]] = lshr i8 [[X:%.*]], 4
-; CHECK-NEXT:    [[B:%.*]] = trunc i8 [[A]] to i4
+; CHECK-NEXT:    [[B:%.*]] = trunc nuw i8 [[A]] to i4
 ; CHECK-NEXT:    call void @use4(i4 [[B]])
 ; CHECK-NEXT:    [[C:%.*]] = sext i4 [[B]] to i16
 ; CHECK-NEXT:    ret i16 [[C]]
@@ -157,7 +157,7 @@ define i16 @t10_extrause2(i8 %x) {
 ; CHECK-LABEL: @t10_extrause2(
 ; CHECK-NEXT:    [[A:%.*]] = lshr i8 [[X:%.*]], 4
 ; CHECK-NEXT:    call void @use8(i8 [[A]])
-; CHECK-NEXT:    [[B:%.*]] = trunc i8 [[A]] to i4
+; CHECK-NEXT:    [[B:%.*]] = trunc nuw i8 [[A]] to i4
 ; CHECK-NEXT:    call void @use4(i4 [[B]])
 ; CHECK-NEXT:    [[C:%.*]] = sext i4 [[B]] to i16
 ; CHECK-NEXT:    ret i16 [[C]]
@@ -189,7 +189,7 @@ define <2 x i16> @t11_extrause2_vec_undef(<2 x i8> %x) {
 define <2 x i10> @wide_source_shifted_signbit(<2 x i32> %x) {
 ; CHECK-LABEL: @wide_source_shifted_signbit(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <2 x i32> [[X:%.*]], <i32 24, i32 24>
-; CHECK-NEXT:    [[C:%.*]] = trunc <2 x i32> [[TMP1]] to <2 x i10>
+; CHECK-NEXT:    [[C:%.*]] = trunc nsw <2 x i32> [[TMP1]] to <2 x i10>
 ; CHECK-NEXT:    ret <2 x i10> [[C]]
 ;
   %a = lshr <2 x i32> %x, <i32 24, i32 24>
@@ -203,7 +203,7 @@ define i10 @wide_source_shifted_signbit_use1(i32 %x) {
 ; CHECK-NEXT:    [[A:%.*]] = lshr i32 [[X:%.*]], 24
 ; CHECK-NEXT:    call void @use32(i32 [[A]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X]], 24
-; CHECK-NEXT:    [[C:%.*]] = trunc i32 [[TMP1]] to i10
+; CHECK-NEXT:    [[C:%.*]] = trunc nsw i32 [[TMP1]] to i10
 ; CHECK-NEXT:    ret i10 [[C]]
 ;
   %a = lshr i32 %x, 24
@@ -216,7 +216,7 @@ define i10 @wide_source_shifted_signbit_use1(i32 %x) {
 define i10 @wide_source_shifted_signbit_use2(i32 %x) {
 ; CHECK-LABEL: @wide_source_shifted_signbit_use2(
 ; CHECK-NEXT:    [[A:%.*]] = lshr i32 [[X:%.*]], 24
-; CHECK-NEXT:    [[B:%.*]] = trunc i32 [[A]] to i8
+; CHECK-NEXT:    [[B:%.*]] = trunc nuw i32 [[A]] to i8
 ; CHECK-NEXT:    call void @use8(i8 [[B]])
 ; CHECK-NEXT:    [[C:%.*]] = sext i8 [[B]] to i10
 ; CHECK-NEXT:    ret i10 [[C]]
@@ -256,7 +256,7 @@ define i32 @same_source_shifted_signbit_use1(i32 %x) {
 define i32 @same_source_shifted_signbit_use2(i32 %x) {
 ; CHECK-LABEL: @same_source_shifted_signbit_use2(
 ; CHECK-NEXT:    [[A:%.*]] = lshr i32 [[X:%.*]], 24
-; CHECK-NEXT:    [[B:%.*]] = trunc i32 [[A]] to i8
+; CHECK-NEXT:    [[B:%.*]] = trunc nuw i32 [[A]] to i8
 ; CHECK-NEXT:    call void @use8(i8 [[B]])
 ; CHECK-NEXT:    [[C:%.*]] = sext i8 [[B]] to i32
 ; CHECK-NEXT:    ret i32 [[C]]

diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index 02c2bbc2819b8c..7d611ba188d6b4 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -476,7 +476,7 @@ define i32 @srem2_lshr30(i32 %x) {
 define i12 @trunc_sandwich(i32 %x) {
 ; CHECK-LABEL: @trunc_sandwich(
 ; CHECK-NEXT:    [[SUM_SHIFT:%.*]] = lshr i32 [[X:%.*]], 30
-; CHECK-NEXT:    [[R1:%.*]] = trunc i32 [[SUM_SHIFT]] to i12
+; CHECK-NEXT:    [[R1:%.*]] = trunc nuw nsw i32 [[SUM_SHIFT]] to i12
 ; CHECK-NEXT:    ret i12 [[R1]]
 ;
   %sh = lshr i32 %x, 28
@@ -488,7 +488,7 @@ define i12 @trunc_sandwich(i32 %x) {
 define <2 x i12> @trunc_sandwich_splat_vec(<2 x i32> %x) {
 ; CHECK-LABEL: @trunc_sandwich_splat_vec(
 ; CHECK-NEXT:    [[SUM_SHIFT:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 30, i32 30>
-; CHECK-NEXT:    [[R1:%.*]] = trunc <2 x i32> [[SUM_SHIFT]] to <2 x i12>
+; CHECK-NEXT:    [[R1:%.*]] = trunc nuw nsw <2 x i32> [[SUM_SHIFT]] to <2 x i12>
 ; CHECK-NEXT:    ret <2 x i12> [[R1]]
 ;
   %sh = lshr <2 x i32> %x, <i32 22, i32 22>
@@ -500,7 +500,7 @@ define <2 x i12> @trunc_sandwich_splat_vec(<2 x i32> %x) {
 define i12 @trunc_sandwich_min_shift1(i32 %x) {
 ; CHECK-LABEL: @trunc_sandwich_min_shift1(
 ; CHECK-NEXT:    [[SUM_SHIFT:%.*]] = lshr i32 [[X:%.*]], 21
-; CHECK-NEXT:    [[R1:%.*]] = trunc i32 [[SUM_SHIFT]] to i12
+; CHECK-NEXT:    [[R1:%.*]] = trunc nuw nsw i32 [[SUM_SHIFT]] to i12
 ; CHECK-NEXT:    ret i12 [[R1]]
 ;
   %sh = lshr i32 %x, 20
@@ -512,7 +512,7 @@ define i12 @trunc_sandwich_min_shift1(i32 %x) {
 define i12 @trunc_sandwich_small_shift1(i32 %x) {
 ; CHECK-LABEL: @trunc_sandwich_small_shift1(
 ; CHECK-NEXT:    [[SUM_SHIFT:%.*]] = lshr i32 [[X:%.*]], 20
-; CHECK-NEXT:    [[R1:%.*]] = trunc i32 [[SUM_SHIFT]] to i12
+; CHECK-NEXT:    [[R1:%.*]] = trunc nuw i32 [[SUM_SHIFT]] to i12
 ; CHECK-NEXT:    [[R:%.*]] = and i12 [[R1]], 2047
 ; CHECK-NEXT:    ret i12 [[R]]
 ;
@@ -525,7 +525,7 @@ define i12 @trunc_sandwich_small_shift1(i32 %x) {
 define i12 @trunc_sandwich_max_sum_shift(i32 %x) {
 ; CHECK-LABEL: @trunc_sandwich_max_sum_shift(
 ; CHECK-NEXT:    [[SUM_SHIFT:%.*]] = lshr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[R1:%.*]] = trunc i32 [[SUM_SHIFT]] to i12
+; CHECK-NEXT:    [[R1:%.*]] = trunc nuw nsw i32 [[SUM_SHIFT]] to i12
 ; CHECK-NEXT:    ret i12 [[R1]]
 ;
   %sh = lshr i32 %x, 20
@@ -537,7 +537,7 @@ define i12 @trunc_sandwich_max_sum_shift(i32 %x) {
 define i12 @trunc_sandwich_max_sum_shift2(i32 %x) {
 ; CHECK-LABEL: @trunc_sandwich_max_sum_shift2(
 ; CHECK-NEXT:    [[SUM_SHIFT:%.*]] = lshr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[R1:%.*]] = trunc i32 [[SUM_SHIFT]] to i12
+; CHECK-NEXT:    [[R1:%.*]] = trunc nuw nsw i32 [[SUM_SHIFT]] to i12
 ; CHECK-NEXT:    ret i12 [[R1]]
 ;
   %sh = lshr i32 %x, 30
@@ -571,7 +571,7 @@ define i12 @trunc_sandwich_use1(i32 %x) {
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i32 [[X:%.*]], 28
 ; CHECK-NEXT:    call void @use(i32 [[SH]])
 ; CHECK-NEXT:    [[SUM_SHIFT:%.*]] = lshr i32 [[X]], 30
-; CHECK-NEXT:    [[R1:%.*]] = trunc i32 [[SUM_SHIFT]] to i12
+; CHECK-NEXT:    [[R1:%.*]] = trunc nuw nsw i32 [[SUM_SHIFT]] to i12
 ; CHECK-NEXT:    ret i12 [[R1]]
 ;
   %sh = lshr i32 %x, 28
@@ -586,7 +586,7 @@ define <3 x i9> @trunc_sandwich_splat_vec_use1(<3 x i14> %x) {
 ; CHECK-NEXT:    [[SH:%.*]] = lshr <3 x i14> [[X:%.*]], <i14 6, i14 6, i14 6>
 ; CHECK-NEXT:    call void @usevec(<3 x i14> [[SH]])
 ; CHECK-NEXT:    [[SUM_SHIFT:%.*]] = lshr <3 x i14> [[X]], <i14 11, i14 11, i14 11>
-; CHECK-NEXT:    [[R1:%.*]] = trunc <3 x i14> [[SUM_SHIFT]] to <3 x i9>
+; CHECK-NEXT:    [[R1:%.*]] = trunc nuw nsw <3 x i14> [[SUM_SHIFT]] to <3 x i9>
 ; CHECK-NEXT:    ret <3 x i9> [[R1]]
 ;
   %sh = lshr <3 x i14> %x, <i14 6, i14 6, i14 6>
@@ -601,7 +601,7 @@ define i12 @trunc_sandwich_min_shift1_use1(i32 %x) {
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i32 [[X:%.*]], 20
 ; CHECK-NEXT:    call void @use(i32 [[SH]])
 ; CHECK-NEXT:    [[SUM_SHIFT:%.*]] = lshr i32 [[X]], 21
-; CHECK-NEXT:    [[R1:%.*]] = trunc i32 [[SUM_SHIFT]] to i12
+; CHECK-NEXT:    [[R1:%.*]] = trunc nuw nsw i32 [[SUM_SHIFT]] to i12
 ; CHECK-NEXT:    ret i12 [[R1]]
 ;
   %sh = lshr i32 %x, 20
@@ -633,7 +633,7 @@ define i12 @trunc_sandwich_max_sum_shift_use1(i32 %x) {
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i32 [[X:%.*]], 20
 ; CHECK-NEXT:    call void @use(i32 [[SH]])
 ; CHECK-NEXT:    [[SUM_SHIFT:%.*]] = lshr i32 [[X]], 31
-; CHECK-NEXT:    [[R1:%.*]] = trunc i32 [[SUM_SHIFT]] to i12
+; CHECK-NEXT:    [[R1:%.*]] = trunc nuw nsw i32 [[SUM_SHIFT]] to i12
 ; CHECK-NEXT:    ret i12 [[R1]]
 ;
   %sh = lshr i32 %x, 20
@@ -648,7 +648,7 @@ define i12 @trunc_sandwich_max_sum_shift2_use1(i32 %x) {
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i32 [[X:%.*]], 30
 ; CHECK-NEXT:    call void @use(i32 [[SH]])
 ; CHECK-NEXT:    [[SUM_SHIFT:%.*]] = lshr i32 [[X]], 31
-; CHECK-NEXT:    [[R1:%.*]] = trunc i32 [[SUM_SHIFT]] to i12
+; CHECK-NEXT:    [[R1:%.*]] = trunc nuw nsw i32 [[SUM_SHIFT]] to i12
 ; CHECK-NEXT:    ret i12 [[R1]]
 ;
   %sh = lshr i32 %x, 30

diff --git a/llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll b/llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll
index 58174f21f767f9..866381ff2887f9 100644
--- a/llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll
+++ b/llvm/test/Transforms/InstCombine/merging-multiple-stores-into-successor.ll
@@ -320,7 +320,7 @@ define void @pr46688(i1 %cond, i32 %x, i16 %d, ptr %p1, ptr %p2) {
 ; CHECK-NEXT:    [[THR1_PN:%.*]] = lshr i32 [[THR_PN]], [[X]]
 ; CHECK-NEXT:    [[THR2_PN:%.*]] = lshr i32 [[THR1_PN]], [[X]]
 ; CHECK-NEXT:    [[STOREMERGE:%.*]] = lshr i32 [[THR2_PN]], [[X]]
-; CHECK-NEXT:    [[STOREMERGE1:%.*]] = trunc i32 [[STOREMERGE]] to i16
+; CHECK-NEXT:    [[STOREMERGE1:%.*]] = trunc nuw i32 [[STOREMERGE]] to i16
 ; CHECK-NEXT:    store i16 [[STOREMERGE1]], ptr [[P1:%.*]], align 2
 ; CHECK-NEXT:    store i32 [[STOREMERGE]], ptr [[P2:%.*]], align 4
 ; CHECK-NEXT:    ret void

diff --git a/llvm/test/Transforms/InstCombine/narrow.ll b/llvm/test/Transforms/InstCombine/narrow.ll
index 781974d33bf115..40229f8511f76d 100644
--- a/llvm/test/Transforms/InstCombine/narrow.ll
+++ b/llvm/test/Transforms/InstCombine/narrow.ll
@@ -76,7 +76,7 @@ define <2 x i8> @shrink_or_vec(<2 x i16> %a) {
 define i31 @shrink_and(i64 %a) {
 ; CHECK-LABEL: @shrink_and(
 ; CHECK-NEXT:    [[AND:%.*]] = and i64 [[A:%.*]], 42
-; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[AND]] to i31
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc nuw nsw i64 [[AND]] to i31
 ; CHECK-NEXT:    ret i31 [[TRUNC]]
 ;
   %and = and i64 %a, 42

diff --git a/llvm/test/Transforms/InstCombine/negated-bitmask.ll b/llvm/test/Transforms/InstCombine/negated-bitmask.ll
index fe2386bd65c318..91886781863476 100644
--- a/llvm/test/Transforms/InstCombine/negated-bitmask.ll
+++ b/llvm/test/Transforms/InstCombine/negated-bitmask.ll
@@ -70,7 +70,7 @@ define i8 @sub_mask1_trunc_lshr(i64 %a0) {
 ; CHECK-LABEL: @sub_mask1_trunc_lshr(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[A0:%.*]], 48
 ; CHECK-NEXT:    [[TMP2:%.*]] = ashr i64 [[TMP1]], 63
-; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[TMP2]] to i8
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc nsw i64 [[TMP2]] to i8
 ; CHECK-NEXT:    [[NEG:%.*]] = add nsw i8 [[TMP3]], 10
 ; CHECK-NEXT:    ret i8 [[NEG]]
 ;
@@ -85,7 +85,7 @@ define i32 @sub_sext_mask1_trunc_lshr(i64 %a0) {
 ; CHECK-LABEL: @sub_sext_mask1_trunc_lshr(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[A0:%.*]], 48
 ; CHECK-NEXT:    [[TMP2:%.*]] = ashr i64 [[TMP1]], 63
-; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[TMP2]] to i8
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc nsw i64 [[TMP2]] to i8
 ; CHECK-NEXT:    [[NARROW:%.*]] = add nsw i8 [[TMP3]], 10
 ; CHECK-NEXT:    [[NEG:%.*]] = zext i8 [[NARROW]] to i32
 ; CHECK-NEXT:    ret i32 [[NEG]]

diff --git a/llvm/test/Transforms/InstCombine/pr34349.ll b/llvm/test/Transforms/InstCombine/pr34349.ll
index ea4afaa245c66d..89947650b26b40 100644
--- a/llvm/test/Transforms/InstCombine/pr34349.ll
+++ b/llvm/test/Transforms/InstCombine/pr34349.ll
@@ -7,7 +7,7 @@ define i8 @fast_div_201(i8 %p) {
 ; CHECK-NEXT:    [[V3:%.*]] = zext i8 [[P:%.*]] to i16
 ; CHECK-NEXT:    [[V4:%.*]] = mul nuw nsw i16 [[V3]], 71
 ; CHECK-NEXT:    [[V5:%.*]] = lshr i16 [[V4]], 8
-; CHECK-NEXT:    [[V6:%.*]] = trunc i16 [[V5]] to i8
+; CHECK-NEXT:    [[V6:%.*]] = trunc nuw nsw i16 [[V5]] to i8
 ; CHECK-NEXT:    [[V7:%.*]] = sub i8 [[P]], [[V6]]
 ; CHECK-NEXT:    [[V8:%.*]] = lshr i8 [[V7]], 1
 ; CHECK-NEXT:    [[V13:%.*]] = add nuw i8 [[V8]], [[V6]]

diff --git a/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll b/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll
index ad55b506a108bb..b94be990199bf5 100644
--- a/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll
+++ b/llvm/test/Transforms/InstCombine/reduction-add-sext-zext-i1.ll
@@ -53,7 +53,7 @@ define i8 @reduce_add_zext_long(<128 x i1> %x) {
 ; CHECK-LABEL: @reduce_add_zext_long(
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <128 x i1> [[X:%.*]] to i128
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i128 @llvm.ctpop.i128(i128 [[TMP1]]), !range [[RNG3:![0-9]+]]
-; CHECK-NEXT:    [[TMP3:%.*]] = trunc i128 [[TMP2]] to i8
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc nuw i128 [[TMP2]] to i8
 ; CHECK-NEXT:    [[RES:%.*]] = sub i8 0, [[TMP3]]
 ; CHECK-NEXT:    ret i8 [[RES]]
 ;
@@ -67,7 +67,7 @@ define i8 @reduce_add_zext_long_external_use(<128 x i1> %x) {
 ; CHECK-LABEL: @reduce_add_zext_long_external_use(
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <128 x i1> [[X:%.*]] to i128
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i128 @llvm.ctpop.i128(i128 [[TMP1]]), !range [[RNG3]]
-; CHECK-NEXT:    [[TMP3:%.*]] = trunc i128 [[TMP2]] to i8
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc nuw i128 [[TMP2]] to i8
 ; CHECK-NEXT:    [[RES:%.*]] = sub i8 0, [[TMP3]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <128 x i1> [[X]], i64 0
 ; CHECK-NEXT:    [[EXT:%.*]] = sext i1 [[TMP4]] to i8

diff --git a/llvm/test/Transforms/InstCombine/sadd_sat.ll b/llvm/test/Transforms/InstCombine/sadd_sat.ll
index 5ccb6f92b6c722..1cce297122f8a1 100644
--- a/llvm/test/Transforms/InstCombine/sadd_sat.ll
+++ b/llvm/test/Transforms/InstCombine/sadd_sat.ll
@@ -79,7 +79,7 @@ define i32 @smul_sat32(i32 %a, i32 %b) {
 ; CHECK-NEXT:    [[ADD:%.*]] = mul nsw i64 [[CONV1]], [[CONV]]
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT:%.*]] = call i64 @llvm.smin.i64(i64 [[ADD]], i64 2147483647)
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT8:%.*]] = call i64 @llvm.smax.i64(i64 [[SPEC_STORE_SELECT]], i64 -2147483648)
-; CHECK-NEXT:    [[CONV7:%.*]] = trunc i64 [[SPEC_STORE_SELECT8]] to i32
+; CHECK-NEXT:    [[CONV7:%.*]] = trunc nsw i64 [[SPEC_STORE_SELECT8]] to i32
 ; CHECK-NEXT:    ret i32 [[CONV7]]
 ;
 entry:
@@ -102,7 +102,7 @@ define i32 @smul_sat32_mm(i32 %a, i32 %b) {
 ; CHECK-NEXT:    [[ADD:%.*]] = mul nsw i64 [[CONV1]], [[CONV]]
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT:%.*]] = call i64 @llvm.smin.i64(i64 [[ADD]], i64 2147483647)
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT8:%.*]] = call i64 @llvm.smax.i64(i64 [[SPEC_STORE_SELECT]], i64 -2147483648)
-; CHECK-NEXT:    [[CONV7:%.*]] = trunc i64 [[SPEC_STORE_SELECT8]] to i32
+; CHECK-NEXT:    [[CONV7:%.*]] = trunc nsw i64 [[SPEC_STORE_SELECT8]] to i32
 ; CHECK-NEXT:    ret i32 [[CONV7]]
 ;
 entry:
@@ -295,7 +295,7 @@ define signext i4 @sadd_sat4(i4 signext %a, i4 signext %b) {
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]]
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT:%.*]] = call i32 @llvm.smin.i32(i32 [[ADD]], i32 7)
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT10:%.*]] = call i32 @llvm.smax.i32(i32 [[SPEC_STORE_SELECT]], i32 -8)
-; CHECK-NEXT:    [[CONV9:%.*]] = trunc i32 [[SPEC_STORE_SELECT10]] to i4
+; CHECK-NEXT:    [[CONV9:%.*]] = trunc nsw i32 [[SPEC_STORE_SELECT10]] to i4
 ; CHECK-NEXT:    ret i4 [[CONV9]]
 ;
 entry:
@@ -318,7 +318,7 @@ define signext i4 @ssub_sat4(i4 signext %a, i4 signext %b) {
 ; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[CONV]], [[CONV1]]
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT:%.*]] = call i32 @llvm.smin.i32(i32 [[SUB]], i32 7)
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT10:%.*]] = call i32 @llvm.smax.i32(i32 [[SPEC_STORE_SELECT]], i32 -8)
-; CHECK-NEXT:    [[CONV9:%.*]] = trunc i32 [[SPEC_STORE_SELECT10]] to i4
+; CHECK-NEXT:    [[CONV9:%.*]] = trunc nsw i32 [[SPEC_STORE_SELECT10]] to i4
 ; CHECK-NEXT:    ret i4 [[CONV9]]
 ;
 entry:
@@ -465,7 +465,7 @@ define i32 @sadd_sat32_extrause_2(i32 %a, i32 %b) {
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[CONV1]], [[CONV]]
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT:%.*]] = call i64 @llvm.smin.i64(i64 [[ADD]], i64 2147483647)
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT8:%.*]] = call i64 @llvm.smax.i64(i64 [[SPEC_STORE_SELECT]], i64 -2147483648)
-; CHECK-NEXT:    [[CONV7:%.*]] = trunc i64 [[SPEC_STORE_SELECT8]] to i32
+; CHECK-NEXT:    [[CONV7:%.*]] = trunc nsw i64 [[SPEC_STORE_SELECT8]] to i32
 ; CHECK-NEXT:    call void @use64(i64 [[SPEC_STORE_SELECT]])
 ; CHECK-NEXT:    ret i32 [[CONV7]]
 ;
@@ -490,7 +490,7 @@ define i32 @sadd_sat32_extrause_2_mm(i32 %a, i32 %b) {
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[CONV1]], [[CONV]]
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT:%.*]] = call i64 @llvm.smin.i64(i64 [[ADD]], i64 2147483647)
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT8:%.*]] = call i64 @llvm.smax.i64(i64 [[SPEC_STORE_SELECT]], i64 -2147483648)
-; CHECK-NEXT:    [[CONV7:%.*]] = trunc i64 [[SPEC_STORE_SELECT8]] to i32
+; CHECK-NEXT:    [[CONV7:%.*]] = trunc nsw i64 [[SPEC_STORE_SELECT8]] to i32
 ; CHECK-NEXT:    call void @use64(i64 [[SPEC_STORE_SELECT]])
 ; CHECK-NEXT:    ret i32 [[CONV7]]
 ;
@@ -513,7 +513,7 @@ define i32 @sadd_sat32_extrause_3(i32 %a, i32 %b) {
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[CONV1]], [[CONV]]
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT:%.*]] = call i64 @llvm.smin.i64(i64 [[ADD]], i64 2147483647)
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT8:%.*]] = call i64 @llvm.smax.i64(i64 [[SPEC_STORE_SELECT]], i64 -2147483648)
-; CHECK-NEXT:    [[CONV7:%.*]] = trunc i64 [[SPEC_STORE_SELECT8]] to i32
+; CHECK-NEXT:    [[CONV7:%.*]] = trunc nsw i64 [[SPEC_STORE_SELECT8]] to i32
 ; CHECK-NEXT:    call void @use64(i64 [[ADD]])
 ; CHECK-NEXT:    ret i32 [[CONV7]]
 ;
@@ -538,7 +538,7 @@ define i32 @sadd_sat32_extrause_3_mm(i32 %a, i32 %b) {
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[CONV1]], [[CONV]]
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT:%.*]] = call i64 @llvm.smin.i64(i64 [[ADD]], i64 2147483647)
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT8:%.*]] = call i64 @llvm.smax.i64(i64 [[SPEC_STORE_SELECT]], i64 -2147483648)
-; CHECK-NEXT:    [[CONV7:%.*]] = trunc i64 [[SPEC_STORE_SELECT8]] to i32
+; CHECK-NEXT:    [[CONV7:%.*]] = trunc nsw i64 [[SPEC_STORE_SELECT8]] to i32
 ; CHECK-NEXT:    call void @use64(i64 [[ADD]])
 ; CHECK-NEXT:    ret i32 [[CONV7]]
 ;
@@ -561,7 +561,7 @@ define i32 @sadd_sat32_trunc(i32 %a, i32 %b) {
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[CONV1]], [[CONV]]
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT:%.*]] = call i64 @llvm.smin.i64(i64 [[ADD]], i64 32767)
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT8:%.*]] = call i64 @llvm.smax.i64(i64 [[SPEC_STORE_SELECT]], i64 -32768)
-; CHECK-NEXT:    [[CONV7:%.*]] = trunc i64 [[SPEC_STORE_SELECT8]] to i32
+; CHECK-NEXT:    [[CONV7:%.*]] = trunc nsw i64 [[SPEC_STORE_SELECT8]] to i32
 ; CHECK-NEXT:    ret i32 [[CONV7]]
 ;
 entry:
@@ -603,7 +603,7 @@ define i8 @sadd_sat8_ext8(i8 %a, i16 %b) {
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[CONV1]], [[CONV]]
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT:%.*]] = call i32 @llvm.smin.i32(i32 [[ADD]], i32 127)
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT8:%.*]] = call i32 @llvm.smax.i32(i32 [[SPEC_STORE_SELECT]], i32 -128)
-; CHECK-NEXT:    [[CONV7:%.*]] = trunc i32 [[SPEC_STORE_SELECT8]] to i8
+; CHECK-NEXT:    [[CONV7:%.*]] = trunc nsw i32 [[SPEC_STORE_SELECT8]] to i8
 ; CHECK-NEXT:    ret i8 [[CONV7]]
 ;
 entry:
@@ -625,7 +625,7 @@ define i32 @sadd_sat32_zext(i32 %a, i32 %b) {
 ; CHECK-NEXT:    [[CONV1:%.*]] = zext i32 [[B:%.*]] to i64
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i64 [[CONV1]], [[CONV]]
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT:%.*]] = call i64 @llvm.umin.i64(i64 [[ADD]], i64 2147483647)
-; CHECK-NEXT:    [[CONV7:%.*]] = trunc i64 [[SPEC_STORE_SELECT]] to i32
+; CHECK-NEXT:    [[CONV7:%.*]] = trunc nuw nsw i64 [[SPEC_STORE_SELECT]] to i32
 ; CHECK-NEXT:    ret i32 [[CONV7]]
 ;
 entry:
@@ -680,7 +680,7 @@ define i32 @ashrA(i64 %a, i32 %b) {
 ; CHECK-LABEL: @ashrA(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc nuw i64 [[TMP0]] to i32
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[TMP1]], i32 [[B:%.*]])
 ; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
@@ -698,7 +698,7 @@ define i32 @ashrB(i32 %a, i64 %b) {
 ; CHECK-LABEL: @ashrB(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = lshr i64 [[B:%.*]], 32
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc nuw i64 [[TMP0]] to i32
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[TMP1]], i32 [[A:%.*]])
 ; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
@@ -719,8 +719,8 @@ define i32 @ashrAB(i64 %a, i64 %b) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = lshr i64 [[A:%.*]], 32
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[B:%.*]], 32
-; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
-; CHECK-NEXT:    [[TMP3:%.*]] = trunc i64 [[TMP0]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = trunc nuw i64 [[TMP1]] to i32
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc nuw i64 [[TMP0]] to i32
 ; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[TMP2]], i32 [[TMP3]])
 ; CHECK-NEXT:    ret i32 [[TMP4]]
 ;
@@ -744,7 +744,7 @@ define i32 @ashrA31(i64 %a, i32 %b) {
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[CONV]], [[CONV1]]
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT:%.*]] = call i64 @llvm.smax.i64(i64 [[ADD]], i64 -2147483648)
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT8:%.*]] = call i64 @llvm.smin.i64(i64 [[SPEC_STORE_SELECT]], i64 2147483647)
-; CHECK-NEXT:    [[CONV7:%.*]] = trunc i64 [[SPEC_STORE_SELECT8]] to i32
+; CHECK-NEXT:    [[CONV7:%.*]] = trunc nsw i64 [[SPEC_STORE_SELECT8]] to i32
 ; CHECK-NEXT:    ret i32 [[CONV7]]
 ;
 entry:
@@ -763,7 +763,7 @@ define i32 @ashrA33(i64 %a, i32 %b) {
 ; CHECK-LABEL: @ashrA33(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CONV:%.*]] = ashr i64 [[A:%.*]], 33
-; CHECK-NEXT:    [[TMP0:%.*]] = trunc i64 [[CONV]] to i32
+; CHECK-NEXT:    [[TMP0:%.*]] = trunc nsw i64 [[CONV]] to i32
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[TMP0]], i32 [[B:%.*]])
 ; CHECK-NEXT:    ret i32 [[TMP1]]
 ;
@@ -787,7 +787,7 @@ define <2 x i8> @ashrv2i8(<2 x i16> %a, <2 x i8> %b) {
 ; CHECK-NEXT:    [[ADD:%.*]] = add <2 x i16> [[CONV]], [[CONV1]]
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT:%.*]] = call <2 x i16> @llvm.smax.v2i16(<2 x i16> [[ADD]], <2 x i16> <i16 -128, i16 -128>)
 ; CHECK-NEXT:    [[SPEC_STORE_SELECT8:%.*]] = call <2 x i16> @llvm.smin.v2i16(<2 x i16> [[SPEC_STORE_SELECT]], <2 x i16> <i16 127, i16 127>)
-; CHECK-NEXT:    [[CONV7:%.*]] = trunc <2 x i16> [[SPEC_STORE_SELECT8]] to <2 x i8>
+; CHECK-NEXT:    [[CONV7:%.*]] = trunc nsw <2 x i16> [[SPEC_STORE_SELECT8]] to <2 x i8>
 ; CHECK-NEXT:    ret <2 x i8> [[CONV7]]
 ;
 entry:
@@ -806,7 +806,7 @@ define <2 x i8> @ashrv2i8_s(<2 x i16> %a, <2 x i8> %b) {
 ; CHECK-LABEL: @ashrv2i8_s(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = lshr <2 x i16> [[A:%.*]], <i16 8, i16 8>
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc <2 x i16> [[TMP0]] to <2 x i8>
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc nuw <2 x i16> [[TMP0]] to <2 x i8>
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> [[TMP1]], <2 x i8> [[B:%.*]])
 ; CHECK-NEXT:    ret <2 x i8> [[TMP2]]
 ;

diff --git a/llvm/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll b/llvm/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll
index 03dd6188ac039f..69896f855f5f16 100644
--- a/llvm/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll
+++ b/llvm/test/Transforms/InstCombine/select-cmp-cttz-ctlz.ll
@@ -220,7 +220,7 @@ define i64 @test6c(i32 %x) {
 define i16 @test1d(i64 %x) {
 ; CHECK-LABEL: @test1d(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i64 @llvm.cttz.i64(i64 [[X:%.*]], i1 false), !range [[RNG2]]
-; CHECK-NEXT:    [[CONV:%.*]] = trunc i64 [[CT]] to i16
+; CHECK-NEXT:    [[CONV:%.*]] = trunc nuw nsw i64 [[CT]] to i16
 ; CHECK-NEXT:    ret i16 [[CONV]]
 ;
   %ct = tail call i64 @llvm.cttz.i64(i64 %x, i1 true)
@@ -233,7 +233,7 @@ define i16 @test1d(i64 %x) {
 define i32 @test2d(i64 %x) {
 ; CHECK-LABEL: @test2d(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i64 @llvm.cttz.i64(i64 [[X:%.*]], i1 false), !range [[RNG2]]
-; CHECK-NEXT:    [[CAST:%.*]] = trunc i64 [[CT]] to i32
+; CHECK-NEXT:    [[CAST:%.*]] = trunc nuw nsw i64 [[CT]] to i32
 ; CHECK-NEXT:    ret i32 [[CAST]]
 ;
   %ct = tail call i64 @llvm.cttz.i64(i64 %x, i1 true)
@@ -246,7 +246,7 @@ define i32 @test2d(i64 %x) {
 define i16 @test3d(i32 %x) {
 ; CHECK-LABEL: @test3d(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range [[RNG1]]
-; CHECK-NEXT:    [[CAST:%.*]] = trunc i32 [[CT]] to i16
+; CHECK-NEXT:    [[CAST:%.*]] = trunc nuw nsw i32 [[CT]] to i16
 ; CHECK-NEXT:    ret i16 [[CAST]]
 ;
   %ct = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
@@ -259,7 +259,7 @@ define i16 @test3d(i32 %x) {
 define i16 @test4d(i64 %x) {
 ; CHECK-LABEL: @test4d(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i64 @llvm.ctlz.i64(i64 [[X:%.*]], i1 false), !range [[RNG2]]
-; CHECK-NEXT:    [[CAST:%.*]] = trunc i64 [[CT]] to i16
+; CHECK-NEXT:    [[CAST:%.*]] = trunc nuw nsw i64 [[CT]] to i16
 ; CHECK-NEXT:    ret i16 [[CAST]]
 ;
   %ct = tail call i64 @llvm.ctlz.i64(i64 %x, i1 true)
@@ -272,7 +272,7 @@ define i16 @test4d(i64 %x) {
 define i32 @test5d(i64 %x) {
 ; CHECK-LABEL: @test5d(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i64 @llvm.ctlz.i64(i64 [[X:%.*]], i1 false), !range [[RNG2]]
-; CHECK-NEXT:    [[CAST:%.*]] = trunc i64 [[CT]] to i32
+; CHECK-NEXT:    [[CAST:%.*]] = trunc nuw nsw i64 [[CT]] to i32
 ; CHECK-NEXT:    ret i32 [[CAST]]
 ;
   %ct = tail call i64 @llvm.ctlz.i64(i64 %x, i1 true)
@@ -288,7 +288,7 @@ define i32 @not_op_ctlz(i64 %x) {
 ; CHECK-LABEL: @not_op_ctlz(
 ; CHECK-NEXT:    [[N:%.*]] = xor i64 [[X:%.*]], -1
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i64 @llvm.ctlz.i64(i64 [[N]], i1 false), !range [[RNG2]]
-; CHECK-NEXT:    [[CAST:%.*]] = trunc i64 [[CT]] to i32
+; CHECK-NEXT:    [[CAST:%.*]] = trunc nuw nsw i64 [[CT]] to i32
 ; CHECK-NEXT:    ret i32 [[CAST]]
 ;
   %n = xor i64 %x, -1
@@ -303,7 +303,7 @@ define i32 @not_op_cttz(i64 %x) {
 ; CHECK-LABEL: @not_op_cttz(
 ; CHECK-NEXT:    [[N:%.*]] = xor i64 [[X:%.*]], -1
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i64 @llvm.cttz.i64(i64 [[N]], i1 false), !range [[RNG2]]
-; CHECK-NEXT:    [[CAST:%.*]] = trunc i64 [[CT]] to i32
+; CHECK-NEXT:    [[CAST:%.*]] = trunc nuw nsw i64 [[CT]] to i32
 ; CHECK-NEXT:    ret i32 [[CAST]]
 ;
   %n = xor i64 %x, -1
@@ -320,7 +320,7 @@ define i32 @not_op_ctlz_wrong_xor_op1(i64 %x) {
 ; CHECK-LABEL: @not_op_ctlz_wrong_xor_op1(
 ; CHECK-NEXT:    [[N:%.*]] = xor i64 [[X:%.*]], -2
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i64 @llvm.ctlz.i64(i64 [[N]], i1 true), !range [[RNG2]]
-; CHECK-NEXT:    [[CAST:%.*]] = trunc i64 [[CT]] to i32
+; CHECK-NEXT:    [[CAST:%.*]] = trunc nuw nsw i64 [[CT]] to i32
 ; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i64 [[X]], -1
 ; CHECK-NEXT:    [[R:%.*]] = select i1 [[TOBOOL]], i32 64, i32 [[CAST]]
 ; CHECK-NEXT:    ret i32 [[R]]
@@ -339,7 +339,7 @@ define i32 @not_op_ctlz_wrong_xor_op0(i64 %x, i64 %y) {
 ; CHECK-LABEL: @not_op_ctlz_wrong_xor_op0(
 ; CHECK-NEXT:    [[N:%.*]] = xor i64 [[Y:%.*]], -1
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i64 @llvm.ctlz.i64(i64 [[N]], i1 true), !range [[RNG2]]
-; CHECK-NEXT:    [[CAST:%.*]] = trunc i64 [[CT]] to i32
+; CHECK-NEXT:    [[CAST:%.*]] = trunc nuw nsw i64 [[CT]] to i32
 ; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i64 [[X:%.*]], -1
 ; CHECK-NEXT:    [[R:%.*]] = select i1 [[TOBOOL]], i32 64, i32 [[CAST]]
 ; CHECK-NEXT:    ret i32 [[R]]
@@ -358,7 +358,7 @@ define i32 @not_op_cttz_wrong_cmp(i64 %x) {
 ; CHECK-LABEL: @not_op_cttz_wrong_cmp(
 ; CHECK-NEXT:    [[N:%.*]] = xor i64 [[X:%.*]], -1
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i64 @llvm.cttz.i64(i64 [[N]], i1 true), !range [[RNG2]]
-; CHECK-NEXT:    [[CAST:%.*]] = trunc i64 [[CT]] to i32
+; CHECK-NEXT:    [[CAST:%.*]] = trunc nuw nsw i64 [[CT]] to i32
 ; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp eq i64 [[X]], 0
 ; CHECK-NEXT:    [[R:%.*]] = select i1 [[TOBOOL]], i32 64, i32 [[CAST]]
 ; CHECK-NEXT:    ret i32 [[R]]
@@ -374,7 +374,7 @@ define i32 @not_op_cttz_wrong_cmp(i64 %x) {
 define i16 @test6d(i32 %x) {
 ; CHECK-LABEL: @test6d(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i32 @llvm.ctlz.i32(i32 [[X:%.*]], i1 false), !range [[RNG1]]
-; CHECK-NEXT:    [[CAST:%.*]] = trunc i32 [[CT]] to i16
+; CHECK-NEXT:    [[CAST:%.*]] = trunc nuw nsw i32 [[CT]] to i16
 ; CHECK-NEXT:    ret i16 [[CAST]]
 ;
   %ct = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
@@ -400,7 +400,7 @@ define i64 @select_bug1(i32 %x) {
 define i16 @select_bug2(i32 %x) {
 ; CHECK-LABEL: @select_bug2(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 false), !range [[RNG1]]
-; CHECK-NEXT:    [[CONV:%.*]] = trunc i32 [[CT]] to i16
+; CHECK-NEXT:    [[CONV:%.*]] = trunc nuw nsw i32 [[CT]] to i16
 ; CHECK-NEXT:    ret i16 [[CONV]]
 ;
   %ct = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
@@ -595,7 +595,7 @@ define i64 @test_multiuse_zext_undef(i32 %x, ptr %p) {
 define i16 @test_multiuse_trunc_def(i64 %x, ptr %p) {
 ; CHECK-LABEL: @test_multiuse_trunc_def(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i64 @llvm.cttz.i64(i64 [[X:%.*]], i1 false), !range [[RNG2]]
-; CHECK-NEXT:    [[CONV:%.*]] = trunc i64 [[CT]] to i16
+; CHECK-NEXT:    [[CONV:%.*]] = trunc nuw nsw i64 [[CT]] to i16
 ; CHECK-NEXT:    store i16 [[CONV]], ptr [[P:%.*]], align 2
 ; CHECK-NEXT:    ret i16 [[CONV]]
 ;
@@ -610,7 +610,7 @@ define i16 @test_multiuse_trunc_def(i64 %x, ptr %p) {
 define i16 @test_multiuse_trunc_undef(i64 %x, ptr %p) {
 ; CHECK-LABEL: @test_multiuse_trunc_undef(
 ; CHECK-NEXT:    [[CT:%.*]] = tail call i64 @llvm.cttz.i64(i64 [[X:%.*]], i1 false), !range [[RNG2]]
-; CHECK-NEXT:    [[CONV:%.*]] = trunc i64 [[CT]] to i16
+; CHECK-NEXT:    [[CONV:%.*]] = trunc nuw nsw i64 [[CT]] to i16
 ; CHECK-NEXT:    store i16 [[CONV]], ptr [[P:%.*]], align 2
 ; CHECK-NEXT:    ret i16 [[CONV]]
 ;

diff --git a/llvm/test/Transforms/InstCombine/select-imm-canon.ll b/llvm/test/Transforms/InstCombine/select-imm-canon.ll
index cde6329fd1b276..6d57af9d939d91 100644
--- a/llvm/test/Transforms/InstCombine/select-imm-canon.ll
+++ b/llvm/test/Transforms/InstCombine/select-imm-canon.ll
@@ -20,7 +20,7 @@ define i8 @double(i32 %A) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.smax.i32(i32 [[A:%.*]], i32 -128)
 ; CHECK-NEXT:    [[CONV71:%.*]] = call i32 @llvm.smin.i32(i32 [[TMP0]], i32 127)
-; CHECK-NEXT:    [[CONV7:%.*]] = trunc i32 [[CONV71]] to i8
+; CHECK-NEXT:    [[CONV7:%.*]] = trunc nsw i32 [[CONV71]] to i8
 ; CHECK-NEXT:    ret i8 [[CONV7]]
 ;
 entry:
@@ -51,7 +51,7 @@ define i8 @original(i32 %A, i32 %B) {
 ; CHECK-LABEL: @original(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.smax.i32(i32 [[A:%.*]], i32 -128)
 ; CHECK-NEXT:    [[SPEC_SELECT_I:%.*]] = call i32 @llvm.smin.i32(i32 [[TMP1]], i32 127)
-; CHECK-NEXT:    [[CONV7:%.*]] = trunc i32 [[SPEC_SELECT_I]] to i8
+; CHECK-NEXT:    [[CONV7:%.*]] = trunc nsw i32 [[SPEC_SELECT_I]] to i8
 ; CHECK-NEXT:    ret i8 [[CONV7]]
 ;
   %cmp4.i = icmp slt i32 127, %A
@@ -68,7 +68,7 @@ define i8 @original_logical(i32 %A, i32 %B) {
 ; CHECK-LABEL: @original_logical(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.smax.i32(i32 [[A:%.*]], i32 -128)
 ; CHECK-NEXT:    [[SPEC_SELECT_I:%.*]] = call i32 @llvm.smin.i32(i32 [[TMP1]], i32 127)
-; CHECK-NEXT:    [[CONV7:%.*]] = trunc i32 [[SPEC_SELECT_I]] to i8
+; CHECK-NEXT:    [[CONV7:%.*]] = trunc nsw i32 [[SPEC_SELECT_I]] to i8
 ; CHECK-NEXT:    ret i8 [[CONV7]]
 ;
   %cmp4.i = icmp slt i32 127, %A

diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
index 05fcf662352954..bd8145ab2a35bc 100644
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -452,7 +452,7 @@ define i64 @test21(i32 %x) {
 define i16 @test22(i32 %x) {
 ; CHECK-LABEL: @test22(
 ; CHECK-NEXT:    [[X_LOBIT:%.*]] = ashr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[RETVAL:%.*]] = trunc i32 [[X_LOBIT]] to i16
+; CHECK-NEXT:    [[RETVAL:%.*]] = trunc nsw i32 [[X_LOBIT]] to i16
 ; CHECK-NEXT:    ret i16 [[RETVAL]]
 ;
   %t = icmp slt i32 %x, 0

diff --git a/llvm/test/Transforms/InstCombine/sext-of-trunc-nsw.ll b/llvm/test/Transforms/InstCombine/sext-of-trunc-nsw.ll
index 5b9334ab93cb4c..b992460d0be698 100644
--- a/llvm/test/Transforms/InstCombine/sext-of-trunc-nsw.ll
+++ b/llvm/test/Transforms/InstCombine/sext-of-trunc-nsw.ll
@@ -86,7 +86,7 @@ define i16 @t5_extrause(i8 %x) {
 ; CHECK-LABEL: @t5_extrause(
 ; CHECK-NEXT:    [[A:%.*]] = ashr i8 [[X:%.*]], 5
 ; CHECK-NEXT:    call void @use8(i8 [[A]])
-; CHECK-NEXT:    [[B:%.*]] = trunc i8 [[A]] to i4
+; CHECK-NEXT:    [[B:%.*]] = trunc nsw i8 [[A]] to i4
 ; CHECK-NEXT:    call void @use4(i4 [[B]])
 ; CHECK-NEXT:    [[C:%.*]] = sext i8 [[A]] to i16
 ; CHECK-NEXT:    ret i16 [[C]]
@@ -134,7 +134,7 @@ define i24 @wide_source_matching_signbits(i32 %x) {
 ; CHECK-LABEL: @wide_source_matching_signbits(
 ; CHECK-NEXT:    [[M:%.*]] = and i32 [[X:%.*]], 7
 ; CHECK-NEXT:    [[A:%.*]] = shl nsw i32 -1, [[M]]
-; CHECK-NEXT:    [[C:%.*]] = trunc i32 [[A]] to i24
+; CHECK-NEXT:    [[C:%.*]] = trunc nsw i32 [[A]] to i24
 ; CHECK-NEXT:    ret i24 [[C]]
 ;
   %m = and i32 %x, 7
@@ -194,7 +194,7 @@ define i32 @same_source_matching_signbits_extra_use(i32 %x) {
 ; CHECK-LABEL: @same_source_matching_signbits_extra_use(
 ; CHECK-NEXT:    [[M:%.*]] = and i32 [[X:%.*]], 7
 ; CHECK-NEXT:    [[A:%.*]] = shl nsw i32 -1, [[M]]
-; CHECK-NEXT:    [[B:%.*]] = trunc i32 [[A]] to i8
+; CHECK-NEXT:    [[B:%.*]] = trunc nsw i32 [[A]] to i8
 ; CHECK-NEXT:    call void @use8(i8 [[B]])
 ; CHECK-NEXT:    ret i32 [[A]]
 ;

diff --git a/llvm/test/Transforms/InstCombine/sext.ll b/llvm/test/Transforms/InstCombine/sext.ll
index 186745362a448b..e3b6058ce7f806 100644
--- a/llvm/test/Transforms/InstCombine/sext.ll
+++ b/llvm/test/Transforms/InstCombine/sext.ll
@@ -385,7 +385,7 @@ define i16 @smear_set_bit_different_dest_type(i32 %x) {
 ; CHECK-LABEL: @smear_set_bit_different_dest_type(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 24
 ; CHECK-NEXT:    [[TMP2:%.*]] = ashr i32 [[TMP1]], 31
-; CHECK-NEXT:    [[S:%.*]] = trunc i32 [[TMP2]] to i16
+; CHECK-NEXT:    [[S:%.*]] = trunc nsw i32 [[TMP2]] to i16
 ; CHECK-NEXT:    ret i16 [[S]]
 ;
   %t = trunc i32 %x to i8

diff --git a/llvm/test/Transforms/InstCombine/shift-add.ll b/llvm/test/Transforms/InstCombine/shift-add.ll
index aa3a238e0949ce..7f948848844c5a 100644
--- a/llvm/test/Transforms/InstCombine/shift-add.ll
+++ b/llvm/test/Transforms/InstCombine/shift-add.ll
@@ -742,7 +742,7 @@ define <3 x i32> @add3_i96(<3 x i32> %0, <3 x i32> %1) {
 ; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <3 x i32> [[TMP1]], i64 2
 ; CHECK-NEXT:    [[TMP14:%.*]] = add i32 [[TMP13]], [[TMP12]]
 ; CHECK-NEXT:    [[TMP15:%.*]] = lshr i64 [[TMP11]], 32
-; CHECK-NEXT:    [[TMP16:%.*]] = trunc i64 [[TMP15]] to i32
+; CHECK-NEXT:    [[TMP16:%.*]] = trunc nuw nsw i64 [[TMP15]] to i32
 ; CHECK-NEXT:    [[TMP17:%.*]] = add i32 [[TMP14]], [[TMP16]]
 ; CHECK-NEXT:    [[TMP18:%.*]] = insertelement <3 x i32> poison, i32 [[ADD_NARROWED]], i64 0
 ; CHECK-NEXT:    [[TMP19:%.*]] = trunc i64 [[TMP11]] to i32

diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll
index 60a7dce2a87534..a0a3c8edfb4b5d 100644
--- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll
+++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll
@@ -139,7 +139,7 @@ define i1 @n4(i32 %x, i32 %len) {
 ; CHECK-NEXT:    [[T2:%.*]] = add i32 [[LEN]], -16
 ; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext nneg i32 [[T2]] to i64
 ; CHECK-NEXT:    [[T3:%.*]] = lshr i64 262143, [[T2_WIDE]]
-; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc i64 [[T3]] to i32
+; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc nuw nsw i64 [[T3]] to i32
 ; CHECK-NEXT:    [[T4:%.*]] = and i32 [[T1]], [[T3_TRUNC]]
 ; CHECK-NEXT:    [[T5:%.*]] = icmp ne i32 [[T4]], 0
 ; CHECK-NEXT:    ret i1 [[T5]]
@@ -229,7 +229,7 @@ define <2 x i1> @n8_vec(<2 x i32> %x, <2 x i32> %len) {
 ; CHECK-NEXT:    [[T2:%.*]] = add <2 x i32> [[LEN]], <i32 -16, i32 -16>
 ; CHECK-NEXT:    [[T2_WIDE:%.*]] = zext nneg <2 x i32> [[T2]] to <2 x i64>
 ; CHECK-NEXT:    [[T3:%.*]] = lshr <2 x i64> <i64 131071, i64 262143>, [[T2_WIDE]]
-; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc <2 x i64> [[T3]] to <2 x i32>
+; CHECK-NEXT:    [[T3_TRUNC:%.*]] = trunc nuw nsw <2 x i64> [[T3]] to <2 x i32>
 ; CHECK-NEXT:    [[T4:%.*]] = and <2 x i32> [[T1]], [[T3_TRUNC]]
 ; CHECK-NEXT:    [[T5:%.*]] = icmp ne <2 x i32> [[T4]], zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[T5]]

diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-ashr.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-ashr.ll
index 6773cbac1d1e86..84dd4c57ebc619 100644
--- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-ashr.ll
+++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-ashr.ll
@@ -13,7 +13,7 @@
 define i16 @t0(i32 %x, i16 %y) {
 ; CHECK-LABEL: @t0(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[T5:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[T5:%.*]] = trunc nsw i32 [[TMP1]] to i16
 ; CHECK-NEXT:    ret i16 [[T5]]
 ;
   %t0 = sub i16 32, %y
@@ -30,7 +30,7 @@ define i16 @t0(i32 %x, i16 %y) {
 define <2 x i16> @t1_vec_splat(<2 x i32> %x, <2 x i16> %y) {
 ; CHECK-LABEL: @t1_vec_splat(
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr <2 x i32> [[X:%.*]], <i32 31, i32 31>
-; CHECK-NEXT:    [[T5:%.*]] = trunc <2 x i32> [[TMP1]] to <2 x i16>
+; CHECK-NEXT:    [[T5:%.*]] = trunc nsw <2 x i32> [[TMP1]] to <2 x i16>
 ; CHECK-NEXT:    ret <2 x i16> [[T5]]
 ;
   %t0 = sub <2 x i16> <i16 32, i16 32>, %y
@@ -100,7 +100,7 @@ define i16 @t6_extrause0(i32 %x, i16 %y) {
 ; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[T2]] to i16
 ; CHECK-NEXT:    call void @use16(i16 [[T3]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X]], 31
-; CHECK-NEXT:    [[T5:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[T5:%.*]] = trunc nsw i32 [[TMP1]] to i16
 ; CHECK-NEXT:    ret i16 [[T5]]
 ;
   %t0 = sub i16 32, %y
@@ -118,7 +118,7 @@ define i16 @t7_extrause1(i32 %x, i16 %y) {
 ; CHECK-NEXT:    [[T4:%.*]] = add i16 [[Y:%.*]], -1
 ; CHECK-NEXT:    call void @use16(i16 [[T4]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[T5:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[T5:%.*]] = trunc nsw i32 [[TMP1]] to i16
 ; CHECK-NEXT:    ret i16 [[T5]]
 ;
   %t0 = sub i16 32, %y

diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-lshr.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-lshr.ll
index 63099a8af81f6c..214ec88d2e551d 100644
--- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-lshr.ll
+++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-with-truncation-lshr.ll
@@ -13,7 +13,7 @@
 define i16 @t0(i32 %x, i16 %y) {
 ; CHECK-LABEL: @t0(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[T5:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[T5:%.*]] = trunc nuw nsw i32 [[TMP1]] to i16
 ; CHECK-NEXT:    ret i16 [[T5]]
 ;
   %t0 = sub i16 32, %y
@@ -30,7 +30,7 @@ define i16 @t0(i32 %x, i16 %y) {
 define <2 x i16> @t1_vec_splat(<2 x i32> %x, <2 x i16> %y) {
 ; CHECK-LABEL: @t1_vec_splat(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 31, i32 31>
-; CHECK-NEXT:    [[T5:%.*]] = trunc <2 x i32> [[TMP1]] to <2 x i16>
+; CHECK-NEXT:    [[T5:%.*]] = trunc nuw nsw <2 x i32> [[TMP1]] to <2 x i16>
 ; CHECK-NEXT:    ret <2 x i16> [[T5]]
 ;
   %t0 = sub <2 x i16> <i16 32, i16 32>, %y
@@ -100,7 +100,7 @@ define i16 @t6_extrause0(i32 %x, i16 %y) {
 ; CHECK-NEXT:    [[T3:%.*]] = trunc i32 [[T2]] to i16
 ; CHECK-NEXT:    call void @use16(i16 [[T3]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X]], 31
-; CHECK-NEXT:    [[T5:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[T5:%.*]] = trunc nuw nsw i32 [[TMP1]] to i16
 ; CHECK-NEXT:    ret i16 [[T5]]
 ;
   %t0 = sub i16 32, %y
@@ -118,7 +118,7 @@ define i16 @t7_extrause1(i32 %x, i16 %y) {
 ; CHECK-NEXT:    [[T4:%.*]] = add i16 [[Y:%.*]], -1
 ; CHECK-NEXT:    call void @use16(i16 [[T4]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
-; CHECK-NEXT:    [[T5:%.*]] = trunc i32 [[TMP1]] to i16
+; CHECK-NEXT:    [[T5:%.*]] = trunc nuw nsw i32 [[TMP1]] to i16
 ; CHECK-NEXT:    ret i16 [[T5]]
 ;
   %t0 = sub i16 32, %y

diff  --git a/llvm/test/Transforms/InstCombine/shift-shift.ll b/llvm/test/Transforms/InstCombine/shift-shift.ll
index 8a40863300d45f..7c35718601ba7f 100644
--- a/llvm/test/Transforms/InstCombine/shift-shift.ll
+++ b/llvm/test/Transforms/InstCombine/shift-shift.ll
@@ -166,7 +166,7 @@ define i8 @shl_trunc_smaller_lshr(i32 %x) {
 define i24 @shl_trunc_bigger_ashr(i32 %x) {
 ; CHECK-LABEL: @shl_trunc_bigger_ashr(
 ; CHECK-NEXT:    [[SH_DIFF:%.*]] = ashr i32 [[X:%.*]], 9
-; CHECK-NEXT:    [[TR_SH_DIFF:%.*]] = trunc i32 [[SH_DIFF]] to i24
+; CHECK-NEXT:    [[TR_SH_DIFF:%.*]] = trunc nsw i32 [[SH_DIFF]] to i24
 ; CHECK-NEXT:    [[LT:%.*]] = and i24 [[TR_SH_DIFF]], -8
 ; CHECK-NEXT:    ret i24 [[LT]]
 ;
@@ -502,7 +502,7 @@ define <2 x i6> @shl_lshr_demand5_undef_left(<2 x i8> %x) {
 ; CHECK-LABEL: @shl_lshr_demand5_undef_left(
 ; CHECK-NEXT:    [[SHL:%.*]] = shl <2 x i8> <i8 undef, i8 -108>, [[X:%.*]]
 ; CHECK-NEXT:    [[LSHR:%.*]] = lshr <2 x i8> [[SHL]], <i8 2, i8 2>
-; CHECK-NEXT:    [[R:%.*]] = trunc <2 x i8> [[LSHR]] to <2 x i6>
+; CHECK-NEXT:    [[R:%.*]] = trunc nuw <2 x i8> [[LSHR]] to <2 x i6>
 ; CHECK-NEXT:    ret <2 x i6> [[R]]
 ;
   %shl = shl <2 x i8> <i8 undef, i8 148>, %x ; 0b1001_0100
@@ -561,7 +561,7 @@ define <2 x i6> @shl_lshr_demand5_nonuniform_vec_both(<2 x i8> %x) {
 ; CHECK-LABEL: @shl_lshr_demand5_nonuniform_vec_both(
 ; CHECK-NEXT:    [[SHL:%.*]] = shl <2 x i8> <i8 -104, i8 -108>, [[X:%.*]]
 ; CHECK-NEXT:    [[LSHR:%.*]] = lshr <2 x i8> [[SHL]], <i8 3, i8 2>
-; CHECK-NEXT:    [[R:%.*]] = trunc <2 x i8> [[LSHR]] to <2 x i6>
+; CHECK-NEXT:    [[R:%.*]] = trunc nuw <2 x i8> [[LSHR]] to <2 x i6>
 ; CHECK-NEXT:    ret <2 x i6> [[R]]
 ;
   %shl = shl <2 x i8> <i8 152, i8 148>, %x ; 0b1001_1000, 0b1001_0100

diff  --git a/llvm/test/Transforms/InstCombine/shift.ll b/llvm/test/Transforms/InstCombine/shift.ll
index bef7fc81a7d1f9..bb8661919c89f5 100644
--- a/llvm/test/Transforms/InstCombine/shift.ll
+++ b/llvm/test/Transforms/InstCombine/shift.ll
@@ -423,7 +423,7 @@ define i32 @test29(i64 %d18) {
 ; CHECK-LABEL: @test29(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[SUM_SHIFT:%.*]] = lshr i64 [[D18:%.*]], 63
-; CHECK-NEXT:    [[I101:%.*]] = trunc i64 [[SUM_SHIFT]] to i32
+; CHECK-NEXT:    [[I101:%.*]] = trunc nuw nsw i64 [[SUM_SHIFT]] to i32
 ; CHECK-NEXT:    ret i32 [[I101]]
 ;
 entry:
@@ -437,7 +437,7 @@ define <2 x i32> @test29_uniform(<2 x i64> %d18) {
 ; CHECK-LABEL: @test29_uniform(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[SUM_SHIFT:%.*]] = lshr <2 x i64> [[D18:%.*]], <i64 63, i64 63>
-; CHECK-NEXT:    [[I101:%.*]] = trunc <2 x i64> [[SUM_SHIFT]] to <2 x i32>
+; CHECK-NEXT:    [[I101:%.*]] = trunc nuw nsw <2 x i64> [[SUM_SHIFT]] to <2 x i32>
 ; CHECK-NEXT:    ret <2 x i32> [[I101]]
 ;
 entry:
@@ -466,7 +466,7 @@ define <2 x i32> @test29_poison(<2 x i64> %d18) {
 ; CHECK-LABEL: @test29_poison(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[I916:%.*]] = lshr <2 x i64> [[D18:%.*]], <i64 32, i64 poison>
-; CHECK-NEXT:    [[I917:%.*]] = trunc <2 x i64> [[I916]] to <2 x i32>
+; CHECK-NEXT:    [[I917:%.*]] = trunc nuw <2 x i64> [[I916]] to <2 x i32>
 ; CHECK-NEXT:    [[I10:%.*]] = lshr <2 x i32> [[I917]], <i32 31, i32 poison>
 ; CHECK-NEXT:    ret <2 x i32> [[I10]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/shl-demand.ll b/llvm/test/Transforms/InstCombine/shl-demand.ll
index 26175ebbe15358..08e6e745818489 100644
--- a/llvm/test/Transforms/InstCombine/shl-demand.ll
+++ b/llvm/test/Transforms/InstCombine/shl-demand.ll
@@ -222,7 +222,7 @@ define i8 @must_drop_poison(i32 %x, i32 %y)  {
 define i32 @f_t15_t01_t09(i40 %t2) {
 ; CHECK-LABEL: @f_t15_t01_t09(
 ; CHECK-NEXT:    [[SH_DIFF:%.*]] = ashr i40 [[T2:%.*]], 15
-; CHECK-NEXT:    [[TR_SH_DIFF:%.*]] = trunc i40 [[SH_DIFF]] to i32
+; CHECK-NEXT:    [[TR_SH_DIFF:%.*]] = trunc nsw i40 [[SH_DIFF]] to i32
 ; CHECK-NEXT:    [[SHL1:%.*]] = and i32 [[TR_SH_DIFF]], -65536
 ; CHECK-NEXT:    ret i32 [[SHL1]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/sign-bit-test-via-right-shifting-all-other-bits.ll b/llvm/test/Transforms/InstCombine/sign-bit-test-via-right-shifting-all-other-bits.ll
index d3ac6cfa9c6017..30fad66bf5218d 100644
--- a/llvm/test/Transforms/InstCombine/sign-bit-test-via-right-shifting-all-other-bits.ll
+++ b/llvm/test/Transforms/InstCombine/sign-bit-test-via-right-shifting-all-other-bits.ll
@@ -322,7 +322,7 @@ define i1 @unsigned_sign_bit_extract_with_trunc_extrause(i64 %x) {
 ; CHECK-LABEL: @unsigned_sign_bit_extract_with_trunc_extrause(
 ; CHECK-NEXT:    [[SIGNBIT:%.*]] = lshr i64 [[X:%.*]], 63
 ; CHECK-NEXT:    call void @use64(i64 [[SIGNBIT]])
-; CHECK-NEXT:    [[SIGNBIT_NARROW:%.*]] = trunc i64 [[SIGNBIT]] to i32
+; CHECK-NEXT:    [[SIGNBIT_NARROW:%.*]] = trunc nuw nsw i64 [[SIGNBIT]] to i32
 ; CHECK-NEXT:    call void @use32(i32 [[SIGNBIT_NARROW]])
 ; CHECK-NEXT:    [[ISNEG:%.*]] = icmp slt i64 [[X]], 0
 ; CHECK-NEXT:    ret i1 [[ISNEG]]
@@ -348,7 +348,7 @@ define i1 @signed_sign_bit_extract_trunc_extrause(i64 %x) {
 ; CHECK-LABEL: @signed_sign_bit_extract_trunc_extrause(
 ; CHECK-NEXT:    [[SIGNSMEAR:%.*]] = ashr i64 [[X:%.*]], 63
 ; CHECK-NEXT:    call void @use64(i64 [[SIGNSMEAR]])
-; CHECK-NEXT:    [[SIGNSMEAR_NARROW:%.*]] = trunc i64 [[SIGNSMEAR]] to i32
+; CHECK-NEXT:    [[SIGNSMEAR_NARROW:%.*]] = trunc nsw i64 [[SIGNSMEAR]] to i32
 ; CHECK-NEXT:    call void @use32(i32 [[SIGNSMEAR_NARROW]])
 ; CHECK-NEXT:    [[ISNEG:%.*]] = icmp slt i64 [[X]], 0
 ; CHECK-NEXT:    ret i1 [[ISNEG]]

diff  --git a/llvm/test/Transforms/InstCombine/trunc-demand.ll b/llvm/test/Transforms/InstCombine/trunc-demand.ll
index 4f6e79285eaa89..9d7bf589268e2b 100644
--- a/llvm/test/Transforms/InstCombine/trunc-demand.ll
+++ b/llvm/test/Transforms/InstCombine/trunc-demand.ll
@@ -36,7 +36,7 @@ define i6 @trunc_lshr_exact_mask(i8 %x) {
 define i6 @trunc_lshr_big_mask(i8 %x) {
 ; CHECK-LABEL: @trunc_lshr_big_mask(
 ; CHECK-NEXT:    [[S:%.*]] = lshr i8 [[X:%.*]], 2
-; CHECK-NEXT:    [[T:%.*]] = trunc i8 [[S]] to i6
+; CHECK-NEXT:    [[T:%.*]] = trunc nuw i8 [[S]] to i6
 ; CHECK-NEXT:    [[R:%.*]] = and i6 [[T]], 31
 ; CHECK-NEXT:    ret i6 [[R]]
 ;
@@ -52,7 +52,7 @@ define i6 @trunc_lshr_use1(i8 %x) {
 ; CHECK-LABEL: @trunc_lshr_use1(
 ; CHECK-NEXT:    [[S:%.*]] = lshr i8 [[X:%.*]], 2
 ; CHECK-NEXT:    call void @use8(i8 [[S]])
-; CHECK-NEXT:    [[T:%.*]] = trunc i8 [[S]] to i6
+; CHECK-NEXT:    [[T:%.*]] = trunc nuw i8 [[S]] to i6
 ; CHECK-NEXT:    [[R:%.*]] = and i6 [[T]], 15
 ; CHECK-NEXT:    ret i6 [[R]]
 ;
@@ -68,7 +68,7 @@ define i6 @trunc_lshr_use1(i8 %x) {
 define i6 @trunc_lshr_use2(i8 %x) {
 ; CHECK-LABEL: @trunc_lshr_use2(
 ; CHECK-NEXT:    [[S:%.*]] = lshr i8 [[X:%.*]], 2
-; CHECK-NEXT:    [[T:%.*]] = trunc i8 [[S]] to i6
+; CHECK-NEXT:    [[T:%.*]] = trunc nuw i8 [[S]] to i6
 ; CHECK-NEXT:    call void @use6(i6 [[T]])
 ; CHECK-NEXT:    [[R:%.*]] = and i6 [[T]], 15
 ; CHECK-NEXT:    ret i6 [[R]]
@@ -157,7 +157,7 @@ define i6 @or_trunc_lshr_more(i8 %x) {
 define i6 @or_trunc_lshr_small_mask(i8 %x) {
 ; CHECK-LABEL: @or_trunc_lshr_small_mask(
 ; CHECK-NEXT:    [[S:%.*]] = lshr i8 [[X:%.*]], 4
-; CHECK-NEXT:    [[T:%.*]] = trunc i8 [[S]] to i6
+; CHECK-NEXT:    [[T:%.*]] = trunc nuw nsw i8 [[S]] to i6
 ; CHECK-NEXT:    [[R:%.*]] = or i6 [[T]], -8
 ; CHECK-NEXT:    ret i6 [[R]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll b/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll
index 87c90bb91f39eb..4c857125365a9b 100644
--- a/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/trunc-inseltpoison.ll
@@ -171,7 +171,7 @@ define i32 @test5(i32 %A) {
 define i32 @test6(i64 %A) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[A:%.*]], 32
-; CHECK-NEXT:    [[D:%.*]] = trunc i64 [[TMP1]] to i32
+; CHECK-NEXT:    [[D:%.*]] = trunc nuw i64 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[D]]
 ;
   %B = zext i64 %A to i128
@@ -459,7 +459,7 @@ define <2 x i64> @test12_vec_undef(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-NEXT:    [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i128>
 ; CHECK-NEXT:    [[E:%.*]] = and <2 x i128> [[D]], <i128 31, i128 undef>
 ; CHECK-NEXT:    [[F:%.*]] = lshr <2 x i128> [[C]], [[E]]
-; CHECK-NEXT:    [[G:%.*]] = trunc <2 x i128> [[F]] to <2 x i64>
+; CHECK-NEXT:    [[G:%.*]] = trunc nuw nsw <2 x i128> [[F]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[G]]
 ;
   %C = zext <2 x i32> %A to <2 x i128>
@@ -524,7 +524,7 @@ define <2 x i64> @test13_vec_undef(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-NEXT:    [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i128>
 ; CHECK-NEXT:    [[E:%.*]] = and <2 x i128> [[D]], <i128 31, i128 undef>
 ; CHECK-NEXT:    [[F:%.*]] = ashr <2 x i128> [[C]], [[E]]
-; CHECK-NEXT:    [[G:%.*]] = trunc <2 x i128> [[F]] to <2 x i64>
+; CHECK-NEXT:    [[G:%.*]] = trunc nsw <2 x i128> [[F]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[G]]
 ;
   %C = sext <2 x i32> %A to <2 x i128>

diff  --git a/llvm/test/Transforms/InstCombine/trunc-shift-trunc.ll b/llvm/test/Transforms/InstCombine/trunc-shift-trunc.ll
index e578b604c9d6ae..2c5f428cf98de5 100644
--- a/llvm/test/Transforms/InstCombine/trunc-shift-trunc.ll
+++ b/llvm/test/Transforms/InstCombine/trunc-shift-trunc.ll
@@ -72,7 +72,7 @@ define i8 @trunc_lshr_trunc_outofrange(i64 %a) {
 ; CHECK-LABEL: @trunc_lshr_trunc_outofrange(
 ; CHECK-NEXT:    [[B:%.*]] = trunc i64 [[A:%.*]] to i32
 ; CHECK-NEXT:    [[C:%.*]] = lshr i32 [[B]], 25
-; CHECK-NEXT:    [[D:%.*]] = trunc i32 [[C]] to i8
+; CHECK-NEXT:    [[D:%.*]] = trunc nuw nsw i32 [[C]] to i8
 ; CHECK-NEXT:    ret i8 [[D]]
 ;
   %b = trunc i64 %a to i32
@@ -158,7 +158,7 @@ define i8 @trunc_ashr_trunc_outofrange(i64 %a) {
 ; CHECK-LABEL: @trunc_ashr_trunc_outofrange(
 ; CHECK-NEXT:    [[B:%.*]] = trunc i64 [[A:%.*]] to i32
 ; CHECK-NEXT:    [[C:%.*]] = ashr i32 [[B]], 25
-; CHECK-NEXT:    [[D:%.*]] = trunc i32 [[C]] to i8
+; CHECK-NEXT:    [[D:%.*]] = trunc nsw i32 [[C]] to i8
 ; CHECK-NEXT:    ret i8 [[D]]
 ;
   %b = trunc i64 %a to i32

diff  --git a/llvm/test/Transforms/InstCombine/trunc.ll b/llvm/test/Transforms/InstCombine/trunc.ll
index 760825d6b1da0d..c77d7269f2cf7d 100644
--- a/llvm/test/Transforms/InstCombine/trunc.ll
+++ b/llvm/test/Transforms/InstCombine/trunc.ll
@@ -171,7 +171,7 @@ define i32 @test5(i32 %A) {
 define i32 @test6(i64 %A) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[A:%.*]], 32
-; CHECK-NEXT:    [[D:%.*]] = trunc i64 [[TMP1]] to i32
+; CHECK-NEXT:    [[D:%.*]] = trunc nuw i64 [[TMP1]] to i32
 ; CHECK-NEXT:    ret i32 [[D]]
 ;
   %B = zext i64 %A to i128
@@ -459,7 +459,7 @@ define <2 x i64> @test12_vec_undef(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-NEXT:    [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i128>
 ; CHECK-NEXT:    [[E:%.*]] = and <2 x i128> [[D]], <i128 31, i128 undef>
 ; CHECK-NEXT:    [[F:%.*]] = lshr <2 x i128> [[C]], [[E]]
-; CHECK-NEXT:    [[G:%.*]] = trunc <2 x i128> [[F]] to <2 x i64>
+; CHECK-NEXT:    [[G:%.*]] = trunc nuw nsw <2 x i128> [[F]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[G]]
 ;
   %C = zext <2 x i32> %A to <2 x i128>
@@ -524,7 +524,7 @@ define <2 x i64> @test13_vec_undef(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-NEXT:    [[D:%.*]] = zext <2 x i32> [[B:%.*]] to <2 x i128>
 ; CHECK-NEXT:    [[E:%.*]] = and <2 x i128> [[D]], <i128 31, i128 undef>
 ; CHECK-NEXT:    [[F:%.*]] = ashr <2 x i128> [[C]], [[E]]
-; CHECK-NEXT:    [[G:%.*]] = trunc <2 x i128> [[F]] to <2 x i64>
+; CHECK-NEXT:    [[G:%.*]] = trunc nsw <2 x i128> [[F]] to <2 x i64>
 ; CHECK-NEXT:    ret <2 x i64> [[G]]
 ;
   %C = sext <2 x i32> %A to <2 x i128>

diff  --git a/llvm/test/Transforms/InstCombine/truncating-saturate.ll b/llvm/test/Transforms/InstCombine/truncating-saturate.ll
index e4df94afd17418..c0111528e2a4d8 100644
--- a/llvm/test/Transforms/InstCombine/truncating-saturate.ll
+++ b/llvm/test/Transforms/InstCombine/truncating-saturate.ll
@@ -10,7 +10,7 @@ define i8 @testi16i8(i16 %add) {
 ; CHECK-LABEL: @testi16i8(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.smax.i16(i16 [[ADD:%.*]], i16 -128)
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smin.i16(i16 [[TMP1]], i16 127)
-; CHECK-NEXT:    [[COND_I:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT:    [[COND_I:%.*]] = trunc nsw i16 [[TMP2]] to i8
 ; CHECK-NEXT:    ret i8 [[COND_I]]
 ;
   %sh = lshr i16 %add, 8
@@ -29,7 +29,7 @@ define i32 @testi64i32(i64 %add) {
 ; CHECK-LABEL: @testi64i32(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.smax.i64(i64 [[ADD:%.*]], i64 -2147483648)
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.smin.i64(i64 [[TMP1]], i64 2147483647)
-; CHECK-NEXT:    [[COND_I:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT:    [[COND_I:%.*]] = trunc nsw i64 [[TMP2]] to i32
 ; CHECK-NEXT:    ret i32 [[COND_I]]
 ;
   %sh = lshr i64 %add, 32
@@ -48,7 +48,7 @@ define i16 @testi32i16i8(i32 %add) {
 ; CHECK-LABEL: @testi32i16i8(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.smax.i32(i32 [[ADD:%.*]], i32 -128)
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.smin.i32(i32 [[TMP1]], i32 127)
-; CHECK-NEXT:    [[R:%.*]] = trunc i32 [[TMP2]] to i16
+; CHECK-NEXT:    [[R:%.*]] = trunc nsw i32 [[TMP2]] to i16
 ; CHECK-NEXT:    ret i16 [[R]]
 ;
   %a = add i32 %add, 128
@@ -64,7 +64,7 @@ define <4 x i16> @testv4i32i16i8(<4 x i32> %add) {
 ; CHECK-LABEL: @testv4i32i16i8(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.smax.v4i32(<4 x i32> [[ADD:%.*]], <4 x i32> <i32 -128, i32 -128, i32 -128, i32 -128>)
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> [[TMP1]], <4 x i32> <i32 127, i32 127, i32 127, i32 127>)
-; CHECK-NEXT:    [[R:%.*]] = trunc <4 x i32> [[TMP2]] to <4 x i16>
+; CHECK-NEXT:    [[R:%.*]] = trunc nsw <4 x i32> [[TMP2]] to <4 x i16>
 ; CHECK-NEXT:    ret <4 x i16> [[R]]
 ;
   %a = add <4 x i32> %add, <i32 128, i32 128, i32 128, i32 128>
@@ -149,7 +149,7 @@ define <4 x i8> @testv4i16i8(<4 x i16> %add) {
 ; CHECK-LABEL: @testv4i16i8(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i16> @llvm.smax.v4i16(<4 x i16> [[ADD:%.*]], <4 x i16> <i16 -128, i16 -128, i16 -128, i16 -128>)
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i16> @llvm.smin.v4i16(<4 x i16> [[TMP1]], <4 x i16> <i16 127, i16 127, i16 127, i16 127>)
-; CHECK-NEXT:    [[COND_I:%.*]] = trunc <4 x i16> [[TMP2]] to <4 x i8>
+; CHECK-NEXT:    [[COND_I:%.*]] = trunc nsw <4 x i16> [[TMP2]] to <4 x i8>
 ; CHECK-NEXT:    ret <4 x i8> [[COND_I]]
 ;
   %sh = lshr <4 x i16> %add, <i16 8, i16 8, i16 8, i16 8>
@@ -188,7 +188,7 @@ define i8 @testi16i8_revcmp(i16 %add) {
 ; CHECK-LABEL: @testi16i8_revcmp(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.smax.i16(i16 [[ADD:%.*]], i16 -128)
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smin.i16(i16 [[TMP1]], i16 127)
-; CHECK-NEXT:    [[COND_I:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT:    [[COND_I:%.*]] = trunc nsw i16 [[TMP2]] to i8
 ; CHECK-NEXT:    ret i8 [[COND_I]]
 ;
   %sh = lshr i16 %add, 8
@@ -207,7 +207,7 @@ define i8 @testi16i8_revselect(i16 %add) {
 ; CHECK-LABEL: @testi16i8_revselect(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i16 @llvm.smax.i16(i16 [[ADD:%.*]], i16 -128)
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smin.i16(i16 [[TMP1]], i16 127)
-; CHECK-NEXT:    [[COND_I:%.*]] = trunc i16 [[TMP2]] to i8
+; CHECK-NEXT:    [[COND_I:%.*]] = trunc nsw i16 [[TMP2]] to i8
 ; CHECK-NEXT:    ret i8 [[COND_I]]
 ;
   %sh = lshr i16 %add, 8
@@ -268,7 +268,7 @@ define i16 @differentconsts(i32 %x, i16 %replacement_low, i16 %replacement_high)
 define i8 @badimm1(i16 %add) {
 ; CHECK-LABEL: @badimm1(
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i16 [[ADD:%.*]], 9
-; CHECK-NEXT:    [[CONV_I:%.*]] = trunc i16 [[SH]] to i8
+; CHECK-NEXT:    [[CONV_I:%.*]] = trunc nuw nsw i16 [[SH]] to i8
 ; CHECK-NEXT:    [[CONV1_I:%.*]] = trunc i16 [[ADD]] to i8
 ; CHECK-NEXT:    [[SHR2_I:%.*]] = ashr i8 [[CONV1_I]], 7
 ; CHECK-NEXT:    [[CMP_NOT_I:%.*]] = icmp eq i8 [[SHR2_I]], [[CONV_I]]
@@ -292,7 +292,7 @@ define i8 @badimm1(i16 %add) {
 define i8 @badimm2(i16 %add) {
 ; CHECK-LABEL: @badimm2(
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i16 [[ADD:%.*]], 8
-; CHECK-NEXT:    [[CONV_I:%.*]] = trunc i16 [[SH]] to i8
+; CHECK-NEXT:    [[CONV_I:%.*]] = trunc nuw i16 [[SH]] to i8
 ; CHECK-NEXT:    [[CONV1_I:%.*]] = trunc i16 [[ADD]] to i8
 ; CHECK-NEXT:    [[SHR2_I:%.*]] = ashr i8 [[CONV1_I]], 6
 ; CHECK-NEXT:    [[CMP_NOT_I:%.*]] = icmp eq i8 [[SHR2_I]], [[CONV_I]]
@@ -319,7 +319,7 @@ define i8 @badimm3(i16 %add) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i16 [[ADD]], 128
 ; CHECK-NEXT:    [[CMP_NOT_I:%.*]] = icmp ult i16 [[TMP1]], 256
 ; CHECK-NEXT:    [[SHR4_I:%.*]] = ashr i16 [[ADD]], 14
-; CHECK-NEXT:    [[CONV5_I:%.*]] = trunc i16 [[SHR4_I]] to i8
+; CHECK-NEXT:    [[CONV5_I:%.*]] = trunc nsw i16 [[SHR4_I]] to i8
 ; CHECK-NEXT:    [[XOR_I:%.*]] = xor i8 [[CONV5_I]], 127
 ; CHECK-NEXT:    [[COND_I:%.*]] = select i1 [[CMP_NOT_I]], i8 [[CONV1_I]], i8 [[XOR_I]]
 ; CHECK-NEXT:    ret i8 [[COND_I]]

diff  --git a/llvm/test/Transforms/InstCombine/vector-trunc.ll b/llvm/test/Transforms/InstCombine/vector-trunc.ll
index eeb5a3fdb73988..bccb12e66eba19 100644
--- a/llvm/test/Transforms/InstCombine/vector-trunc.ll
+++ b/llvm/test/Transforms/InstCombine/vector-trunc.ll
@@ -4,7 +4,7 @@
 define <4 x i16> @trunc_add_nsw(<4 x i32> %0) {
 ; CHECK-LABEL: @trunc_add_nsw(
 ; CHECK-NEXT:    [[TMP2:%.*]] = ashr <4 x i32> [[TMP0:%.*]], <i32 17, i32 17, i32 17, i32 17>
-; CHECK-NEXT:    [[TMP3:%.*]] = trunc <4 x i32> [[TMP2]] to <4 x i16>
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc nsw <4 x i32> [[TMP2]] to <4 x i16>
 ; CHECK-NEXT:    [[TMP4:%.*]] = add nsw <4 x i16> [[TMP3]], <i16 1, i16 1, i16 1, i16 1>
 ; CHECK-NEXT:    ret <4 x i16> [[TMP4]]
 ;
@@ -17,7 +17,7 @@ define <4 x i16> @trunc_add_nsw(<4 x i32> %0) {
 define <4 x i16> @trunc_add_no_nsw(<4 x i32> %0) {
 ; CHECK-LABEL: @trunc_add_no_nsw(
 ; CHECK-NEXT:    [[TMP2:%.*]] = lshr <4 x i32> [[TMP0:%.*]], <i32 16, i32 16, i32 16, i32 16>
-; CHECK-NEXT:    [[TMP3:%.*]] = trunc <4 x i32> [[TMP2]] to <4 x i16>
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc nuw <4 x i32> [[TMP2]] to <4 x i16>
 ; CHECK-NEXT:    [[TMP4:%.*]] = add <4 x i16> [[TMP3]], <i16 1, i16 1, i16 1, i16 1>
 ; CHECK-NEXT:    ret <4 x i16> [[TMP4]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/xor-ashr.ll b/llvm/test/Transforms/InstCombine/xor-ashr.ll
index 32ca8e338c2ccb..097e04b3b9cb56 100644
--- a/llvm/test/Transforms/InstCombine/xor-ashr.ll
+++ b/llvm/test/Transforms/InstCombine/xor-ashr.ll
@@ -80,7 +80,7 @@ define <4 x i8> @testv4i16i8_undef(<4 x i16> %add) {
 define i8 @wrongimm(i16 %add) {
 ; CHECK-LABEL: @wrongimm(
 ; CHECK-NEXT:    [[SH:%.*]] = ashr i16 [[ADD:%.*]], 14
-; CHECK-NEXT:    [[T:%.*]] = trunc i16 [[SH]] to i8
+; CHECK-NEXT:    [[T:%.*]] = trunc nsw i16 [[SH]] to i8
 ; CHECK-NEXT:    [[X:%.*]] = xor i8 [[T]], 27
 ; CHECK-NEXT:    ret i8 [[X]]
 ;
@@ -140,7 +140,7 @@ define i16 @extrause_trunc1(i32 %add) {
 define i16 @extrause_trunc2(i32 %add) {
 ; CHECK-LABEL: @extrause_trunc2(
 ; CHECK-NEXT:    [[SH:%.*]] = ashr i32 [[ADD:%.*]], 31
-; CHECK-NEXT:    [[T:%.*]] = trunc i32 [[SH]] to i16
+; CHECK-NEXT:    [[T:%.*]] = trunc nsw i32 [[SH]] to i16
 ; CHECK-NEXT:    call void @use16(i16 [[T]])
 ; CHECK-NEXT:    [[X:%.*]] = xor i16 [[T]], 127
 ; CHECK-NEXT:    ret i16 [[X]]

diff  --git a/llvm/test/Transforms/InstCombine/zext-ctlz-trunc-to-ctlz-add.ll b/llvm/test/Transforms/InstCombine/zext-ctlz-trunc-to-ctlz-add.ll
index f082873bf7839e..c8eb513a8440b4 100644
--- a/llvm/test/Transforms/InstCombine/zext-ctlz-trunc-to-ctlz-add.ll
+++ b/llvm/test/Transforms/InstCombine/zext-ctlz-trunc-to-ctlz-add.ll
@@ -57,7 +57,7 @@ define <2 x i17> @trunc_ctlz_zext_v2i17_v2i32_multiple_uses(<2 x i17> %x) {
 ; CHECK-LABEL: @trunc_ctlz_zext_v2i17_v2i32_multiple_uses(
 ; CHECK-NEXT:    [[Z:%.*]] = zext <2 x i17> [[X:%.*]] to <2 x i32>
 ; CHECK-NEXT:    [[P:%.*]] = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> [[Z]], i1 false), !range [[RNG2:![0-9]+]]
-; CHECK-NEXT:    [[ZZ:%.*]] = trunc <2 x i32> [[P]] to <2 x i17>
+; CHECK-NEXT:    [[ZZ:%.*]] = trunc nuw nsw <2 x i32> [[P]] to <2 x i17>
 ; CHECK-NEXT:    call void @use(<2 x i32> [[P]])
 ; CHECK-NEXT:    ret <2 x i17> [[ZZ]]
 ;
@@ -91,7 +91,7 @@ define i16 @trunc_ctlz_zext_i10_i32(i10 %x) {
 ; CHECK-LABEL: @trunc_ctlz_zext_i10_i32(
 ; CHECK-NEXT:    [[Z:%.*]] = zext i10 [[X:%.*]] to i32
 ; CHECK-NEXT:    [[P:%.*]] = call i32 @llvm.ctlz.i32(i32 [[Z]], i1 false), !range [[RNG3:![0-9]+]]
-; CHECK-NEXT:    [[ZZ:%.*]] = trunc i32 [[P]] to i16
+; CHECK-NEXT:    [[ZZ:%.*]] = trunc nuw nsw i32 [[P]] to i16
 ; CHECK-NEXT:    ret i16 [[ZZ]]
 ;
   %z = zext i10 %x to i32

diff  --git a/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll b/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll
index 9a7b9f570cf770..ed8d8e15282d57 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll
@@ -34,14 +34,14 @@ define void @test_pr25490(i32 %n, ptr noalias nocapture %a, ptr noalias nocaptur
 ; CHECK-NEXT:    [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i16>
 ; CHECK-NEXT:    [[TMP5:%.*]] = mul nuw <16 x i16> [[TMP3]], [[TMP4]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = lshr <16 x i16> [[TMP5]], <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
-; CHECK-NEXT:    [[TMP7:%.*]] = trunc <16 x i16> [[TMP6]] to <16 x i8>
+; CHECK-NEXT:    [[TMP7:%.*]] = trunc nuw <16 x i16> [[TMP6]] to <16 x i8>
 ; CHECK-NEXT:    store <16 x i8> [[TMP7]], ptr [[TMP2]], align 1
 ; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
 ; CHECK-NEXT:    [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
 ; CHECK-NEXT:    [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i16>
 ; CHECK-NEXT:    [[TMP10:%.*]] = mul nuw <16 x i16> [[TMP9]], [[TMP4]]
 ; CHECK-NEXT:    [[TMP11:%.*]] = lshr <16 x i16> [[TMP10]], <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
-; CHECK-NEXT:    [[TMP12:%.*]] = trunc <16 x i16> [[TMP11]] to <16 x i8>
+; CHECK-NEXT:    [[TMP12:%.*]] = trunc nuw <16 x i16> [[TMP11]] to <16 x i8>
 ; CHECK-NEXT:    store <16 x i8> [[TMP12]], ptr [[TMP8]], align 1
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
 ; CHECK-NEXT:    [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -67,14 +67,14 @@ define void @test_pr25490(i32 %n, ptr noalias nocapture %a, ptr noalias nocaptur
 ; CHECK-NEXT:    [[TMP17:%.*]] = zext <8 x i8> [[WIDE_LOAD8]] to <8 x i16>
 ; CHECK-NEXT:    [[TMP18:%.*]] = mul nuw <8 x i16> [[TMP16]], [[TMP17]]
 ; CHECK-NEXT:    [[TMP19:%.*]] = lshr <8 x i16> [[TMP18]], <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
-; CHECK-NEXT:    [[TMP20:%.*]] = trunc <8 x i16> [[TMP19]] to <8 x i8>
+; CHECK-NEXT:    [[TMP20:%.*]] = trunc nuw <8 x i16> [[TMP19]] to <8 x i8>
 ; CHECK-NEXT:    store <8 x i8> [[TMP20]], ptr [[TMP15]], align 1
 ; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX7]]
 ; CHECK-NEXT:    [[WIDE_LOAD10:%.*]] = load <8 x i8>, ptr [[TMP21]], align 1
 ; CHECK-NEXT:    [[TMP22:%.*]] = zext <8 x i8> [[WIDE_LOAD10]] to <8 x i16>
 ; CHECK-NEXT:    [[TMP23:%.*]] = mul nuw <8 x i16> [[TMP22]], [[TMP17]]
 ; CHECK-NEXT:    [[TMP24:%.*]] = lshr <8 x i16> [[TMP23]], <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
-; CHECK-NEXT:    [[TMP25:%.*]] = trunc <8 x i16> [[TMP24]] to <8 x i8>
+; CHECK-NEXT:    [[TMP25:%.*]] = trunc nuw <8 x i16> [[TMP24]] to <8 x i8>
 ; CHECK-NEXT:    store <8 x i8> [[TMP25]], ptr [[TMP21]], align 1
 ; CHECK-NEXT:    [[INDEX_NEXT11]] = add nuw i64 [[INDEX7]], 8
 ; CHECK-NEXT:    [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT11]], [[N_VEC5]]
@@ -99,14 +99,14 @@ define void @test_pr25490(i32 %n, ptr noalias nocapture %a, ptr noalias nocaptur
 ; CHECK-NEXT:    [[CONV3:%.*]] = zext i8 [[TMP28]] to i32
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[CONV3]], [[CONV]]
 ; CHECK-NEXT:    [[SHR_26:%.*]] = lshr i32 [[MUL]], 8
-; CHECK-NEXT:    [[CONV4:%.*]] = trunc i32 [[SHR_26]] to i8
+; CHECK-NEXT:    [[CONV4:%.*]] = trunc nuw i32 [[SHR_26]] to i8
 ; CHECK-NEXT:    store i8 [[CONV4]], ptr [[ARRAYIDX2]], align 1
 ; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDVARS_IV]]
 ; CHECK-NEXT:    [[TMP29:%.*]] = load i8, ptr [[ARRAYIDX8]], align 1
 ; CHECK-NEXT:    [[CONV9:%.*]] = zext i8 [[TMP29]] to i32
 ; CHECK-NEXT:    [[MUL10:%.*]] = mul nuw nsw i32 [[CONV9]], [[CONV]]
 ; CHECK-NEXT:    [[SHR11_27:%.*]] = lshr i32 [[MUL10]], 8
-; CHECK-NEXT:    [[CONV12:%.*]] = trunc i32 [[SHR11_27]] to i8
+; CHECK-NEXT:    [[CONV12:%.*]] = trunc nuw i32 [[SHR11_27]] to i8
 ; CHECK-NEXT:    store i8 [[CONV12]], ptr [[ARRAYIDX8]], align 1
 ; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT:    [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
@@ -172,8 +172,8 @@ define void @test_shrink_zext_in_preheader(ptr noalias %src, ptr noalias %dst, i
 ; CHECK-NEXT:    [[TMP6:%.*]] = mul <16 x i16> [[TMP2]], [[TMP4]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = lshr <16 x i16> [[TMP5]], <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
 ; CHECK-NEXT:    [[TMP8:%.*]] = lshr <16 x i16> [[TMP6]], <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
-; CHECK-NEXT:    [[TMP9:%.*]] = trunc <16 x i16> [[TMP7]] to <16 x i8>
-; CHECK-NEXT:    [[TMP10:%.*]] = trunc <16 x i16> [[TMP8]] to <16 x i8>
+; CHECK-NEXT:    [[TMP9:%.*]] = trunc nuw <16 x i16> [[TMP7]] to <16 x i8>
+; CHECK-NEXT:    [[TMP10:%.*]] = trunc nuw <16 x i16> [[TMP8]] to <16 x i8>
 ; CHECK-NEXT:    [[TMP11:%.*]] = sext i32 [[INDEX]] to i64
 ; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP11]]
 ; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i64 16

diff  --git a/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll
index b772a3814a64ac..0f26092f510ca7 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/intrinsiccost.ll
@@ -23,7 +23,7 @@ define void @saddsat(ptr nocapture readonly %pSrc, i16 signext %offset, ptr noca
 ; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; CHECK:       vector.ph:
 ; CHECK-NEXT:    [[N_VEC:%.*]] = and i64 [[TMP0]], 4294967280
-; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
+; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc nuw i64 [[N_VEC]] to i32
 ; CHECK-NEXT:    [[IND_END:%.*]] = sub i32 [[BLOCKSIZE]], [[DOTCAST]]
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[N_VEC]], 1
 ; CHECK-NEXT:    [[IND_END1:%.*]] = getelementptr i8, ptr [[PSRC:%.*]], i64 [[TMP1]]
@@ -138,7 +138,7 @@ define void @umin(ptr nocapture readonly %pSrc, i8 signext %offset, ptr nocaptur
 ; CHECK:       vec.epilog.iter.check:
 ; CHECK-NEXT:    [[IND_END18:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[N_VEC]]
 ; CHECK-NEXT:    [[IND_END15:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[N_VEC]]
-; CHECK-NEXT:    [[DOTCAST11:%.*]] = trunc i64 [[N_VEC]] to i32
+; CHECK-NEXT:    [[DOTCAST11:%.*]] = trunc nuw i64 [[N_VEC]] to i32
 ; CHECK-NEXT:    [[IND_END12:%.*]] = sub i32 [[BLOCKSIZE]], [[DOTCAST11]]
 ; CHECK-NEXT:    [[N_VEC_REMAINING:%.*]] = and i64 [[TMP0]], 24
 ; CHECK-NEXT:    [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp eq i64 [[N_VEC_REMAINING]], 0
@@ -146,7 +146,7 @@ define void @umin(ptr nocapture readonly %pSrc, i8 signext %offset, ptr nocaptur
 ; CHECK:       vec.epilog.ph:
 ; CHECK-NEXT:    [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
 ; CHECK-NEXT:    [[N_VEC9:%.*]] = and i64 [[TMP0]], 4294967288
-; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc i64 [[N_VEC9]] to i32
+; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc nuw i64 [[N_VEC9]] to i32
 ; CHECK-NEXT:    [[IND_END10:%.*]] = sub i32 [[BLOCKSIZE]], [[DOTCAST]]
 ; CHECK-NEXT:    [[IND_END14:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[N_VEC9]]
 ; CHECK-NEXT:    [[IND_END17:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[N_VEC9]]

diff  --git a/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll b/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll
index 23b653bbda380b..b5effe73fa73d2 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/intrinsiccost.ll
@@ -60,7 +60,7 @@ define void @uaddsat(ptr nocapture readonly %pSrc, i16 signext %offset, ptr noca
 ; CHECK-NEXT:    [[IND_END24:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[TMP14]]
 ; CHECK-NEXT:    [[TMP15:%.*]] = shl nuw nsw i64 [[N_VEC]], 1
 ; CHECK-NEXT:    [[IND_END21:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[TMP15]]
-; CHECK-NEXT:    [[DOTCAST17:%.*]] = trunc i64 [[N_VEC]] to i32
+; CHECK-NEXT:    [[DOTCAST17:%.*]] = trunc nuw i64 [[N_VEC]] to i32
 ; CHECK-NEXT:    [[IND_END18:%.*]] = sub i32 [[BLOCKSIZE]], [[DOTCAST17]]
 ; CHECK-NEXT:    [[N_VEC_REMAINING:%.*]] = and i64 [[TMP0]], 56
 ; CHECK-NEXT:    [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp eq i64 [[N_VEC_REMAINING]], 0
@@ -68,7 +68,7 @@ define void @uaddsat(ptr nocapture readonly %pSrc, i16 signext %offset, ptr noca
 ; CHECK:       vec.epilog.ph:
 ; CHECK-NEXT:    [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
 ; CHECK-NEXT:    [[N_VEC15:%.*]] = and i64 [[TMP0]], 4294967288
-; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc i64 [[N_VEC15]] to i32
+; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc nuw i64 [[N_VEC15]] to i32
 ; CHECK-NEXT:    [[IND_END16:%.*]] = sub i32 [[BLOCKSIZE]], [[DOTCAST]]
 ; CHECK-NEXT:    [[TMP16:%.*]] = shl nuw nsw i64 [[N_VEC15]], 1
 ; CHECK-NEXT:    [[IND_END20:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[TMP16]]
@@ -183,7 +183,7 @@ define void @fshl(ptr nocapture readonly %pSrc, i8 signext %offset, ptr nocaptur
 ; CHECK:       vec.epilog.iter.check:
 ; CHECK-NEXT:    [[IND_END24:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[N_VEC]]
 ; CHECK-NEXT:    [[IND_END21:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[N_VEC]]
-; CHECK-NEXT:    [[DOTCAST17:%.*]] = trunc i64 [[N_VEC]] to i32
+; CHECK-NEXT:    [[DOTCAST17:%.*]] = trunc nuw i64 [[N_VEC]] to i32
 ; CHECK-NEXT:    [[IND_END18:%.*]] = sub i32 [[BLOCKSIZE]], [[DOTCAST17]]
 ; CHECK-NEXT:    [[N_VEC_REMAINING:%.*]] = and i64 [[TMP0]], 112
 ; CHECK-NEXT:    [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp eq i64 [[N_VEC_REMAINING]], 0
@@ -191,7 +191,7 @@ define void @fshl(ptr nocapture readonly %pSrc, i8 signext %offset, ptr nocaptur
 ; CHECK:       vec.epilog.ph:
 ; CHECK-NEXT:    [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
 ; CHECK-NEXT:    [[N_VEC15:%.*]] = and i64 [[TMP0]], 4294967280
-; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc i64 [[N_VEC15]] to i32
+; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc nuw i64 [[N_VEC15]] to i32
 ; CHECK-NEXT:    [[IND_END16:%.*]] = sub i32 [[BLOCKSIZE]], [[DOTCAST]]
 ; CHECK-NEXT:    [[IND_END20:%.*]] = getelementptr i8, ptr [[PSRC]], i64 [[N_VEC15]]
 ; CHECK-NEXT:    [[IND_END23:%.*]] = getelementptr i8, ptr [[PDST]], i64 [[N_VEC15]]

diff  --git a/llvm/test/Transforms/LoopVectorize/reduction.ll b/llvm/test/Transforms/LoopVectorize/reduction.ll
index a47a38510eeeb3..b66ce4047ad95e 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction.ll
@@ -1205,7 +1205,7 @@ define i64 @reduction_with_phi_with_one_incoming_on_backedge(i16 %n, ptr %A) {
 ; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; CHECK:       vector.ph:
 ; CHECK-NEXT:    [[N_VEC:%.*]] = and i32 [[TMP1]], 32764
-; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i16
+; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc nuw nsw i32 [[N_VEC]] to i16
 ; CHECK-NEXT:    [[IND_END:%.*]] = or disjoint i16 [[DOTCAST]], 1
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; CHECK:       vector.body:
@@ -1283,7 +1283,7 @@ define i64 @reduction_with_phi_with_two_incoming_on_backedge(i16 %n, ptr %A) {
 ; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; CHECK:       vector.ph:
 ; CHECK-NEXT:    [[N_VEC:%.*]] = and i32 [[TMP1]], 32764
-; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i16
+; CHECK-NEXT:    [[DOTCAST:%.*]] = trunc nuw nsw i32 [[N_VEC]] to i16
 ; CHECK-NEXT:    [[IND_END:%.*]] = or disjoint i16 [[DOTCAST]], 1
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; CHECK:       vector.body:

diff  --git a/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
index d18b207e87076b..9206893cb2341e 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
@@ -45,8 +45,8 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[TMP14:%.*]] = mul <8 x i32> [[TMP12]], [[TMP10]]
 ; CHECK-NEXT:    [[TMP15:%.*]] = lshr <8 x i32> [[TMP13]], <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
 ; CHECK-NEXT:    [[TMP16:%.*]] = lshr <8 x i32> [[TMP14]], <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-; CHECK-NEXT:    [[TMP17:%.*]] = trunc <8 x i32> [[TMP15]] to <8 x i16>
-; CHECK-NEXT:    [[TMP18:%.*]] = trunc <8 x i32> [[TMP16]] to <8 x i16>
+; CHECK-NEXT:    [[TMP17:%.*]] = trunc nuw <8 x i32> [[TMP15]] to <8 x i16>
+; CHECK-NEXT:    [[TMP18:%.*]] = trunc nuw <8 x i32> [[TMP16]] to <8 x i16>
 ; CHECK-NEXT:    [[TMP19:%.*]] = sub <8 x i16> zeroinitializer, [[TMP17]]
 ; CHECK-NEXT:    [[TMP20:%.*]] = sub <8 x i16> zeroinitializer, [[TMP18]]
 ; CHECK-NEXT:    [[TMP21:%.*]] = add nuw nsw <8 x i32> [[TMP6]], [[TMP1]]
@@ -55,8 +55,8 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[TMP24:%.*]] = mul <8 x i32> [[TMP22]], [[TMP10]]
 ; CHECK-NEXT:    [[TMP25:%.*]] = lshr <8 x i32> [[TMP23]], <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
 ; CHECK-NEXT:    [[TMP26:%.*]] = lshr <8 x i32> [[TMP24]], <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
-; CHECK-NEXT:    [[TMP27:%.*]] = trunc <8 x i32> [[TMP25]] to <8 x i16>
-; CHECK-NEXT:    [[TMP28:%.*]] = trunc <8 x i32> [[TMP26]] to <8 x i16>
+; CHECK-NEXT:    [[TMP27:%.*]] = trunc nuw <8 x i32> [[TMP25]] to <8 x i16>
+; CHECK-NEXT:    [[TMP28:%.*]] = trunc nuw <8 x i32> [[TMP26]] to <8 x i16>
 ; CHECK-NEXT:    [[PREDPHI:%.*]] = select <8 x i1> [[TMP3]], <8 x i16> [[TMP27]], <8 x i16> [[TMP19]]
 ; CHECK-NEXT:    [[PREDPHI34:%.*]] = select <8 x i1> [[TMP4]], <8 x i16> [[TMP28]], <8 x i16> [[TMP20]]
 ; CHECK-NEXT:    store <8 x i16> [[PREDPHI]], ptr [[DCT]], align 2, !alias.scope [[META0]], !noalias [[META3]]
@@ -83,13 +83,13 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[CONV5]], [[CONV]]
 ; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[ADD]], [[CONV11]]
 ; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[MUL]], 16
-; CHECK-NEXT:    [[CONV12:%.*]] = trunc i32 [[SHR]] to i16
+; CHECK-NEXT:    [[CONV12:%.*]] = trunc nuw i32 [[SHR]] to i16
 ; CHECK-NEXT:    br label [[IF_END:%.*]]
 ; CHECK:       if.else:
 ; CHECK-NEXT:    [[ADD21:%.*]] = sub nsw i32 [[CONV5]], [[CONV]]
 ; CHECK-NEXT:    [[MUL25:%.*]] = mul i32 [[ADD21]], [[CONV11]]
 ; CHECK-NEXT:    [[SHR26:%.*]] = lshr i32 [[MUL25]], 16
-; CHECK-NEXT:    [[TMP33:%.*]] = trunc i32 [[SHR26]] to i16
+; CHECK-NEXT:    [[TMP33:%.*]] = trunc nuw i32 [[SHR26]] to i16
 ; CHECK-NEXT:    [[CONV28:%.*]] = sub i16 0, [[TMP33]]
 ; CHECK-NEXT:    br label [[IF_END]]
 ; CHECK:       if.end:
@@ -110,14 +110,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_1:%.*]] = sub nsw i32 [[CONV5_1]], [[CONV_1]]
 ; CHECK-NEXT:    [[MUL25_1:%.*]] = mul i32 [[ADD21_1]], [[CONV11_1]]
 ; CHECK-NEXT:    [[SHR26_1:%.*]] = lshr i32 [[MUL25_1]], 16
-; CHECK-NEXT:    [[TMP37:%.*]] = trunc i32 [[SHR26_1]] to i16
+; CHECK-NEXT:    [[TMP37:%.*]] = trunc nuw i32 [[SHR26_1]] to i16
 ; CHECK-NEXT:    [[CONV28_1:%.*]] = sub i16 0, [[TMP37]]
 ; CHECK-NEXT:    br label [[IF_END_1:%.*]]
 ; CHECK:       if.then.1:
 ; CHECK-NEXT:    [[ADD_1:%.*]] = add nuw nsw i32 [[CONV5_1]], [[CONV_1]]
 ; CHECK-NEXT:    [[MUL_1:%.*]] = mul i32 [[ADD_1]], [[CONV11_1]]
 ; CHECK-NEXT:    [[SHR_1:%.*]] = lshr i32 [[MUL_1]], 16
-; CHECK-NEXT:    [[CONV12_1:%.*]] = trunc i32 [[SHR_1]] to i16
+; CHECK-NEXT:    [[CONV12_1:%.*]] = trunc nuw i32 [[SHR_1]] to i16
 ; CHECK-NEXT:    br label [[IF_END_1]]
 ; CHECK:       if.end.1:
 ; CHECK-NEXT:    [[STOREMERGE_1:%.*]] = phi i16 [ [[CONV28_1]], [[IF_ELSE_1]] ], [ [[CONV12_1]], [[IF_THEN_1]] ]
@@ -138,14 +138,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_2:%.*]] = sub nsw i32 [[CONV5_2]], [[CONV_2]]
 ; CHECK-NEXT:    [[MUL25_2:%.*]] = mul i32 [[ADD21_2]], [[CONV11_2]]
 ; CHECK-NEXT:    [[SHR26_2:%.*]] = lshr i32 [[MUL25_2]], 16
-; CHECK-NEXT:    [[TMP41:%.*]] = trunc i32 [[SHR26_2]] to i16
+; CHECK-NEXT:    [[TMP41:%.*]] = trunc nuw i32 [[SHR26_2]] to i16
 ; CHECK-NEXT:    [[CONV28_2:%.*]] = sub i16 0, [[TMP41]]
 ; CHECK-NEXT:    br label [[IF_END_2:%.*]]
 ; CHECK:       if.then.2:
 ; CHECK-NEXT:    [[ADD_2:%.*]] = add nuw nsw i32 [[CONV5_2]], [[CONV_2]]
 ; CHECK-NEXT:    [[MUL_2:%.*]] = mul i32 [[ADD_2]], [[CONV11_2]]
 ; CHECK-NEXT:    [[SHR_2:%.*]] = lshr i32 [[MUL_2]], 16
-; CHECK-NEXT:    [[CONV12_2:%.*]] = trunc i32 [[SHR_2]] to i16
+; CHECK-NEXT:    [[CONV12_2:%.*]] = trunc nuw i32 [[SHR_2]] to i16
 ; CHECK-NEXT:    br label [[IF_END_2]]
 ; CHECK:       if.end.2:
 ; CHECK-NEXT:    [[STOREMERGE_2:%.*]] = phi i16 [ [[CONV28_2]], [[IF_ELSE_2]] ], [ [[CONV12_2]], [[IF_THEN_2]] ]
@@ -166,14 +166,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_3:%.*]] = sub nsw i32 [[CONV5_3]], [[CONV_3]]
 ; CHECK-NEXT:    [[MUL25_3:%.*]] = mul i32 [[ADD21_3]], [[CONV11_3]]
 ; CHECK-NEXT:    [[SHR26_3:%.*]] = lshr i32 [[MUL25_3]], 16
-; CHECK-NEXT:    [[TMP45:%.*]] = trunc i32 [[SHR26_3]] to i16
+; CHECK-NEXT:    [[TMP45:%.*]] = trunc nuw i32 [[SHR26_3]] to i16
 ; CHECK-NEXT:    [[CONV28_3:%.*]] = sub i16 0, [[TMP45]]
 ; CHECK-NEXT:    br label [[IF_END_3:%.*]]
 ; CHECK:       if.then.3:
 ; CHECK-NEXT:    [[ADD_3:%.*]] = add nuw nsw i32 [[CONV5_3]], [[CONV_3]]
 ; CHECK-NEXT:    [[MUL_3:%.*]] = mul i32 [[ADD_3]], [[CONV11_3]]
 ; CHECK-NEXT:    [[SHR_3:%.*]] = lshr i32 [[MUL_3]], 16
-; CHECK-NEXT:    [[CONV12_3:%.*]] = trunc i32 [[SHR_3]] to i16
+; CHECK-NEXT:    [[CONV12_3:%.*]] = trunc nuw i32 [[SHR_3]] to i16
 ; CHECK-NEXT:    br label [[IF_END_3]]
 ; CHECK:       if.end.3:
 ; CHECK-NEXT:    [[STOREMERGE_3:%.*]] = phi i16 [ [[CONV28_3]], [[IF_ELSE_3]] ], [ [[CONV12_3]], [[IF_THEN_3]] ]
@@ -194,14 +194,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_4:%.*]] = sub nsw i32 [[CONV5_4]], [[CONV_4]]
 ; CHECK-NEXT:    [[MUL25_4:%.*]] = mul i32 [[ADD21_4]], [[CONV11_4]]
 ; CHECK-NEXT:    [[SHR26_4:%.*]] = lshr i32 [[MUL25_4]], 16
-; CHECK-NEXT:    [[TMP49:%.*]] = trunc i32 [[SHR26_4]] to i16
+; CHECK-NEXT:    [[TMP49:%.*]] = trunc nuw i32 [[SHR26_4]] to i16
 ; CHECK-NEXT:    [[CONV28_4:%.*]] = sub i16 0, [[TMP49]]
 ; CHECK-NEXT:    br label [[IF_END_4:%.*]]
 ; CHECK:       if.then.4:
 ; CHECK-NEXT:    [[ADD_4:%.*]] = add nuw nsw i32 [[CONV5_4]], [[CONV_4]]
 ; CHECK-NEXT:    [[MUL_4:%.*]] = mul i32 [[ADD_4]], [[CONV11_4]]
 ; CHECK-NEXT:    [[SHR_4:%.*]] = lshr i32 [[MUL_4]], 16
-; CHECK-NEXT:    [[CONV12_4:%.*]] = trunc i32 [[SHR_4]] to i16
+; CHECK-NEXT:    [[CONV12_4:%.*]] = trunc nuw i32 [[SHR_4]] to i16
 ; CHECK-NEXT:    br label [[IF_END_4]]
 ; CHECK:       if.end.4:
 ; CHECK-NEXT:    [[STOREMERGE_4:%.*]] = phi i16 [ [[CONV28_4]], [[IF_ELSE_4]] ], [ [[CONV12_4]], [[IF_THEN_4]] ]
@@ -222,14 +222,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_5:%.*]] = sub nsw i32 [[CONV5_5]], [[CONV_5]]
 ; CHECK-NEXT:    [[MUL25_5:%.*]] = mul i32 [[ADD21_5]], [[CONV11_5]]
 ; CHECK-NEXT:    [[SHR26_5:%.*]] = lshr i32 [[MUL25_5]], 16
-; CHECK-NEXT:    [[TMP53:%.*]] = trunc i32 [[SHR26_5]] to i16
+; CHECK-NEXT:    [[TMP53:%.*]] = trunc nuw i32 [[SHR26_5]] to i16
 ; CHECK-NEXT:    [[CONV28_5:%.*]] = sub i16 0, [[TMP53]]
 ; CHECK-NEXT:    br label [[IF_END_5:%.*]]
 ; CHECK:       if.then.5:
 ; CHECK-NEXT:    [[ADD_5:%.*]] = add nuw nsw i32 [[CONV5_5]], [[CONV_5]]
 ; CHECK-NEXT:    [[MUL_5:%.*]] = mul i32 [[ADD_5]], [[CONV11_5]]
 ; CHECK-NEXT:    [[SHR_5:%.*]] = lshr i32 [[MUL_5]], 16
-; CHECK-NEXT:    [[CONV12_5:%.*]] = trunc i32 [[SHR_5]] to i16
+; CHECK-NEXT:    [[CONV12_5:%.*]] = trunc nuw i32 [[SHR_5]] to i16
 ; CHECK-NEXT:    br label [[IF_END_5]]
 ; CHECK:       if.end.5:
 ; CHECK-NEXT:    [[STOREMERGE_5:%.*]] = phi i16 [ [[CONV28_5]], [[IF_ELSE_5]] ], [ [[CONV12_5]], [[IF_THEN_5]] ]
@@ -250,14 +250,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_6:%.*]] = sub nsw i32 [[CONV5_6]], [[CONV_6]]
 ; CHECK-NEXT:    [[MUL25_6:%.*]] = mul i32 [[ADD21_6]], [[CONV11_6]]
 ; CHECK-NEXT:    [[SHR26_6:%.*]] = lshr i32 [[MUL25_6]], 16
-; CHECK-NEXT:    [[TMP57:%.*]] = trunc i32 [[SHR26_6]] to i16
+; CHECK-NEXT:    [[TMP57:%.*]] = trunc nuw i32 [[SHR26_6]] to i16
 ; CHECK-NEXT:    [[CONV28_6:%.*]] = sub i16 0, [[TMP57]]
 ; CHECK-NEXT:    br label [[IF_END_6:%.*]]
 ; CHECK:       if.then.6:
 ; CHECK-NEXT:    [[ADD_6:%.*]] = add nuw nsw i32 [[CONV5_6]], [[CONV_6]]
 ; CHECK-NEXT:    [[MUL_6:%.*]] = mul i32 [[ADD_6]], [[CONV11_6]]
 ; CHECK-NEXT:    [[SHR_6:%.*]] = lshr i32 [[MUL_6]], 16
-; CHECK-NEXT:    [[CONV12_6:%.*]] = trunc i32 [[SHR_6]] to i16
+; CHECK-NEXT:    [[CONV12_6:%.*]] = trunc nuw i32 [[SHR_6]] to i16
 ; CHECK-NEXT:    br label [[IF_END_6]]
 ; CHECK:       if.end.6:
 ; CHECK-NEXT:    [[STOREMERGE_6:%.*]] = phi i16 [ [[CONV28_6]], [[IF_ELSE_6]] ], [ [[CONV12_6]], [[IF_THEN_6]] ]
@@ -278,14 +278,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_7:%.*]] = sub nsw i32 [[CONV5_7]], [[CONV_7]]
 ; CHECK-NEXT:    [[MUL25_7:%.*]] = mul i32 [[ADD21_7]], [[CONV11_7]]
 ; CHECK-NEXT:    [[SHR26_7:%.*]] = lshr i32 [[MUL25_7]], 16
-; CHECK-NEXT:    [[TMP61:%.*]] = trunc i32 [[SHR26_7]] to i16
+; CHECK-NEXT:    [[TMP61:%.*]] = trunc nuw i32 [[SHR26_7]] to i16
 ; CHECK-NEXT:    [[CONV28_7:%.*]] = sub i16 0, [[TMP61]]
 ; CHECK-NEXT:    br label [[IF_END_7:%.*]]
 ; CHECK:       if.then.7:
 ; CHECK-NEXT:    [[ADD_7:%.*]] = add nuw nsw i32 [[CONV5_7]], [[CONV_7]]
 ; CHECK-NEXT:    [[MUL_7:%.*]] = mul i32 [[ADD_7]], [[CONV11_7]]
 ; CHECK-NEXT:    [[SHR_7:%.*]] = lshr i32 [[MUL_7]], 16
-; CHECK-NEXT:    [[CONV12_7:%.*]] = trunc i32 [[SHR_7]] to i16
+; CHECK-NEXT:    [[CONV12_7:%.*]] = trunc nuw i32 [[SHR_7]] to i16
 ; CHECK-NEXT:    br label [[IF_END_7]]
 ; CHECK:       if.end.7:
 ; CHECK-NEXT:    [[STOREMERGE_7:%.*]] = phi i16 [ [[CONV28_7]], [[IF_ELSE_7]] ], [ [[CONV12_7]], [[IF_THEN_7]] ]
@@ -306,14 +306,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_8:%.*]] = sub nsw i32 [[CONV5_8]], [[CONV_8]]
 ; CHECK-NEXT:    [[MUL25_8:%.*]] = mul i32 [[ADD21_8]], [[CONV11_8]]
 ; CHECK-NEXT:    [[SHR26_8:%.*]] = lshr i32 [[MUL25_8]], 16
-; CHECK-NEXT:    [[TMP65:%.*]] = trunc i32 [[SHR26_8]] to i16
+; CHECK-NEXT:    [[TMP65:%.*]] = trunc nuw i32 [[SHR26_8]] to i16
 ; CHECK-NEXT:    [[CONV28_8:%.*]] = sub i16 0, [[TMP65]]
 ; CHECK-NEXT:    br label [[IF_END_8:%.*]]
 ; CHECK:       if.then.8:
 ; CHECK-NEXT:    [[ADD_8:%.*]] = add nuw nsw i32 [[CONV5_8]], [[CONV_8]]
 ; CHECK-NEXT:    [[MUL_8:%.*]] = mul i32 [[ADD_8]], [[CONV11_8]]
 ; CHECK-NEXT:    [[SHR_8:%.*]] = lshr i32 [[MUL_8]], 16
-; CHECK-NEXT:    [[CONV12_8:%.*]] = trunc i32 [[SHR_8]] to i16
+; CHECK-NEXT:    [[CONV12_8:%.*]] = trunc nuw i32 [[SHR_8]] to i16
 ; CHECK-NEXT:    br label [[IF_END_8]]
 ; CHECK:       if.end.8:
 ; CHECK-NEXT:    [[STOREMERGE_8:%.*]] = phi i16 [ [[CONV28_8]], [[IF_ELSE_8]] ], [ [[CONV12_8]], [[IF_THEN_8]] ]
@@ -334,14 +334,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_9:%.*]] = sub nsw i32 [[CONV5_9]], [[CONV_9]]
 ; CHECK-NEXT:    [[MUL25_9:%.*]] = mul i32 [[ADD21_9]], [[CONV11_9]]
 ; CHECK-NEXT:    [[SHR26_9:%.*]] = lshr i32 [[MUL25_9]], 16
-; CHECK-NEXT:    [[TMP69:%.*]] = trunc i32 [[SHR26_9]] to i16
+; CHECK-NEXT:    [[TMP69:%.*]] = trunc nuw i32 [[SHR26_9]] to i16
 ; CHECK-NEXT:    [[CONV28_9:%.*]] = sub i16 0, [[TMP69]]
 ; CHECK-NEXT:    br label [[IF_END_9:%.*]]
 ; CHECK:       if.then.9:
 ; CHECK-NEXT:    [[ADD_9:%.*]] = add nuw nsw i32 [[CONV5_9]], [[CONV_9]]
 ; CHECK-NEXT:    [[MUL_9:%.*]] = mul i32 [[ADD_9]], [[CONV11_9]]
 ; CHECK-NEXT:    [[SHR_9:%.*]] = lshr i32 [[MUL_9]], 16
-; CHECK-NEXT:    [[CONV12_9:%.*]] = trunc i32 [[SHR_9]] to i16
+; CHECK-NEXT:    [[CONV12_9:%.*]] = trunc nuw i32 [[SHR_9]] to i16
 ; CHECK-NEXT:    br label [[IF_END_9]]
 ; CHECK:       if.end.9:
 ; CHECK-NEXT:    [[STOREMERGE_9:%.*]] = phi i16 [ [[CONV28_9]], [[IF_ELSE_9]] ], [ [[CONV12_9]], [[IF_THEN_9]] ]
@@ -362,14 +362,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_10:%.*]] = sub nsw i32 [[CONV5_10]], [[CONV_10]]
 ; CHECK-NEXT:    [[MUL25_10:%.*]] = mul i32 [[ADD21_10]], [[CONV11_10]]
 ; CHECK-NEXT:    [[SHR26_10:%.*]] = lshr i32 [[MUL25_10]], 16
-; CHECK-NEXT:    [[TMP73:%.*]] = trunc i32 [[SHR26_10]] to i16
+; CHECK-NEXT:    [[TMP73:%.*]] = trunc nuw i32 [[SHR26_10]] to i16
 ; CHECK-NEXT:    [[CONV28_10:%.*]] = sub i16 0, [[TMP73]]
 ; CHECK-NEXT:    br label [[IF_END_10:%.*]]
 ; CHECK:       if.then.10:
 ; CHECK-NEXT:    [[ADD_10:%.*]] = add nuw nsw i32 [[CONV5_10]], [[CONV_10]]
 ; CHECK-NEXT:    [[MUL_10:%.*]] = mul i32 [[ADD_10]], [[CONV11_10]]
 ; CHECK-NEXT:    [[SHR_10:%.*]] = lshr i32 [[MUL_10]], 16
-; CHECK-NEXT:    [[CONV12_10:%.*]] = trunc i32 [[SHR_10]] to i16
+; CHECK-NEXT:    [[CONV12_10:%.*]] = trunc nuw i32 [[SHR_10]] to i16
 ; CHECK-NEXT:    br label [[IF_END_10]]
 ; CHECK:       if.end.10:
 ; CHECK-NEXT:    [[STOREMERGE_10:%.*]] = phi i16 [ [[CONV28_10]], [[IF_ELSE_10]] ], [ [[CONV12_10]], [[IF_THEN_10]] ]
@@ -390,14 +390,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_11:%.*]] = sub nsw i32 [[CONV5_11]], [[CONV_11]]
 ; CHECK-NEXT:    [[MUL25_11:%.*]] = mul i32 [[ADD21_11]], [[CONV11_11]]
 ; CHECK-NEXT:    [[SHR26_11:%.*]] = lshr i32 [[MUL25_11]], 16
-; CHECK-NEXT:    [[TMP77:%.*]] = trunc i32 [[SHR26_11]] to i16
+; CHECK-NEXT:    [[TMP77:%.*]] = trunc nuw i32 [[SHR26_11]] to i16
 ; CHECK-NEXT:    [[CONV28_11:%.*]] = sub i16 0, [[TMP77]]
 ; CHECK-NEXT:    br label [[IF_END_11:%.*]]
 ; CHECK:       if.then.11:
 ; CHECK-NEXT:    [[ADD_11:%.*]] = add nuw nsw i32 [[CONV5_11]], [[CONV_11]]
 ; CHECK-NEXT:    [[MUL_11:%.*]] = mul i32 [[ADD_11]], [[CONV11_11]]
 ; CHECK-NEXT:    [[SHR_11:%.*]] = lshr i32 [[MUL_11]], 16
-; CHECK-NEXT:    [[CONV12_11:%.*]] = trunc i32 [[SHR_11]] to i16
+; CHECK-NEXT:    [[CONV12_11:%.*]] = trunc nuw i32 [[SHR_11]] to i16
 ; CHECK-NEXT:    br label [[IF_END_11]]
 ; CHECK:       if.end.11:
 ; CHECK-NEXT:    [[STOREMERGE_11:%.*]] = phi i16 [ [[CONV28_11]], [[IF_ELSE_11]] ], [ [[CONV12_11]], [[IF_THEN_11]] ]
@@ -418,14 +418,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_12:%.*]] = sub nsw i32 [[CONV5_12]], [[CONV_12]]
 ; CHECK-NEXT:    [[MUL25_12:%.*]] = mul i32 [[ADD21_12]], [[CONV11_12]]
 ; CHECK-NEXT:    [[SHR26_12:%.*]] = lshr i32 [[MUL25_12]], 16
-; CHECK-NEXT:    [[TMP81:%.*]] = trunc i32 [[SHR26_12]] to i16
+; CHECK-NEXT:    [[TMP81:%.*]] = trunc nuw i32 [[SHR26_12]] to i16
 ; CHECK-NEXT:    [[CONV28_12:%.*]] = sub i16 0, [[TMP81]]
 ; CHECK-NEXT:    br label [[IF_END_12:%.*]]
 ; CHECK:       if.then.12:
 ; CHECK-NEXT:    [[ADD_12:%.*]] = add nuw nsw i32 [[CONV5_12]], [[CONV_12]]
 ; CHECK-NEXT:    [[MUL_12:%.*]] = mul i32 [[ADD_12]], [[CONV11_12]]
 ; CHECK-NEXT:    [[SHR_12:%.*]] = lshr i32 [[MUL_12]], 16
-; CHECK-NEXT:    [[CONV12_12:%.*]] = trunc i32 [[SHR_12]] to i16
+; CHECK-NEXT:    [[CONV12_12:%.*]] = trunc nuw i32 [[SHR_12]] to i16
 ; CHECK-NEXT:    br label [[IF_END_12]]
 ; CHECK:       if.end.12:
 ; CHECK-NEXT:    [[STOREMERGE_12:%.*]] = phi i16 [ [[CONV28_12]], [[IF_ELSE_12]] ], [ [[CONV12_12]], [[IF_THEN_12]] ]
@@ -446,14 +446,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_13:%.*]] = sub nsw i32 [[CONV5_13]], [[CONV_13]]
 ; CHECK-NEXT:    [[MUL25_13:%.*]] = mul i32 [[ADD21_13]], [[CONV11_13]]
 ; CHECK-NEXT:    [[SHR26_13:%.*]] = lshr i32 [[MUL25_13]], 16
-; CHECK-NEXT:    [[TMP85:%.*]] = trunc i32 [[SHR26_13]] to i16
+; CHECK-NEXT:    [[TMP85:%.*]] = trunc nuw i32 [[SHR26_13]] to i16
 ; CHECK-NEXT:    [[CONV28_13:%.*]] = sub i16 0, [[TMP85]]
 ; CHECK-NEXT:    br label [[IF_END_13:%.*]]
 ; CHECK:       if.then.13:
 ; CHECK-NEXT:    [[ADD_13:%.*]] = add nuw nsw i32 [[CONV5_13]], [[CONV_13]]
 ; CHECK-NEXT:    [[MUL_13:%.*]] = mul i32 [[ADD_13]], [[CONV11_13]]
 ; CHECK-NEXT:    [[SHR_13:%.*]] = lshr i32 [[MUL_13]], 16
-; CHECK-NEXT:    [[CONV12_13:%.*]] = trunc i32 [[SHR_13]] to i16
+; CHECK-NEXT:    [[CONV12_13:%.*]] = trunc nuw i32 [[SHR_13]] to i16
 ; CHECK-NEXT:    br label [[IF_END_13]]
 ; CHECK:       if.end.13:
 ; CHECK-NEXT:    [[STOREMERGE_13:%.*]] = phi i16 [ [[CONV28_13]], [[IF_ELSE_13]] ], [ [[CONV12_13]], [[IF_THEN_13]] ]
@@ -474,14 +474,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_14:%.*]] = sub nsw i32 [[CONV5_14]], [[CONV_14]]
 ; CHECK-NEXT:    [[MUL25_14:%.*]] = mul i32 [[ADD21_14]], [[CONV11_14]]
 ; CHECK-NEXT:    [[SHR26_14:%.*]] = lshr i32 [[MUL25_14]], 16
-; CHECK-NEXT:    [[TMP89:%.*]] = trunc i32 [[SHR26_14]] to i16
+; CHECK-NEXT:    [[TMP89:%.*]] = trunc nuw i32 [[SHR26_14]] to i16
 ; CHECK-NEXT:    [[CONV28_14:%.*]] = sub i16 0, [[TMP89]]
 ; CHECK-NEXT:    br label [[IF_END_14:%.*]]
 ; CHECK:       if.then.14:
 ; CHECK-NEXT:    [[ADD_14:%.*]] = add nuw nsw i32 [[CONV5_14]], [[CONV_14]]
 ; CHECK-NEXT:    [[MUL_14:%.*]] = mul i32 [[ADD_14]], [[CONV11_14]]
 ; CHECK-NEXT:    [[SHR_14:%.*]] = lshr i32 [[MUL_14]], 16
-; CHECK-NEXT:    [[CONV12_14:%.*]] = trunc i32 [[SHR_14]] to i16
+; CHECK-NEXT:    [[CONV12_14:%.*]] = trunc nuw i32 [[SHR_14]] to i16
 ; CHECK-NEXT:    br label [[IF_END_14]]
 ; CHECK:       if.end.14:
 ; CHECK-NEXT:    [[STOREMERGE_14:%.*]] = phi i16 [ [[CONV28_14]], [[IF_ELSE_14]] ], [ [[CONV12_14]], [[IF_THEN_14]] ]
@@ -502,14 +502,14 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT:    [[ADD21_15:%.*]] = sub nsw i32 [[CONV5_15]], [[CONV_15]]
 ; CHECK-NEXT:    [[MUL25_15:%.*]] = mul i32 [[ADD21_15]], [[CONV11_15]]
 ; CHECK-NEXT:    [[SHR26_15:%.*]] = lshr i32 [[MUL25_15]], 16
-; CHECK-NEXT:    [[TMP93:%.*]] = trunc i32 [[SHR26_15]] to i16
+; CHECK-NEXT:    [[TMP93:%.*]] = trunc nuw i32 [[SHR26_15]] to i16
 ; CHECK-NEXT:    [[CONV28_15:%.*]] = sub i16 0, [[TMP93]]
 ; CHECK-NEXT:    br label [[IF_END_15]]
 ; CHECK:       if.then.15:
 ; CHECK-NEXT:    [[ADD_15:%.*]] = add nuw nsw i32 [[CONV5_15]], [[CONV_15]]
 ; CHECK-NEXT:    [[MUL_15:%.*]] = mul i32 [[ADD_15]], [[CONV11_15]]
 ; CHECK-NEXT:    [[SHR_15:%.*]] = lshr i32 [[MUL_15]], 16
-; CHECK-NEXT:    [[CONV12_15:%.*]] = trunc i32 [[SHR_15]] to i16
+; CHECK-NEXT:    [[CONV12_15:%.*]] = trunc nuw i32 [[SHR_15]] to i16
 ; CHECK-NEXT:    br label [[IF_END_15]]
 ; CHECK:       if.end.15:
 ; CHECK-NEXT:    [[STOREMERGE_15:%.*]] = phi i16 [ [[CONV28_15]], [[IF_ELSE_15]] ], [ [[CONV12_15]], [[IF_THEN_15]] ]

diff  --git a/llvm/test/Transforms/SLPVectorizer/X86/pr46983.ll b/llvm/test/Transforms/SLPVectorizer/X86/pr46983.ll
index c38f2748a9763d..75505f632a43f3 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/pr46983.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/pr46983.ll
@@ -55,7 +55,7 @@ define void @store_i8(ptr nocapture %0, i32 %1, i32 %2) {
 ; CHECK-NEXT:    [[TMP8:%.*]] = mul <4 x i32> [[TMP7]], [[TMP5]]
 ; CHECK-NEXT:    [[TMP9:%.*]] = lshr <4 x i32> [[TMP8]], <i32 15, i32 15, i32 15, i32 15>
 ; CHECK-NEXT:    [[TMP10:%.*]] = call <4 x i32> @llvm.umin.v4i32(<4 x i32> [[TMP9]], <4 x i32> <i32 255, i32 255, i32 255, i32 255>)
-; CHECK-NEXT:    [[TMP11:%.*]] = trunc <4 x i32> [[TMP10]] to <4 x i8>
+; CHECK-NEXT:    [[TMP11:%.*]] = trunc nuw <4 x i32> [[TMP10]] to <4 x i8>
 ; CHECK-NEXT:    store <4 x i8> [[TMP11]], ptr [[TMP0]], align 1, !tbaa [[TBAA4]]
 ; CHECK-NEXT:    ret void
 ;

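For context when reading the CHECK-line updates above, here is a minimal hand-written IR sketch (illustration only, not part of the patch; the function names are made up) of the two patterns that recur throughout these tests: a logical shift that leaves only known-zero high bits justifies both flags, while a sign smear justifies only nsw.

    define i32 @trunc_nuw_nsw_sketch(i64 %x) {
      ; %hi is the top bit of %x shifted down, so its value is 0 or 1.
      ; Truncation drops only known-zero bits (nuw) and cannot change
      ; the sign (nsw), matching e.g. the updated @test29 in shift.ll.
      %hi = lshr i64 %x, 63
      %t = trunc nuw nsw i64 %hi to i32
      ret i32 %t
    }

    define i16 @trunc_nsw_only_sketch(i32 %x) {
      ; %smear is 0 or -1 after the arithmetic shift. The value fits in
      ; i16 as a signed number (nsw), but -1 has non-zero dropped bits,
      ; so nuw cannot be inferred, matching e.g. @t0 in
      ; shift-amount-reassociation-with-truncation-ashr.ll.
      %smear = ashr i32 %x, 31
      %t = trunc nsw i32 %smear to i16
      ret i16 %t
    }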

        

