[llvm] e91fe08 - [NFCI] Regenerate PhaseOrdering test checks

Dávid Bolvanský via llvm-commits <llvm-commits at lists.llvm.org>
Sun Apr 3 15:31:13 PDT 2022


Author: Dávid Bolvanský
Date: 2022-04-04T00:28:57+02:00
New Revision: e91fe08999d5f5d7e7777837c529bac692d06c1b

URL: https://github.com/llvm/llvm-project/commit/e91fe08999d5f5d7e7777837c529bac692d06c1b
DIFF: https://github.com/llvm/llvm-project/commit/e91fe08999d5f5d7e7777837c529bac692d06c1b.diff

LOG: [NFCI] Regenerate PhaseOrdering test checks

Added: 
    

Modified: 
    llvm/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll
    llvm/test/Transforms/PhaseOrdering/AArch64/loopflatten.ll
    llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
    llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll
    llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll
    llvm/test/Transforms/PhaseOrdering/X86/addsub-inseltpoison.ll
    llvm/test/Transforms/PhaseOrdering/X86/addsub.ll
    llvm/test/Transforms/PhaseOrdering/X86/loop-idiom-vs-indvars.ll
    llvm/test/Transforms/PhaseOrdering/X86/peel-before-lv-to-enable-vectorization.ll
    llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll
    llvm/test/Transforms/PhaseOrdering/assume-explosion.ll
    llvm/test/Transforms/PhaseOrdering/basic.ll
    llvm/test/Transforms/PhaseOrdering/d83507-knowledge-retention-bug.ll
    llvm/test/Transforms/PhaseOrdering/expect.ll
    llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll
    llvm/test/Transforms/PhaseOrdering/pr39282.ll
    llvm/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll b/llvm/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll
index 13404a8b6a719..24fb79b6daa51 100644
--- a/llvm/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll
+++ b/llvm/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -O2 -S < %s | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@@ -11,6 +12,10 @@ target triple = "x86_64-apple-darwin11.1"
 @.str = private constant [25 x i8] c"x.second() was clobbered\00", align 1 ; <[25 x i8]*> [#uses=1]
 
 define i32 @main(i32 %argc, i8** %argv) ssp {
+; CHECK-LABEL: @main(
+; CHECK-NEXT:  bb1:
+; CHECK-NEXT:    ret i32 0
+;
 entry:
   %argc_addr = alloca i32, align 4                ; <i32*> [#uses=1]
   %argv_addr = alloca i8**, align 8               ; <i8***> [#uses=1]
@@ -48,8 +53,6 @@ bb1:                                              ; preds = %entry
   store i32 %11, i32* %retval, align 4
   br label %return
 
-; CHECK-NOT: x.second() was clobbered
-; CHECK: ret i32
 return:                                           ; preds = %bb1
   %retval2 = load i32, i32* %retval                    ; <i32> [#uses=1]
   ret i32 %retval2

diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/loopflatten.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/loopflatten.ll
index 5ef5bb185ab02..7228fe5aebd91 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/loopflatten.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/loopflatten.ll
@@ -17,12 +17,12 @@ define dso_local void @_Z3fooPiii(i32* %A, i32 %N, i32 %M) #0 {
 ; CHECK-NEXT:    [[FLATTEN_TRIPCOUNT:%.*]] = mul nuw nsw i64 [[TMP0]], [[TMP1]]
 ; CHECK-NEXT:    br label [[FOR_COND1_PREHEADER_US:%.*]]
 ; CHECK:       for.cond1.preheader.us:
-; CHECK-NEXT:    [[INDVAR6:%.*]] = phi i64 [ [[INDVAR_NEXT7:%.*]], [[FOR_COND1_PREHEADER_US]] ], [ 0, [[FOR_COND1_PREHEADER_LR_PH_SPLIT_US]] ]
-; CHECK-NEXT:    [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVAR6]]
+; CHECK-NEXT:    [[INDVAR7:%.*]] = phi i64 [ [[INDVAR_NEXT8:%.*]], [[FOR_COND1_PREHEADER_US]] ], [ 0, [[FOR_COND1_PREHEADER_LR_PH_SPLIT_US]] ]
+; CHECK-NEXT:    [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVAR7]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_US]], align 4
 ; CHECK-NEXT:    tail call void @_Z1fi(i32 [[TMP2]])
-; CHECK-NEXT:    [[INDVAR_NEXT7]] = add nuw nsw i64 [[INDVAR6]], 1
-; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVAR_NEXT7]], [[FLATTEN_TRIPCOUNT]]
+; CHECK-NEXT:    [[INDVAR_NEXT8]] = add nuw nsw i64 [[INDVAR7]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVAR_NEXT8]], [[FLATTEN_TRIPCOUNT]]
 ; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_COND1_PREHEADER_US]]
 ; CHECK:       for.cond.cleanup:
 ; CHECK-NEXT:    ret void

diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
index f71034c05c67f..afcc679a3c666 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
@@ -35,7 +35,7 @@ define i64 @sum_2_at_with_int_conversion(%vec* %A, %vec* %B, i64 %N) {
 ; CHECK-NEXT:    [[UMIN16:%.*]] = call i64 @llvm.umin.i64(i64 [[UMIN]], i64 [[TMP0]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[UMIN16]], 1
 ; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 5
-; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[LOOP_PREHEADER22:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[LOOP_PREHEADER21:%.*]], label [[VECTOR_PH:%.*]]
 ; CHECK:       vector.ph:
 ; CHECK-NEXT:    [[N_MOD_VF:%.*]] = and i64 [[TMP1]], 3
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
@@ -47,38 +47,38 @@ define i64 @sum_2_at_with_int_conversion(%vec* %A, %vec* %B, i64 %N) {
 ; CHECK:       vector.body:
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <2 x i64> [ [[TMP4]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[VEC_PHI18:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI17:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP16:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = or i64 [[INDEX]], 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i64, i64* [[START_I]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i64* [[TMP5]] to <2 x i64>*
 ; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i64>, <2 x i64>* [[TMP6]], align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr i64, i64* [[TMP5]], i64 2
 ; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i64* [[TMP7]] to <2 x i64>*
-; CHECK-NEXT:    [[WIDE_LOAD19:%.*]] = load <2 x i64>, <2 x i64>* [[TMP8]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD18:%.*]] = load <2 x i64>, <2 x i64>* [[TMP8]], align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr i64, i64* [[START_I2_PEEL]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i64* [[TMP9]] to <2 x i64>*
-; CHECK-NEXT:    [[WIDE_LOAD20:%.*]] = load <2 x i64>, <2 x i64>* [[TMP10]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD19:%.*]] = load <2 x i64>, <2 x i64>* [[TMP10]], align 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr i64, i64* [[TMP9]], i64 2
 ; CHECK-NEXT:    [[TMP12:%.*]] = bitcast i64* [[TMP11]] to <2 x i64>*
-; CHECK-NEXT:    [[WIDE_LOAD21:%.*]] = load <2 x i64>, <2 x i64>* [[TMP12]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD20:%.*]] = load <2 x i64>, <2 x i64>* [[TMP12]], align 4
 ; CHECK-NEXT:    [[TMP13:%.*]] = add <2 x i64> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT:    [[TMP14:%.*]] = add <2 x i64> [[WIDE_LOAD19]], [[VEC_PHI18]]
-; CHECK-NEXT:    [[TMP15]] = add <2 x i64> [[TMP13]], [[WIDE_LOAD20]]
-; CHECK-NEXT:    [[TMP16]] = add <2 x i64> [[TMP14]], [[WIDE_LOAD21]]
+; CHECK-NEXT:    [[TMP14:%.*]] = add <2 x i64> [[WIDE_LOAD18]], [[VEC_PHI17]]
+; CHECK-NEXT:    [[TMP15]] = add <2 x i64> [[TMP13]], [[WIDE_LOAD19]]
+; CHECK-NEXT:    [[TMP16]] = add <2 x i64> [[TMP14]], [[WIDE_LOAD20]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
 ; CHECK-NEXT:    [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT:    br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK:       middle.block:
 ; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <2 x i64> [[TMP16]], [[TMP15]]
 ; CHECK-NEXT:    [[TMP18:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]])
-; CHECK-NEXT:    br label [[LOOP_PREHEADER22]]
+; CHECK-NEXT:    br label [[LOOP_PREHEADER21]]
 ; CHECK:       loop.preheader21:
 ; CHECK-NEXT:    [[IV_PH:%.*]] = phi i64 [ 1, [[LOOP_PREHEADER]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT:    [[SUM_PH:%.*]] = phi i64 [ [[SUM_NEXT_PEEL]], [[LOOP_PREHEADER]] ], [ [[TMP18]], [[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
-; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT12:%.*]] ], [ [[IV_PH]], [[LOOP_PREHEADER22]] ]
-; CHECK-NEXT:    [[SUM:%.*]] = phi i64 [ [[SUM_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT12]] ], [ [[SUM_PH]], [[LOOP_PREHEADER22]] ]
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT12:%.*]] ], [ [[IV_PH]], [[LOOP_PREHEADER21]] ]
+; CHECK-NEXT:    [[SUM:%.*]] = phi i64 [ [[SUM_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT12]] ], [ [[SUM_PH]], [[LOOP_PREHEADER21]] ]
 ; CHECK-NEXT:    [[INRANGE_I:%.*]] = icmp ult i64 [[SUB_I]], [[IV]]
 ; CHECK-NEXT:    br i1 [[INRANGE_I]], label [[ERROR_I:%.*]], label [[AT_WITH_INT_CONVERSION_EXIT:%.*]]
 ; CHECK:       error.i:
@@ -160,7 +160,7 @@ define i64 @sum_3_at_with_int_conversion(%vec* %A, %vec* %B, %vec* %C, i64 %N) {
 ; CHECK-NEXT:    [[UMIN29:%.*]] = call i64 @llvm.umin.i64(i64 [[UMIN28]], i64 [[TMP0]])
 ; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[UMIN29]], 1
 ; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 5
-; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[LOOP_PREHEADER37:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[LOOP_PREHEADER36:%.*]], label [[VECTOR_PH:%.*]]
 ; CHECK:       vector.ph:
 ; CHECK-NEXT:    [[N_MOD_VF:%.*]] = and i64 [[TMP1]], 3
 ; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
@@ -172,46 +172,46 @@ define i64 @sum_3_at_with_int_conversion(%vec* %A, %vec* %B, %vec* %C, i64 %N) {
 ; CHECK:       vector.body:
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <2 x i64> [ [[TMP4]], [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[VEC_PHI31:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI30:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = or i64 [[INDEX]], 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i64, i64* [[START_I]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i64* [[TMP5]] to <2 x i64>*
 ; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i64>, <2 x i64>* [[TMP6]], align 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr i64, i64* [[TMP5]], i64 2
 ; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i64* [[TMP7]] to <2 x i64>*
-; CHECK-NEXT:    [[WIDE_LOAD32:%.*]] = load <2 x i64>, <2 x i64>* [[TMP8]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD31:%.*]] = load <2 x i64>, <2 x i64>* [[TMP8]], align 4
 ; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr i64, i64* [[START_I2_PEEL]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i64* [[TMP9]] to <2 x i64>*
-; CHECK-NEXT:    [[WIDE_LOAD33:%.*]] = load <2 x i64>, <2 x i64>* [[TMP10]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD32:%.*]] = load <2 x i64>, <2 x i64>* [[TMP10]], align 4
 ; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr i64, i64* [[TMP9]], i64 2
 ; CHECK-NEXT:    [[TMP12:%.*]] = bitcast i64* [[TMP11]] to <2 x i64>*
-; CHECK-NEXT:    [[WIDE_LOAD34:%.*]] = load <2 x i64>, <2 x i64>* [[TMP12]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD33:%.*]] = load <2 x i64>, <2 x i64>* [[TMP12]], align 4
 ; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr i64, i64* [[START_I14_PEEL]], i64 [[OFFSET_IDX]]
 ; CHECK-NEXT:    [[TMP14:%.*]] = bitcast i64* [[TMP13]] to <2 x i64>*
-; CHECK-NEXT:    [[WIDE_LOAD35:%.*]] = load <2 x i64>, <2 x i64>* [[TMP14]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD34:%.*]] = load <2 x i64>, <2 x i64>* [[TMP14]], align 4
 ; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr i64, i64* [[TMP13]], i64 2
 ; CHECK-NEXT:    [[TMP16:%.*]] = bitcast i64* [[TMP15]] to <2 x i64>*
-; CHECK-NEXT:    [[WIDE_LOAD36:%.*]] = load <2 x i64>, <2 x i64>* [[TMP16]], align 4
+; CHECK-NEXT:    [[WIDE_LOAD35:%.*]] = load <2 x i64>, <2 x i64>* [[TMP16]], align 4
 ; CHECK-NEXT:    [[TMP17:%.*]] = add <2 x i64> [[WIDE_LOAD]], [[VEC_PHI]]
-; CHECK-NEXT:    [[TMP18:%.*]] = add <2 x i64> [[WIDE_LOAD32]], [[VEC_PHI31]]
-; CHECK-NEXT:    [[TMP19:%.*]] = add <2 x i64> [[TMP17]], [[WIDE_LOAD33]]
-; CHECK-NEXT:    [[TMP20:%.*]] = add <2 x i64> [[TMP18]], [[WIDE_LOAD34]]
-; CHECK-NEXT:    [[TMP21]] = add <2 x i64> [[TMP19]], [[WIDE_LOAD35]]
-; CHECK-NEXT:    [[TMP22]] = add <2 x i64> [[TMP20]], [[WIDE_LOAD36]]
+; CHECK-NEXT:    [[TMP18:%.*]] = add <2 x i64> [[WIDE_LOAD31]], [[VEC_PHI30]]
+; CHECK-NEXT:    [[TMP19:%.*]] = add <2 x i64> [[TMP17]], [[WIDE_LOAD32]]
+; CHECK-NEXT:    [[TMP20:%.*]] = add <2 x i64> [[TMP18]], [[WIDE_LOAD33]]
+; CHECK-NEXT:    [[TMP21]] = add <2 x i64> [[TMP19]], [[WIDE_LOAD34]]
+; CHECK-NEXT:    [[TMP22]] = add <2 x i64> [[TMP20]], [[WIDE_LOAD35]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
 ; CHECK-NEXT:    [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT:    br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
 ; CHECK:       middle.block:
 ; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <2 x i64> [[TMP22]], [[TMP21]]
 ; CHECK-NEXT:    [[TMP24:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]])
-; CHECK-NEXT:    br label [[LOOP_PREHEADER37]]
+; CHECK-NEXT:    br label [[LOOP_PREHEADER36]]
 ; CHECK:       loop.preheader36:
 ; CHECK-NEXT:    [[IV_PH:%.*]] = phi i64 [ 1, [[LOOP_PREHEADER]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT:    [[SUM_PH:%.*]] = phi i64 [ [[SUM_NEXT_PEEL]], [[LOOP_PREHEADER]] ], [ [[TMP24]], [[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT:    br label [[LOOP:%.*]]
 ; CHECK:       loop:
-; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT24:%.*]] ], [ [[IV_PH]], [[LOOP_PREHEADER37]] ]
-; CHECK-NEXT:    [[SUM:%.*]] = phi i64 [ [[SUM_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT24]] ], [ [[SUM_PH]], [[LOOP_PREHEADER37]] ]
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT24:%.*]] ], [ [[IV_PH]], [[LOOP_PREHEADER36]] ]
+; CHECK-NEXT:    [[SUM:%.*]] = phi i64 [ [[SUM_NEXT:%.*]], [[AT_WITH_INT_CONVERSION_EXIT24]] ], [ [[SUM_PH]], [[LOOP_PREHEADER36]] ]
 ; CHECK-NEXT:    [[INRANGE_I:%.*]] = icmp ult i64 [[SUB_I]], [[IV]]
 ; CHECK-NEXT:    br i1 [[INRANGE_I]], label [[ERROR_I:%.*]], label [[AT_WITH_INT_CONVERSION_EXIT:%.*]]
 ; CHECK:       error.i:

diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll
index cacedbb7aa962..0d78006f77f46 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_add_q7.ll
@@ -26,8 +26,8 @@ define dso_local void @arm_add_q7(i8* %pSrcA, i8* %pSrcB, i8* noalias %pDst, i32
 ; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[NEXT_GEP]] to <16 x i8>*
 ; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* [[TMP0]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[NEXT_GEP15]] to <16 x i8>*
-; CHECK-NEXT:    [[WIDE_MASKED_LOAD18:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* [[TMP1]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
-; CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[WIDE_MASKED_LOAD]], <16 x i8> [[WIDE_MASKED_LOAD18]])
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD16:%.*]] = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* [[TMP1]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]], <16 x i8> poison)
+; CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> [[WIDE_MASKED_LOAD]], <16 x i8> [[WIDE_MASKED_LOAD16]])
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[NEXT_GEP14]] to <16 x i8>*
 ; CHECK-NEXT:    call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> [[TMP2]], <16 x i8>* [[TMP3]], i32 1, <16 x i1> [[ACTIVE_LANE_MASK]])
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 16

diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll
index 42b74635c6476..33b90b5295e39 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_fill_q7.ll
@@ -12,20 +12,20 @@ target triple = "thumbv6m-none-none-eabi"
 define dso_local void @arm_fill_q7(i8 signext %value, i8* %pDst, i32 %blockSize) #0 {
 ; OLDPM-LABEL: @arm_fill_q7(
 ; OLDPM-NEXT:  entry:
-; OLDPM-NEXT:    [[CMP_NOT20:%.*]] = icmp ult i32 [[BLOCKSIZE:%.*]], 4
-; OLDPM-NEXT:    br i1 [[CMP_NOT20]], label [[WHILE_END:%.*]], label [[WHILE_BODY_PREHEADER:%.*]]
+; OLDPM-NEXT:    [[CMP_NOT17:%.*]] = icmp ult i32 [[BLOCKSIZE:%.*]], 4
+; OLDPM-NEXT:    br i1 [[CMP_NOT17]], label [[WHILE_END:%.*]], label [[WHILE_BODY_PREHEADER:%.*]]
 ; OLDPM:       while.body.preheader:
-; OLDPM-NEXT:    [[TMP0:%.*]] = and i32 [[BLOCKSIZE]], -4
-; OLDPM-NEXT:    call void @llvm.memset.p0i8.i32(i8* align 1 [[PDST:%.*]], i8 [[VALUE:%.*]], i32 [[TMP0]], i1 false)
-; OLDPM-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, i8* [[PDST]], i32 [[TMP0]]
+; OLDPM-NEXT:    [[SHR:%.*]] = and i32 [[BLOCKSIZE]], -4
+; OLDPM-NEXT:    call void @llvm.memset.p0i8.i32(i8* align 1 [[PDST:%.*]], i8 [[VALUE:%.*]], i32 [[SHR]], i1 false), !tbaa [[TBAA3:![0-9]+]]
+; OLDPM-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, i8* [[PDST]], i32 [[SHR]]
 ; OLDPM-NEXT:    br label [[WHILE_END]]
 ; OLDPM:       while.end:
 ; OLDPM-NEXT:    [[PDST_ADDR_0_LCSSA:%.*]] = phi i8* [ [[PDST]], [[ENTRY:%.*]] ], [ [[SCEVGEP]], [[WHILE_BODY_PREHEADER]] ]
 ; OLDPM-NEXT:    [[REM:%.*]] = and i32 [[BLOCKSIZE]], 3
-; OLDPM-NEXT:    [[CMP14_NOT17:%.*]] = icmp eq i32 [[REM]], 0
-; OLDPM-NEXT:    br i1 [[CMP14_NOT17]], label [[WHILE_END18:%.*]], label [[WHILE_BODY16_PREHEADER:%.*]]
+; OLDPM-NEXT:    [[CMP14_NOT20:%.*]] = icmp eq i32 [[REM]], 0
+; OLDPM-NEXT:    br i1 [[CMP14_NOT20]], label [[WHILE_END18:%.*]], label [[WHILE_BODY16_PREHEADER:%.*]]
 ; OLDPM:       while.body16.preheader:
-; OLDPM-NEXT:    call void @llvm.memset.p0i8.i32(i8* align 1 [[PDST_ADDR_0_LCSSA]], i8 [[VALUE]], i32 [[REM]], i1 false)
+; OLDPM-NEXT:    call void @llvm.memset.p0i8.i32(i8* align 1 [[PDST_ADDR_0_LCSSA]], i8 [[VALUE]], i32 [[REM]], i1 false), !tbaa [[TBAA3]]
 ; OLDPM-NEXT:    br label [[WHILE_END18]]
 ; OLDPM:       while.end18:
 ; OLDPM-NEXT:    ret void
@@ -35,9 +35,9 @@ define dso_local void @arm_fill_q7(i8 signext %value, i8* %pDst, i32 %blockSize)
 ; NEWPM-NEXT:    [[CMP_NOT17:%.*]] = icmp ult i32 [[BLOCKSIZE:%.*]], 4
 ; NEWPM-NEXT:    br i1 [[CMP_NOT17]], label [[WHILE_END:%.*]], label [[WHILE_BODY_PREHEADER:%.*]]
 ; NEWPM:       while.body.preheader:
-; NEWPM-NEXT:    [[TMP0:%.*]] = and i32 [[BLOCKSIZE]], -4
-; NEWPM-NEXT:    call void @llvm.memset.p0i8.i32(i8* align 1 [[PDST:%.*]], i8 [[VALUE:%.*]], i32 [[TMP0]], i1 false)
-; NEWPM-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, i8* [[PDST]], i32 [[TMP0]]
+; NEWPM-NEXT:    [[SHR:%.*]] = and i32 [[BLOCKSIZE]], -4
+; NEWPM-NEXT:    call void @llvm.memset.p0i8.i32(i8* align 1 [[PDST:%.*]], i8 [[VALUE:%.*]], i32 [[SHR]], i1 false), !tbaa [[TBAA3:![0-9]+]]
+; NEWPM-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, i8* [[PDST]], i32 [[SHR]]
 ; NEWPM-NEXT:    br label [[WHILE_END]]
 ; NEWPM:       while.end:
 ; NEWPM-NEXT:    [[PDST_ADDR_0_LCSSA:%.*]] = phi i8* [ [[PDST]], [[ENTRY:%.*]] ], [ [[SCEVGEP]], [[WHILE_BODY_PREHEADER]] ]
@@ -45,7 +45,7 @@ define dso_local void @arm_fill_q7(i8 signext %value, i8* %pDst, i32 %blockSize)
 ; NEWPM-NEXT:    [[CMP14_NOT20:%.*]] = icmp eq i32 [[REM]], 0
 ; NEWPM-NEXT:    br i1 [[CMP14_NOT20]], label [[WHILE_END18:%.*]], label [[WHILE_BODY16_PREHEADER:%.*]]
 ; NEWPM:       while.body16.preheader:
-; NEWPM-NEXT:    call void @llvm.memset.p0i8.i32(i8* align 1 [[PDST_ADDR_0_LCSSA]], i8 [[VALUE]], i32 [[REM]], i1 false)
+; NEWPM-NEXT:    call void @llvm.memset.p0i8.i32(i8* align 1 [[PDST_ADDR_0_LCSSA]], i8 [[VALUE]], i32 [[REM]], i1 false), !tbaa [[TBAA3]]
 ; NEWPM-NEXT:    br label [[WHILE_END18]]
 ; NEWPM:       while.end18:
 ; NEWPM-NEXT:    ret void

diff --git a/llvm/test/Transforms/PhaseOrdering/X86/addsub-inseltpoison.ll b/llvm/test/Transforms/PhaseOrdering/X86/addsub-inseltpoison.ll
index 938063ffef687..108c71c4e0860 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/addsub-inseltpoison.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/addsub-inseltpoison.ll
@@ -13,8 +13,8 @@ define <4 x float> @PR45015(<4 x float> %arg, <4 x float> %arg1) {
 ; CHECK-LABEL: @PR45015(
 ; CHECK-NEXT:    [[TMP1:%.*]] = fsub <4 x float> [[ARG:%.*]], [[ARG1:%.*]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = fadd <4 x float> [[ARG]], [[ARG1]]
-; CHECK-NEXT:    [[T16:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP2]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
-; CHECK-NEXT:    ret <4 x float> [[T16]]
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP2]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT:    ret <4 x float> [[TMP3]]
 ;
   %t = extractelement <4 x float> %arg, i32 0
   %t2 = extractelement <4 x float> %arg1, i32 0

diff --git a/llvm/test/Transforms/PhaseOrdering/X86/addsub.ll b/llvm/test/Transforms/PhaseOrdering/X86/addsub.ll
index 1c2099420dda6..c41c9750c289c 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/addsub.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/addsub.ll
@@ -13,8 +13,8 @@ define <4 x float> @PR45015(<4 x float> %arg, <4 x float> %arg1) {
 ; CHECK-LABEL: @PR45015(
 ; CHECK-NEXT:    [[TMP1:%.*]] = fsub <4 x float> [[ARG:%.*]], [[ARG1:%.*]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = fadd <4 x float> [[ARG]], [[ARG1]]
-; CHECK-NEXT:    [[T16:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP2]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
-; CHECK-NEXT:    ret <4 x float> [[T16]]
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP2]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT:    ret <4 x float> [[TMP3]]
 ;
   %t = extractelement <4 x float> %arg, i32 0
   %t2 = extractelement <4 x float> %arg1, i32 0

diff --git a/llvm/test/Transforms/PhaseOrdering/X86/loop-idiom-vs-indvars.ll b/llvm/test/Transforms/PhaseOrdering/X86/loop-idiom-vs-indvars.ll
index 91c4c9078b54e..86ba6ee95d8fb 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/loop-idiom-vs-indvars.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/loop-idiom-vs-indvars.ll
@@ -12,7 +12,7 @@ define i32 @cttz(i32 %n, i32* %p1) {
 ; ALL-LABEL: @cttz(
 ; ALL-NEXT:  entry:
 ; ALL-NEXT:    [[TMP0:%.*]] = shl i32 [[N:%.*]], 1
-; ALL-NEXT:    [[TMP1:%.*]] = call i32 @llvm.cttz.i32(i32 [[TMP0]], i1 false), [[RNG0:!range !.*]]
+; ALL-NEXT:    [[TMP1:%.*]] = call i32 @llvm.cttz.i32(i32 [[TMP0]], i1 false), !range [[RNG0:![0-9]+]]
 ; ALL-NEXT:    [[TMP2:%.*]] = sub nuw nsw i32 32, [[TMP1]]
 ; ALL-NEXT:    [[TMP3:%.*]] = sub nuw nsw i32 75, [[TMP1]]
 ; ALL-NEXT:    store i32 [[TMP3]], i32* [[P1:%.*]], align 4

diff --git a/llvm/test/Transforms/PhaseOrdering/X86/peel-before-lv-to-enable-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/X86/peel-before-lv-to-enable-vectorization.ll
index b30b6990f8863..3985d8f9bd98a 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/peel-before-lv-to-enable-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/peel-before-lv-to-enable-vectorization.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -O2 -S %s | FileCheck %s
 ; RUN: opt -passes='default<O2>' -S %s | FileCheck %s
 
@@ -10,10 +11,67 @@ target triple = "x86_64-apple-macosx"
 ; Test case from PR47671.
 
 define i32 @test(i32* readonly %p, i32* readnone %q) {
-; CHECK-LABEL: define i32 @test(
-; CHECK: vector.body:
-; CHECK:   %index.next = add nuw i64 %index, 8
-; CHECK: middle.block:
+; CHECK-LABEL: @test(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[P4:%.*]] = ptrtoint i32* [[P:%.*]] to i64
+; CHECK-NEXT:    [[Q3:%.*]] = ptrtoint i32* [[Q:%.*]] to i64
+; CHECK-NEXT:    [[CMP_NOT7:%.*]] = icmp eq i32* [[P]], [[Q]]
+; CHECK-NEXT:    br i1 [[CMP_NOT7]], label [[EXIT:%.*]], label [[LOOP_PREHEADER:%.*]]
+; CHECK:       loop.preheader:
+; CHECK-NEXT:    [[LV_PEEL:%.*]] = load i32, i32* [[P]], align 4
+; CHECK-NEXT:    [[IV_NEXT_PEEL:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
+; CHECK-NEXT:    [[CMP_NOT_PEEL:%.*]] = icmp eq i32* [[IV_NEXT_PEEL]], [[Q]]
+; CHECK-NEXT:    br i1 [[CMP_NOT_PEEL]], label [[EXIT]], label [[LOOP_PREHEADER2:%.*]]
+; CHECK:       loop.preheader2:
+; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[Q3]], -8
+; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[TMP0]], [[P4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i64 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 28
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[LOOP_PREHEADER8:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[N_VEC:%.*]] = and i64 [[TMP3]], 9223372036854775800
+; CHECK-NEXT:    [[IND_END:%.*]] = getelementptr i32, i32* [[IV_NEXT_PEEL]], i64 [[N_VEC]]
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> <i32 poison, i32 0, i32 0, i32 0>, i32 [[LV_PEEL]], i64 0
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[TMP10:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI5:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[NEXT_GEP:%.*]] = getelementptr i32, i32* [[IV_NEXT_PEEL]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP5:%.*]] = add <4 x i32> [[VEC_PHI]], <i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT:    [[TMP6:%.*]] = add <4 x i32> [[VEC_PHI5]], <i32 2, i32 2, i32 2, i32 2>
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32* [[NEXT_GEP]] to <4 x i32>*
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr i32, i32* [[NEXT_GEP]], i64 4
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <4 x i32>*
+; CHECK-NEXT:    [[WIDE_LOAD7:%.*]] = load <4 x i32>, <4 x i32>* [[TMP9]], align 4
+; CHECK-NEXT:    [[TMP10]] = add <4 x i32> [[WIDE_LOAD]], [[TMP5]]
+; CHECK-NEXT:    [[TMP11]] = add <4 x i32> [[WIDE_LOAD7]], [[TMP6]]
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK:       middle.block:
+; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <4 x i32> [[TMP11]], [[TMP10]]
+; CHECK-NEXT:    [[TMP13:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP3]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label [[EXIT]], label [[LOOP_PREHEADER8]]
+; CHECK:       loop.preheader8:
+; CHECK-NEXT:    [[SUM_PH:%.*]] = phi i32 [ [[LV_PEEL]], [[LOOP_PREHEADER2]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    [[IV_PH:%.*]] = phi i32* [ [[IV_NEXT_PEEL]], [[LOOP_PREHEADER2]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    br label [[LOOP:%.*]]
+; CHECK:       loop:
+; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ [[SUM_NEXT:%.*]], [[LOOP]] ], [ [[SUM_PH]], [[LOOP_PREHEADER8]] ]
+; CHECK-NEXT:    [[IV:%.*]] = phi i32* [ [[IV_NEXT:%.*]], [[LOOP]] ], [ [[IV_PH]], [[LOOP_PREHEADER8]] ]
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[SUM]], 2
+; CHECK-NEXT:    [[LV:%.*]] = load i32, i32* [[IV]], align 4
+; CHECK-NEXT:    [[SUM_NEXT]] = add nsw i32 [[LV]], [[ADD]]
+; CHECK-NEXT:    [[IV_NEXT]] = getelementptr inbounds i32, i32* [[IV]], i64 1
+; CHECK-NEXT:    [[CMP_NOT:%.*]] = icmp eq i32* [[IV_NEXT]], [[Q]]
+; CHECK-NEXT:    br i1 [[CMP_NOT]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK:       exit:
+; CHECK-NEXT:    [[SUM_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[LV_PEEL]], [[LOOP_PREHEADER]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ], [ [[SUM_NEXT]], [[LOOP]] ]
+; CHECK-NEXT:    ret i32 [[SUM_0_LCSSA]]
 ;
 entry:
   %cmp.not7 = icmp eq i32* %p, %q

diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll b/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll
index f6b3438fa8b35..bd764efde1282 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-expanded.ll
@@ -15,9 +15,9 @@ define i32 @add_v4i32(i32* %p) #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4, !tbaa [[TBAA0:![0-9]+]]
 ; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[BIN_RDX:%.*]] = add <4 x i32> [[TMP1]], [[RDX_SHUF]]
-; CHECK-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> poison, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT:    [[BIN_RDX4:%.*]] = add <4 x i32> [[BIN_RDX]], [[RDX_SHUF3]]
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[BIN_RDX4]], i32 0
+; CHECK-NEXT:    [[RDX_SHUF4:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> poison, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX5:%.*]] = add <4 x i32> [[BIN_RDX]], [[RDX_SHUF4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[BIN_RDX5]], i32 0
 ; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
 entry:
@@ -145,10 +145,10 @@ define i32 @smin_v4i32(i32* %p) #0 {
 ; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[RDX_MINMAX_CMP:%.*]] = icmp slt <4 x i32> [[TMP1]], [[RDX_SHUF]]
 ; CHECK-NEXT:    [[RDX_MINMAX_SELECT:%.*]] = select <4 x i1> [[RDX_MINMAX_CMP]], <4 x i32> [[TMP1]], <4 x i32> [[RDX_SHUF]]
-; CHECK-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <4 x i32> [[RDX_MINMAX_SELECT]], <4 x i32> poison, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT:    [[RDX_MINMAX_CMP4:%.*]] = icmp slt <4 x i32> [[RDX_MINMAX_SELECT]], [[RDX_SHUF3]]
-; CHECK-NEXT:    [[RDX_MINMAX_SELECT5:%.*]] = select <4 x i1> [[RDX_MINMAX_CMP4]], <4 x i32> [[RDX_MINMAX_SELECT]], <4 x i32> [[RDX_SHUF3]]
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[RDX_MINMAX_SELECT5]], i32 0
+; CHECK-NEXT:    [[RDX_SHUF4:%.*]] = shufflevector <4 x i32> [[RDX_MINMAX_SELECT]], <4 x i32> poison, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[RDX_MINMAX_CMP5:%.*]] = icmp slt <4 x i32> [[RDX_MINMAX_SELECT]], [[RDX_SHUF4]]
+; CHECK-NEXT:    [[RDX_MINMAX_SELECT6:%.*]] = select <4 x i1> [[RDX_MINMAX_CMP5]], <4 x i32> [[RDX_MINMAX_SELECT]], <4 x i32> [[RDX_SHUF4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[RDX_MINMAX_SELECT6]], i32 0
 ; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
 entry:
@@ -199,10 +199,10 @@ define i32 @umax_v4i32(i32* %p) #0 {
 ; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[TMP1]], <4 x i32> poison, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[RDX_MINMAX_CMP:%.*]] = icmp ugt <4 x i32> [[TMP1]], [[RDX_SHUF]]
 ; CHECK-NEXT:    [[RDX_MINMAX_SELECT:%.*]] = select <4 x i1> [[RDX_MINMAX_CMP]], <4 x i32> [[TMP1]], <4 x i32> [[RDX_SHUF]]
-; CHECK-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <4 x i32> [[RDX_MINMAX_SELECT]], <4 x i32> poison, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT:    [[RDX_MINMAX_CMP4:%.*]] = icmp ugt <4 x i32> [[RDX_MINMAX_SELECT]], [[RDX_SHUF3]]
-; CHECK-NEXT:    [[RDX_MINMAX_SELECT5:%.*]] = select <4 x i1> [[RDX_MINMAX_CMP4]], <4 x i32> [[RDX_MINMAX_SELECT]], <4 x i32> [[RDX_SHUF3]]
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[RDX_MINMAX_SELECT5]], i32 0
+; CHECK-NEXT:    [[RDX_SHUF4:%.*]] = shufflevector <4 x i32> [[RDX_MINMAX_SELECT]], <4 x i32> poison, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[RDX_MINMAX_CMP5:%.*]] = icmp ugt <4 x i32> [[RDX_MINMAX_SELECT]], [[RDX_SHUF4]]
+; CHECK-NEXT:    [[RDX_MINMAX_SELECT6:%.*]] = select <4 x i1> [[RDX_MINMAX_CMP5]], <4 x i32> [[RDX_MINMAX_SELECT]], <4 x i32> [[RDX_SHUF4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x i32> [[RDX_MINMAX_SELECT6]], i32 0
 ; CHECK-NEXT:    ret i32 [[TMP2]]
 ;
 entry:
@@ -252,11 +252,11 @@ define float @fadd_v4i32(float* %p) #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4, !tbaa [[TBAA7:![0-9]+]]
 ; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[BIN_RDX:%.*]] = fadd fast <4 x float> [[TMP1]], [[RDX_SHUF]]
-; CHECK-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> poison, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT:    [[BIN_RDX4:%.*]] = fadd fast <4 x float> [[BIN_RDX]], [[RDX_SHUF3]]
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x float> [[BIN_RDX4]], i32 0
-; CHECK-NEXT:    [[BIN_RDX5:%.*]] = fadd fast float 4.200000e+01, [[TMP2]]
-; CHECK-NEXT:    ret float [[BIN_RDX5]]
+; CHECK-NEXT:    [[RDX_SHUF4:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> poison, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX5:%.*]] = fadd fast <4 x float> [[BIN_RDX]], [[RDX_SHUF4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x float> [[BIN_RDX5]], i32 0
+; CHECK-NEXT:    [[BIN_RDX6:%.*]] = fadd fast float 4.200000e+01, [[TMP2]]
+; CHECK-NEXT:    ret float [[BIN_RDX6]]
 ;
 entry:
   br label %for.cond
@@ -292,11 +292,11 @@ define float @fmul_v4i32(float* %p) #0 {
 ; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4, !tbaa [[TBAA7]]
 ; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[BIN_RDX:%.*]] = fmul fast <4 x float> [[TMP1]], [[RDX_SHUF]]
-; CHECK-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> poison, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT:    [[BIN_RDX4:%.*]] = fmul fast <4 x float> [[BIN_RDX]], [[RDX_SHUF3]]
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x float> [[BIN_RDX4]], i32 0
-; CHECK-NEXT:    [[BIN_RDX5:%.*]] = fmul fast float 1.000000e+00, [[TMP2]]
-; CHECK-NEXT:    [[OP_EXTRA:%.*]] = fmul fast float [[BIN_RDX5]], 4.200000e+01
+; CHECK-NEXT:    [[RDX_SHUF4:%.*]] = shufflevector <4 x float> [[BIN_RDX]], <4 x float> poison, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX5:%.*]] = fmul fast <4 x float> [[BIN_RDX]], [[RDX_SHUF4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x float> [[BIN_RDX5]], i32 0
+; CHECK-NEXT:    [[BIN_RDX6:%.*]] = fmul fast float 1.000000e+00, [[TMP2]]
+; CHECK-NEXT:    [[OP_EXTRA:%.*]] = fmul fast float [[BIN_RDX6]], 4.200000e+01
 ; CHECK-NEXT:    ret float [[OP_EXTRA]]
 ;
 entry:
@@ -334,10 +334,10 @@ define float @fmin_v4f32(float* %p) #0 {
 ; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
 ; CHECK-NEXT:    [[RDX_MINMAX_CMP:%.*]] = fcmp fast olt <4 x float> [[TMP1]], [[RDX_SHUF]]
 ; CHECK-NEXT:    [[RDX_MINMAX_SELECT:%.*]] = select fast <4 x i1> [[RDX_MINMAX_CMP]], <4 x float> [[TMP1]], <4 x float> [[RDX_SHUF]]
-; CHECK-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <4 x float> [[RDX_MINMAX_SELECT]], <4 x float> poison, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT:    [[RDX_MINMAX_CMP4:%.*]] = fcmp fast olt <4 x float> [[RDX_MINMAX_SELECT]], [[RDX_SHUF3]]
-; CHECK-NEXT:    [[RDX_MINMAX_SELECT5:%.*]] = select fast <4 x i1> [[RDX_MINMAX_CMP4]], <4 x float> [[RDX_MINMAX_SELECT]], <4 x float> [[RDX_SHUF3]]
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x float> [[RDX_MINMAX_SELECT5]], i32 0
+; CHECK-NEXT:    [[RDX_SHUF4:%.*]] = shufflevector <4 x float> [[RDX_MINMAX_SELECT]], <4 x float> poison, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[RDX_MINMAX_CMP5:%.*]] = fcmp fast olt <4 x float> [[RDX_MINMAX_SELECT]], [[RDX_SHUF4]]
+; CHECK-NEXT:    [[RDX_MINMAX_SELECT6:%.*]] = select fast <4 x i1> [[RDX_MINMAX_CMP5]], <4 x float> [[RDX_MINMAX_SELECT]], <4 x float> [[RDX_SHUF4]]
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <4 x float> [[RDX_MINMAX_SELECT6]], i32 0
 ; CHECK-NEXT:    ret float [[TMP2]]
 ;
 entry:

diff --git a/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll b/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll
index 245996cebc800..a9c8fee070605 100644
--- a/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll
+++ b/llvm/test/Transforms/PhaseOrdering/assume-explosion.ll
@@ -1,8 +1,9 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -O3 -S < %s | FileCheck %s
 
-; Confirm that we do not create assumes, clone them, 
-; and then cause a compile-time explosion trying to 
-; simplify them all. Ie, this can become nearly an 
+; Confirm that we do not create assumes, clone them,
+; and then cause a compile-time explosion trying to
+; simplify them all. Ie, this can become nearly an
 ; infinite-loop if things go bad.
 ; https://llvm.org/PR49785
 
@@ -15,12 +16,14 @@ target triple = "x86_64-apple-macosx11.0.0"
 @b = global i32 0, align 4
 @d = global i32 0, align 4
 
-; Not checking complete IR because it could be very 
-; large with vectorization and unrolling (thousands 
+; Not checking complete IR because it could be very
+; large with vectorization and unrolling (thousands
 ; of lines of IR).
 
 define void @f() #0 {
 ; CHECK-LABEL: @f(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    unreachable
 ;
 entry:
   store i32 5, i32* @c, align 4, !tbaa !3

diff --git a/llvm/test/Transforms/PhaseOrdering/basic.ll b/llvm/test/Transforms/PhaseOrdering/basic.ll
index cc577c801da3a..ed138d2a0b986 100644
--- a/llvm/test/Transforms/PhaseOrdering/basic.ll
+++ b/llvm/test/Transforms/PhaseOrdering/basic.ll
@@ -32,9 +32,9 @@ define void @test1() nounwind ssp {
 define i32 @test2(i32 %a, i32* %p) nounwind uwtable ssp {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[DIV:%.*]] = lshr i32 [[A:%.*]], 2
-; CHECK-NEXT:    store i32 [[DIV]], i32* [[P:%.*]], align 4
-; CHECK-NEXT:    [[ADD:%.*]] = shl nuw nsw i32 [[DIV]], 1
+; CHECK-NEXT:    [[DIV1:%.*]] = lshr i32 [[A:%.*]], 2
+; CHECK-NEXT:    store i32 [[DIV1]], i32* [[P:%.*]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = shl nuw nsw i32 [[DIV1]], 1
 ; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
 ; CHECK-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX1]], align 4
 ; CHECK-NEXT:    ret i32 0

diff --git a/llvm/test/Transforms/PhaseOrdering/d83507-knowledge-retention-bug.ll b/llvm/test/Transforms/PhaseOrdering/d83507-knowledge-retention-bug.ll
index e5dd8dbd77ec9..2e3ad1c9f5489 100644
--- a/llvm/test/Transforms/PhaseOrdering/d83507-knowledge-retention-bug.ll
+++ b/llvm/test/Transforms/PhaseOrdering/d83507-knowledge-retention-bug.ll
@@ -7,14 +7,19 @@
 define %0* @f1(%0* %i0) local_unnamed_addr {
 ; CHECK-LABEL: @f1(
 ; CHECK-NEXT:  bb:
-; CHECK:         br label [[BB3:%.*]]
+; CHECK-NEXT:    [[I21:%.*]] = icmp eq %0* [[I0:%.*]], null
+; CHECK-NEXT:    br i1 [[I21]], label [[BB6:%.*]], label [[BB3_LR_PH:%.*]]
+; CHECK:       bb3.lr.ph:
+; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb3:
-; CHECK-NEXT:    [[I1:%.*]] = phi %0* [ %i0, [[BB:%.*]] ], [ [[I5:%.*]], [[BB3]] ]
-; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(%0* [[I1]]) ]
-; CHECK-NEXT:    [[I4:%.*]] = getelementptr inbounds [[TMP0:%.*]], %0* [[I1]], i64 0, i32 0
+; CHECK-NEXT:    [[I3:%.*]] = phi %0* [ [[I0]], [[BB3_LR_PH]] ], [ [[I5:%.*]], [[BB3]] ]
+; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "nonnull"(%0* [[I3]]) ]
+; CHECK-NEXT:    [[I4:%.*]] = getelementptr inbounds [[TMP0:%.*]], %0* [[I3]], i64 0, i32 0
 ; CHECK-NEXT:    [[I5]] = load %0*, %0** [[I4]], align 8
 ; CHECK-NEXT:    [[I2:%.*]] = icmp eq %0* [[I5]], null
-; CHECK-NEXT:    br i1 [[I2]], label [[BB6:%.*]], label [[BB3]]
+; CHECK-NEXT:    br i1 [[I2]], label [[BB1_BB6_CRIT_EDGE:%.*]], label [[BB3]]
+; CHECK:       bb1.bb6_crit_edge:
+; CHECK-NEXT:    br label [[BB6]]
 ; CHECK:       bb6:
 ; CHECK-NEXT:    ret %0* undef
 ;

diff --git a/llvm/test/Transforms/PhaseOrdering/expect.ll b/llvm/test/Transforms/PhaseOrdering/expect.ll
index 3a23785ddff78..9f06bf328d74e 100644
--- a/llvm/test/Transforms/PhaseOrdering/expect.ll
+++ b/llvm/test/Transforms/PhaseOrdering/expect.ll
@@ -10,7 +10,7 @@ define void @PR49336(i32 %delta, i32 %tag_type, i8* %ip) {
 ; CHECK-LABEL: @PR49336(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[DELTA:%.*]], 0
-; CHECK-NEXT:    br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END3:%.*]], !prof !0
+; CHECK-NEXT:    br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END3:%.*]], !prof [[PROF0:![0-9]+]]
 ; CHECK:       if.then:
 ; CHECK-NEXT:    [[CMP1_NOT:%.*]] = icmp eq i32 [[TAG_TYPE:%.*]], 0
 ; CHECK-NEXT:    br i1 [[CMP1_NOT]], label [[IF_END3]], label [[IF_THEN2:%.*]]

diff --git a/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll b/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll
index 9052fd5a03e4e..886bde2686bd9 100644
--- a/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll
+++ b/llvm/test/Transforms/PhaseOrdering/instcombine-sroa-inttoptr.ll
@@ -72,7 +72,7 @@ define dso_local i32* @_Z3foo1S(%0* byval(%0) align 8 %arg) {
 ; CHECK-NEXT:    [[I1_SROA_0_0_COPYLOAD:%.*]] = load i32*, i32** [[I1_SROA_0_0_I5_SROA_IDX]], align 8
 ; CHECK-NEXT:    [[I_SROA_0_0_I6_SROA_IDX:%.*]] = getelementptr inbounds [[TMP0]], %0* [[I2]], i64 0, i32 0
 ; CHECK-NEXT:    store i32* [[I1_SROA_0_0_COPYLOAD]], i32** [[I_SROA_0_0_I6_SROA_IDX]], align 8
-; CHECK-NEXT:    tail call void @_Z7escape01S(%0* nonnull byval(%0) align 8 [[I2]])
+; CHECK-NEXT:    tail call void @_Z7escape01S(%0* nonnull byval([[TMP0]]) align 8 [[I2]])
 ; CHECK-NEXT:    ret i32* [[I1_SROA_0_0_COPYLOAD]]
 ;
 bb:

diff --git a/llvm/test/Transforms/PhaseOrdering/pr39282.ll b/llvm/test/Transforms/PhaseOrdering/pr39282.ll
index 30e9378b9a0d1..9ab4b70909090 100644
--- a/llvm/test/Transforms/PhaseOrdering/pr39282.ll
+++ b/llvm/test/Transforms/PhaseOrdering/pr39282.ll
@@ -18,22 +18,22 @@ define void @copy(i32* noalias %to, i32* noalias %from) {
 define void @pr39282(i32* %addr1, i32* %addr2) {
 ; CHECK-LABEL: @pr39282(
 ; CHECK-NEXT:  start:
-; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl
-; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl
+; CHECK-NEXT:    tail call void @llvm.experimental.noalias.scope.decl(metadata [[META0:![0-9]+]])
+; CHECK-NEXT:    tail call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
 ; CHECK-NEXT:    [[X_I:%.*]] = load i32, i32* [[ADDR1:%.*]], align 4, !alias.scope !3, !noalias !0
 ; CHECK-NEXT:    store i32 [[X_I]], i32* [[ADDR2:%.*]], align 4, !alias.scope !0, !noalias !3
 ; CHECK-NEXT:    [[ADDR1I_1:%.*]] = getelementptr inbounds i32, i32* [[ADDR1]], i64 1
 ; CHECK-NEXT:    [[ADDR2I_1:%.*]] = getelementptr inbounds i32, i32* [[ADDR2]], i64 1
-; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl
-; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl
+; CHECK-NEXT:    tail call void @llvm.experimental.noalias.scope.decl(metadata [[META5:![0-9]+]])
+; CHECK-NEXT:    tail call void @llvm.experimental.noalias.scope.decl(metadata [[META7:![0-9]+]])
 ; CHECK-NEXT:    [[X_I_1:%.*]] = load i32, i32* [[ADDR1I_1]], align 4, !alias.scope !7, !noalias !5
 ; CHECK-NEXT:    store i32 [[X_I_1]], i32* [[ADDR2I_1]], align 4, !alias.scope !5, !noalias !7
-; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl
-; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl
+; CHECK-NEXT:    tail call void @llvm.experimental.noalias.scope.decl(metadata [[META9:![0-9]+]])
+; CHECK-NEXT:    tail call void @llvm.experimental.noalias.scope.decl(metadata [[META11:![0-9]+]])
 ; CHECK-NEXT:    [[X_I_2:%.*]] = load i32, i32* [[ADDR1]], align 4, !alias.scope !11, !noalias !9
 ; CHECK-NEXT:    store i32 [[X_I_2]], i32* [[ADDR2]], align 4, !alias.scope !9, !noalias !11
-; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl
-; CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl
+; CHECK-NEXT:    tail call void @llvm.experimental.noalias.scope.decl(metadata [[META13:![0-9]+]])
+; CHECK-NEXT:    tail call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]])
 ; CHECK-NEXT:    [[X_I_3:%.*]] = load i32, i32* [[ADDR1I_1]], align 4, !alias.scope !15, !noalias !13
 ; CHECK-NEXT:    store i32 [[X_I_3]], i32* [[ADDR2I_1]], align 4, !alias.scope !13, !noalias !15
 ; CHECK-NEXT:    ret void

diff --git a/llvm/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll b/llvm/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll
index 16dd203887ec0..d61cf3e9e6d85 100644
--- a/llvm/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll
+++ b/llvm/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll
@@ -35,27 +35,27 @@ define i1 @will_not_overflow(i64 %arg, i64 %arg1) {
 ; INSTCOMBINEONLY-NEXT:    [[T0:%.*]] = icmp eq i64 [[ARG:%.*]], 0
 ; INSTCOMBINEONLY-NEXT:    br i1 [[T0]], label [[BB5:%.*]], label [[BB2:%.*]]
 ; INSTCOMBINEONLY:       bb2:
-; INSTCOMBINEONLY-NEXT:    [[UMUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[ARG]], i64 [[ARG1:%.*]])
-; INSTCOMBINEONLY-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
+; INSTCOMBINEONLY-NEXT:    [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[ARG]], i64 [[ARG1:%.*]])
+; INSTCOMBINEONLY-NEXT:    [[MUL_OV:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
 ; INSTCOMBINEONLY-NEXT:    br label [[BB5]]
 ; INSTCOMBINEONLY:       bb5:
-; INSTCOMBINEONLY-NEXT:    [[T6:%.*]] = phi i1 [ false, [[BB:%.*]] ], [ [[UMUL_OV]], [[BB2]] ]
+; INSTCOMBINEONLY-NEXT:    [[T6:%.*]] = phi i1 [ false, [[BB:%.*]] ], [ [[MUL_OV]], [[BB2]] ]
 ; INSTCOMBINEONLY-NEXT:    ret i1 [[T6]]
 ;
 ; INSTCOMBINESIMPLIFYCFGONLY-LABEL: @will_not_overflow(
 ; INSTCOMBINESIMPLIFYCFGONLY-NEXT:  bb:
 ; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[T0:%.*]] = icmp eq i64 [[ARG:%.*]], 0
-; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[UMUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[ARG]], i64 [[ARG1:%.*]])
-; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
-; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[T6:%.*]] = select i1 [[T0]], i1 false, i1 [[UMUL_OV]]
+; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[ARG]], i64 [[ARG1:%.*]])
+; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[MUL_OV:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
+; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[T6:%.*]] = select i1 [[T0]], i1 false, i1 [[MUL_OV]]
 ; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    ret i1 [[T6]]
 ;
 ; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-LABEL: @will_not_overflow(
 ; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:  bb:
 ; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    [[ARG1_FR:%.*]] = freeze i64 [[ARG1:%.*]]
-; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    [[UMUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[ARG:%.*]], i64 [[ARG1_FR]])
-; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
-; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    ret i1 [[UMUL_OV]]
+; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[ARG:%.*]], i64 [[ARG1_FR]])
+; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    [[MUL_OV:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
+; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    ret i1 [[MUL_OV]]
 ;
 bb:
   %t0 = icmp eq i64 %arg, 0
@@ -92,9 +92,9 @@ define i1 @will_overflow(i64 %arg, i64 %arg1) {
 ; INSTCOMBINEONLY-NEXT:    [[T0:%.*]] = icmp eq i64 [[ARG:%.*]], 0
 ; INSTCOMBINEONLY-NEXT:    br i1 [[T0]], label [[BB5:%.*]], label [[BB2:%.*]]
 ; INSTCOMBINEONLY:       bb2:
-; INSTCOMBINEONLY-NEXT:    [[UMUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[ARG]], i64 [[ARG1:%.*]])
-; INSTCOMBINEONLY-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
-; INSTCOMBINEONLY-NEXT:    [[PHI_BO:%.*]] = xor i1 [[UMUL_OV]], true
+; INSTCOMBINEONLY-NEXT:    [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[ARG]], i64 [[ARG1:%.*]])
+; INSTCOMBINEONLY-NEXT:    [[MUL_OV:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
+; INSTCOMBINEONLY-NEXT:    [[PHI_BO:%.*]] = xor i1 [[MUL_OV]], true
 ; INSTCOMBINEONLY-NEXT:    br label [[BB5]]
 ; INSTCOMBINEONLY:       bb5:
 ; INSTCOMBINEONLY-NEXT:    [[T6:%.*]] = phi i1 [ true, [[BB:%.*]] ], [ [[PHI_BO]], [[BB2]] ]
@@ -103,18 +103,18 @@ define i1 @will_overflow(i64 %arg, i64 %arg1) {
 ; INSTCOMBINESIMPLIFYCFGONLY-LABEL: @will_overflow(
 ; INSTCOMBINESIMPLIFYCFGONLY-NEXT:  bb:
 ; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[T0:%.*]] = icmp eq i64 [[ARG:%.*]], 0
-; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[UMUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[ARG]], i64 [[ARG1:%.*]])
-; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
-; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[PHI_BO:%.*]] = xor i1 [[UMUL_OV]], true
+; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[ARG]], i64 [[ARG1:%.*]])
+; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[MUL_OV:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
+; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[PHI_BO:%.*]] = xor i1 [[MUL_OV]], true
 ; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    [[T6:%.*]] = select i1 [[T0]], i1 true, i1 [[PHI_BO]]
 ; INSTCOMBINESIMPLIFYCFGONLY-NEXT:    ret i1 [[T6]]
 ;
 ; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-LABEL: @will_overflow(
 ; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:  bb:
 ; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    [[ARG1_FR:%.*]] = freeze i64 [[ARG1:%.*]]
-; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    [[UMUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[ARG:%.*]], i64 [[ARG1_FR]])
-; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
-; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    [[PHI_BO:%.*]] = xor i1 [[UMUL_OV]], true
+; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[ARG:%.*]], i64 [[ARG1_FR]])
+; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    [[MUL_OV:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
+; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    [[PHI_BO:%.*]] = xor i1 [[MUL_OV]], true
 ; INSTCOMBINESIMPLIFYCFGINSTCOMBINE-NEXT:    ret i1 [[PHI_BO]]
 ;
 bb:


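For anyone reproducing this locally: checks like the ones above come from the
update_test_checks.py utility, which reads each test's RUN line and rewrites the
autogenerated CHECK lines from the actual pass output. A minimal sketch of that
workflow follows; the build/bin/opt path and the particular test files shown are
illustrative assumptions, not part of this commit:

    # Regenerate the autogenerated FileCheck assertions for a couple of the
    # PhaseOrdering tests, pointing the script at a locally built 'opt'.
    llvm/utils/update_test_checks.py \
        --opt-binary=build/bin/opt \
        llvm/test/Transforms/PhaseOrdering/basic.ll \
        llvm/test/Transforms/PhaseOrdering/expect.ll

After regenerating, a normal test run (for example, ninja check-llvm or llvm-lit
on the touched files) confirms the updated checks still pass.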
        

