[llvm] e86d6a4 - Regenerate test checks for tests affected by D141060

Alex Richardson via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 4 10:52:46 PDT 2023


Author: Alex Richardson
Date: 2023-10-04T10:51:35-07:00
New Revision: e86d6a43f03b6d635d8d1101a936088c8cf0cf23

URL: https://github.com/llvm/llvm-project/commit/e86d6a43f03b6d635d8d1101a936088c8cf0cf23
DIFF: https://github.com/llvm/llvm-project/commit/e86d6a43f03b6d635d8d1101a936088c8cf0cf23.diff

LOG: Regenerate test checks for tests affected by D141060

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/lower-lds-struct-aa-memcpy.ll
    llvm/test/CodeGen/AMDGPU/lower-module-lds-indirect-extern-uses-max-reachable-alignment.ll
    llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll
    llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
    llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
    llvm/test/Transforms/InstCombine/double-float-shrink-2.ll
    llvm/test/Transforms/InstCombine/ffs-i16.ll
    llvm/test/Transforms/InstCombine/fls-i16.ll
    llvm/test/Transforms/InstCombine/isascii-i16.ll
    llvm/test/Transforms/InstCombine/isdigit-i16.ll
    llvm/test/Transforms/InstCombine/pow_fp_int.ll
    llvm/test/Transforms/InstCombine/printf-i16.ll
    llvm/test/Transforms/InstCombine/puts-i16.ll
    llvm/test/Transforms/LoopUnroll/ARM/upperbound.ll
    llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-call.ll
    llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
    llvm/test/Transforms/MergeICmps/X86/addressspaces.ll
    llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
    llvm/test/Transforms/SLPVectorizer/AArch64/splat-loads.ll
    llvm/test/Transforms/SLPVectorizer/X86/control-dependence.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/lower-lds-struct-aa-memcpy.ll b/llvm/test/CodeGen/AMDGPU/lower-lds-struct-aa-memcpy.ll
index 7a8a183ffbc92d0..48f533093858129 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-lds-struct-aa-memcpy.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-lds-struct-aa-memcpy.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals --version 3
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -O3 --amdgpu-lower-module-lds-strategy=module < %s | FileCheck -check-prefix=GCN %s
 ; RUN: opt -S -mtriple=amdgcn-- -amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s
 ; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=module < %s | FileCheck %s
@@ -31,19 +32,20 @@ define protected amdgpu_kernel void @test(ptr addrspace(1) nocapture %ptr.coerce
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[2:3]
 ; GCN-NEXT:    global_store_byte v0, v1, s[0:1]
 ; GCN-NEXT:    s_endpgm
-; CHECK-LABEL: @test(
+; CHECK-LABEL: define protected amdgpu_kernel void @test(
+; CHECK-SAME: ptr addrspace(1) nocapture [[PTR_COERCE:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    store i8 3, ptr addrspace(3) @llvm.amdgcn.kernel.test.lds, align 4, !alias.scope !1, !noalias !4
-; CHECK-NEXT:    tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef align 1 dereferenceable(3) getelementptr inbounds (%llvm.amdgcn.kernel.test.lds.t, ptr addrspace(3) @llvm.amdgcn.kernel.test.lds, i32 0, i32 2), ptr addrspace(3) noundef align 1 dereferenceable(3) @llvm.amdgcn.kernel.test.lds, i64 3, i1 false), !alias.scope !6, !noalias !7
-; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr addrspace(3) getelementptr inbounds (%llvm.amdgcn.kernel.test.lds.t, ptr addrspace(3) @llvm.amdgcn.kernel.test.lds, i32 0, i32 2), align 4, !alias.scope !4, !noalias !1
-; CHECK-NEXT:    [[CMP_I_I:%.*]] = icmp eq i8 [[TMP4]], 3
+; CHECK-NEXT:    tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef align 1 dereferenceable(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_TEST_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.kernel.test.lds, i32 0, i32 2), ptr addrspace(3) noundef align 1 dereferenceable(3) @llvm.amdgcn.kernel.test.lds, i64 3, i1 false), !alias.scope !6, !noalias !7
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_TEST_LDS_T]], ptr addrspace(3) @llvm.amdgcn.kernel.test.lds, i32 0, i32 2), align 4, !alias.scope !4, !noalias !1
+; CHECK-NEXT:    [[CMP_I_I:%.*]] = icmp eq i8 [[TMP0]], 3
 ; CHECK-NEXT:    store i8 2, ptr addrspace(3) @llvm.amdgcn.kernel.test.lds, align 4, !alias.scope !1, !noalias !4
-; CHECK-NEXT: tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef align 1 dereferenceable(3) getelementptr inbounds (%llvm.amdgcn.kernel.test.lds.t, ptr addrspace(3) @llvm.amdgcn.kernel.test.lds, i32 0, i32 2), ptr addrspace(3) noundef align 1 dereferenceable(3) @llvm.amdgcn.kernel.test.lds, i64 3, i1 false), !alias.scope !6, !noalias !7
-; CHECK-NEXT:    [[TMP9:%.*]] = load i8, ptr addrspace(3) getelementptr inbounds (%llvm.amdgcn.kernel.test.lds.t, ptr addrspace(3) @llvm.amdgcn.kernel.test.lds, i32 0, i32 2), align 4, !alias.scope !4, !noalias !1
-; CHECK-NEXT:    [[CMP_I_I19:%.*]] = icmp eq i8 [[TMP9]], 2
-; CHECK-NEXT:    [[TMP10:%.*]] = and i1 [[CMP_I_I19]], [[CMP_I_I]]
-; CHECK-NEXT:    [[FROMBOOL8:%.*]] = zext i1 [[TMP10]] to i8
-; CHECK-NEXT:    store i8 [[FROMBOOL8]], ptr addrspace(1) [[PTR_COERCE:%.*]], align 1
+; CHECK-NEXT:    tail call void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noundef align 1 dereferenceable(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_TEST_LDS_T]], ptr addrspace(3) @llvm.amdgcn.kernel.test.lds, i32 0, i32 2), ptr addrspace(3) noundef align 1 dereferenceable(3) @llvm.amdgcn.kernel.test.lds, i64 3, i1 false), !alias.scope !6, !noalias !7
+; CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_TEST_LDS_T]], ptr addrspace(3) @llvm.amdgcn.kernel.test.lds, i32 0, i32 2), align 4, !alias.scope !4, !noalias !1
+; CHECK-NEXT:    [[CMP_I_I19:%.*]] = icmp eq i8 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i1 [[CMP_I_I19]], [[CMP_I_I]]
+; CHECK-NEXT:    [[FROMBOOL8:%.*]] = zext i1 [[TMP2]] to i8
+; CHECK-NEXT:    store i8 [[FROMBOOL8]], ptr addrspace(1) [[PTR_COERCE]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -64,7 +66,8 @@ entry:
 declare void @llvm.memcpy.p3.p3.i64(ptr addrspace(3) noalias nocapture writeonly, ptr addrspace(3) noalias nocapture readonly, i64, i1 immarg) #1
 
 ;.
-; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-lds-size"="7" }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nounwind willreturn memory(argmem: readwrite) }
 ;.
 ; CHECK: [[META0:![0-9]+]] = !{i64 0, i64 1}
 ; CHECK: [[META1:![0-9]+]] = !{!2}

diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-indirect-extern-uses-max-reachable-alignment.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-indirect-extern-uses-max-reachable-alignment.ll
index a5cc452a9c27ea2..c999332cb654237 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-module-lds-indirect-extern-uses-max-reachable-alignment.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-indirect-extern-uses-max-reachable-alignment.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
 ; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds < %s | FileCheck %s
 
 ; Not reached by a non-kernel function and therefore not changed by this pass
@@ -33,7 +33,7 @@
 
 
 define amdgpu_kernel void @kernel_only() {
-; CHECK-LABEL: @kernel_only() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_only() {
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x double], ptr addrspace(3) @dynamic_kernel_only, i32 0, i32 0
 ; CHECK-NEXT:    store double 3.140000e+00, ptr addrspace(3) [[ARRAYIDX]], align 8
 ; CHECK-NEXT:    ret void
@@ -45,7 +45,7 @@ define amdgpu_kernel void @kernel_only() {
 
 ; The accesses from functions are rewritten to go through the llvm.amdgcn.dynlds.offset.table
 define void @use_shared1() {
-; CHECK-LABEL: @use_shared1() {
+; CHECK-LABEL: define void @use_shared1() {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[DYNAMIC_SHARED1:%.*]] = getelementptr inbounds [5 x i32], ptr addrspace(4) @llvm.amdgcn.dynlds.offset.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[DYNAMIC_SHARED1]], align 4
@@ -60,7 +60,8 @@ define void @use_shared1() {
 }
 
 define void @use_shared2() #0 {
-; CHECK-LABEL: @use_shared2() #0 {
+; CHECK-LABEL: define void @use_shared2(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[DYNAMIC_SHARED2:%.*]] = getelementptr inbounds [5 x i32], ptr addrspace(4) @llvm.amdgcn.dynlds.offset.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[DYNAMIC_SHARED2]], align 4
@@ -77,7 +78,8 @@ define void @use_shared2() #0 {
 ; Include a normal variable so that the new variables aren't all at the same absolute_symbol
 @static_shared = addrspace(3) global i32 poison
 define void @use_shared4() #0 {
-; CHECK-LABEL: @use_shared4() #0 {
+; CHECK-LABEL: define void @use_shared4(
+; CHECK-SAME: ) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    store i32 4, ptr addrspace(3) @llvm.amdgcn.module.lds, align 4
 ; CHECK-NEXT:    [[DYNAMIC_SHARED4:%.*]] = getelementptr inbounds [5 x i32], ptr addrspace(4) @llvm.amdgcn.dynlds.offset.table, i32 0, i32 [[TMP1]]
@@ -94,7 +96,8 @@ define void @use_shared4() #0 {
 }
 
 define void @use_shared8() #0 {
-; CHECK-LABEL: @use_shared8() #0 {
+; CHECK-LABEL: define void @use_shared8(
+; CHECK-SAME: ) #[[ATTR0]] {
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; CHECK-NEXT:    [[DYNAMIC_SHARED8:%.*]] = getelementptr inbounds [5 x i32], ptr addrspace(4) @llvm.amdgcn.dynlds.offset.table, i32 0, i32 [[TMP1]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[DYNAMIC_SHARED8]], align 4
@@ -110,7 +113,7 @@ define void @use_shared8() #0 {
 
 ; The kernels are annotated with kernel.id and llvm.donothing use of the corresponding variable
 define amdgpu_kernel void @expect_align1() {
-; CHECK-LABEL: @expect_align1() !llvm.amdgcn.lds.kernel.id !2
+; CHECK-LABEL: define amdgpu_kernel void @expect_align1() !llvm.amdgcn.lds.kernel.id !2 {
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.expect_align1.dynlds) ]
 ; CHECK-NEXT:    call void @use_shared1()
 ; CHECK-NEXT:    ret void
@@ -120,7 +123,7 @@ define amdgpu_kernel void @expect_align1() {
 }
 
 define amdgpu_kernel void @expect_align2() {
-; CHECK-LABEL: @expect_align2() !llvm.amdgcn.lds.kernel.id !3
+; CHECK-LABEL: define amdgpu_kernel void @expect_align2() !llvm.amdgcn.lds.kernel.id !3 {
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.expect_align2.dynlds) ]
 ; CHECK-NEXT:    call void @use_shared2()
 ; CHECK-NEXT:    ret void
@@ -130,7 +133,8 @@ define amdgpu_kernel void @expect_align2() {
 }
 
 define amdgpu_kernel void @expect_align4() {
-; CHECK-LABEL: @expect_align4() #1 !llvm.amdgcn.lds.kernel.id !4 {
+; CHECK-LABEL: define amdgpu_kernel void @expect_align4(
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id !4 {
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.expect_align4.dynlds) ]
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ]
 ; CHECK-NEXT:    call void @use_shared4()
@@ -142,7 +146,7 @@ define amdgpu_kernel void @expect_align4() {
 
 ; Use dynamic_shared directly too.
 define amdgpu_kernel void @expect_align8() {
-; CHECK-LABEL: @expect_align8() !llvm.amdgcn.lds.kernel.id !5 {
+; CHECK-LABEL: define amdgpu_kernel void @expect_align8() !llvm.amdgcn.lds.kernel.id !5 {
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.expect_align8.dynlds) ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [0 x i64], ptr addrspace(3) @dynamic_shared8, i32 0, i32 9
 ; CHECK-NEXT:    store i64 3, ptr addrspace(3) [[ARRAYIDX]], align 4
@@ -157,7 +161,8 @@ define amdgpu_kernel void @expect_align8() {
 
 ; Note: use_shared4 uses module.lds so this will allocate at offset 4
 define amdgpu_kernel void @expect_max_of_2_and_4() {
-; CHECK-LABEL: @expect_max_of_2_and_4() #1 !llvm.amdgcn.lds.kernel.id !6 {
+; CHECK-LABEL: define amdgpu_kernel void @expect_max_of_2_and_4(
+; CHECK-SAME: ) #[[ATTR1]] !llvm.amdgcn.lds.kernel.id !6 {
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.expect_max_of_2_and_4.dynlds) ]
 ; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.module.lds) ]
 ; CHECK-NEXT:    call void @use_shared2()

diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll
index 61bb25f3e42ace6..82004b84275e6f1 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-via-table.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
 ; RUN: opt -S -mtriple=amdgcn--amdhsa -passes=amdgpu-lower-module-lds < %s --amdgpu-lower-module-lds-strategy=table | FileCheck -check-prefix=OPT %s
 ; RUN: llc -mtriple=amdgcn--amdhsa -verify-machineinstrs < %s --amdgpu-lower-module-lds-strategy=table | FileCheck -check-prefix=GCN %s
 
@@ -30,7 +31,7 @@
 
 
 define void @f0() {
-; OPT-LABEL: @f0(
+; OPT-LABEL: define void @f0() {
 ; OPT-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; OPT-NEXT:    [[V02:%.*]] = getelementptr inbounds [3 x [4 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
 ; OPT-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[V02]], align 4
@@ -71,7 +72,7 @@ define void @f0() {
 }
 
 define void @f1() {
-; OPT-LABEL: @f1(
+; OPT-LABEL: define void @f1() {
 ; OPT-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; OPT-NEXT:    [[V12:%.*]] = getelementptr inbounds [3 x [4 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 1
 ; OPT-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[V12]], align 4
@@ -112,7 +113,7 @@ define void @f1() {
 }
 
 define void @f2() {
-; OPT-LABEL: @f2(
+; OPT-LABEL: define void @f2() {
 ; OPT-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; OPT-NEXT:    [[V22:%.*]] = getelementptr inbounds [3 x [4 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 2
 ; OPT-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[V22]], align 4
@@ -153,7 +154,7 @@ define void @f2() {
 }
 
 define void @f3() {
-; OPT-LABEL: @f3(
+; OPT-LABEL: define void @f3() {
 ; OPT-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
 ; OPT-NEXT:    [[V32:%.*]] = getelementptr inbounds [3 x [4 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 3
 ; OPT-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[V32]], align 4
@@ -195,7 +196,8 @@ define void @f3() {
 
 ; Doesn't access any via a function, won't be in the lookup table
 define amdgpu_kernel void @kernel_no_table() {
-; OPT-LABEL: @kernel_no_table() #0 {
+; OPT-LABEL: define amdgpu_kernel void @kernel_no_table(
+; OPT-SAME: ) #[[ATTR0:[0-9]+]] {
 ; OPT-NEXT:    [[LD:%.*]] = load i64, ptr addrspace(3) @llvm.amdgcn.kernel.kernel_no_table.lds, align 8
 ; OPT-NEXT:    [[MUL:%.*]] = mul i64 [[LD]], 8
 ; OPT-NEXT:    store i64 [[MUL]], ptr addrspace(3) @llvm.amdgcn.kernel.kernel_no_table.lds, align 8
@@ -218,8 +220,9 @@ define amdgpu_kernel void @kernel_no_table() {
 
 ; Access two variables, will allocate those two
 define amdgpu_kernel void @k01() {
-; OPT-LABEL: @k01() #0 !llvm.amdgcn.lds.kernel.id !1 {
-; OPT-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.k01.lds) ]
+; OPT-LABEL: define amdgpu_kernel void @k01(
+; OPT-SAME: ) #[[ATTR0]] !llvm.amdgcn.lds.kernel.id !1 {
+; OPT-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.k01.lds) ], !alias.scope !2, !noalias !5
 ; OPT-NEXT:    call void @f0()
 ; OPT-NEXT:    call void @f1()
 ; OPT-NEXT:    ret void
@@ -256,8 +259,9 @@ define amdgpu_kernel void @k01() {
 }
 
 define amdgpu_kernel void @k23() {
-; OPT-LABEL: @k23() #1 !llvm.amdgcn.lds.kernel.id !7 {
-; OPT-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.k23.lds) ]
+; OPT-LABEL: define amdgpu_kernel void @k23(
+; OPT-SAME: ) #[[ATTR1:[0-9]+]] !llvm.amdgcn.lds.kernel.id !7 {
+; OPT-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.k23.lds) ], !alias.scope !8, !noalias !11
 ; OPT-NEXT:    call void @f2()
 ; OPT-NEXT:    call void @f3()
 ; OPT-NEXT:    ret void
@@ -295,8 +299,9 @@ define amdgpu_kernel void @k23() {
 
 ; Access and allocate three variables
 define amdgpu_kernel void @k123() {
-; OPT-LABEL: @k123() #2 !llvm.amdgcn.lds.kernel.id !13 {
-; OPT-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.k123.lds) ]
+; OPT-LABEL: define amdgpu_kernel void @k123(
+; OPT-SAME: ) #[[ATTR2:[0-9]+]] !llvm.amdgcn.lds.kernel.id !13 {
+; OPT-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.k123.lds) ], !alias.scope !14, !noalias !17
 ; OPT-NEXT:    call void @f1()
 ; OPT-NEXT:    [[LD:%.*]] = load i8, ptr addrspace(3) getelementptr inbounds ([[LLVM_AMDGCN_KERNEL_K123_LDS_T:%.*]], ptr addrspace(3) @llvm.amdgcn.kernel.k123.lds, i32 0, i32 1), align 2, !alias.scope !20, !noalias !21
 ; OPT-NEXT:    [[MUL:%.*]] = mul i8 [[LD]], 8

diff --git a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
index 60a5128a889c2ba..9a327f087bcd164 100644
--- a/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
+++ b/llvm/test/Transforms/AtomicExpand/PowerPC/cmpxchg.ll
@@ -14,35 +14,35 @@ define i1 @test_cmpxchg_seq_cst(ptr %addr, i128 %desire, i128 %new) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr i128 [[NEW]], 64
 ; CHECK-NEXT:    [[NEW_HI:%.*]] = trunc i128 [[TMP1]] to i64
 ; CHECK-NEXT:    call void @llvm.ppc.sync()
-; CHECK-NEXT:    [[TMP3:%.*]] = call { i64, i64 } @llvm.ppc.cmpxchg.i128(ptr [[ADDR:%.*]], i64 [[CMP_LO]], i64 [[CMP_HI]], i64 [[NEW_LO]], i64 [[NEW_HI]])
+; CHECK-NEXT:    [[TMP2:%.*]] = call { i64, i64 } @llvm.ppc.cmpxchg.i128(ptr [[ADDR:%.*]], i64 [[CMP_LO]], i64 [[CMP_HI]], i64 [[NEW_LO]], i64 [[NEW_HI]])
 ; CHECK-NEXT:    call void @llvm.ppc.lwsync()
-; CHECK-NEXT:    [[LO:%.*]] = extractvalue { i64, i64 } [[TMP3]], 0
-; CHECK-NEXT:    [[HI:%.*]] = extractvalue { i64, i64 } [[TMP3]], 1
+; CHECK-NEXT:    [[LO:%.*]] = extractvalue { i64, i64 } [[TMP2]], 0
+; CHECK-NEXT:    [[HI:%.*]] = extractvalue { i64, i64 } [[TMP2]], 1
 ; CHECK-NEXT:    [[LO64:%.*]] = zext i64 [[LO]] to i128
 ; CHECK-NEXT:    [[HI64:%.*]] = zext i64 [[HI]] to i128
-; CHECK-NEXT:    [[TMP4:%.*]] = shl i128 [[HI64]], 64
-; CHECK-NEXT:    [[VAL64:%.*]] = or i128 [[LO64]], [[TMP4]]
-; CHECK-NEXT:    [[TMP5:%.*]] = insertvalue { i128, i1 } poison, i128 [[VAL64]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = shl i128 [[HI64]], 64
+; CHECK-NEXT:    [[VAL64:%.*]] = or i128 [[LO64]], [[TMP3]]
+; CHECK-NEXT:    [[TMP4:%.*]] = insertvalue { i128, i1 } poison, i128 [[VAL64]], 0
 ; CHECK-NEXT:    [[SUCCESS:%.*]] = icmp eq i128 [[DESIRE]], [[VAL64]]
-; CHECK-NEXT:    [[TMP6:%.*]] = insertvalue { i128, i1 } [[TMP5]], i1 [[SUCCESS]], 1
-; CHECK-NEXT:    [[SUCC:%.*]] = extractvalue { i128, i1 } [[TMP6]], 1
+; CHECK-NEXT:    [[TMP5:%.*]] = insertvalue { i128, i1 } [[TMP4]], i1 [[SUCCESS]], 1
+; CHECK-NEXT:    [[SUCC:%.*]] = extractvalue { i128, i1 } [[TMP5]], 1
 ; CHECK-NEXT:    ret i1 [[SUCC]]
 ;
 ; PWR7-LABEL: @test_cmpxchg_seq_cst(
 ; PWR7-NEXT:  entry:
+; PWR7-NEXT:    [[TMP0:%.*]] = alloca i128, align 8
+; PWR7-NEXT:    call void @llvm.lifetime.start.p0(i64 16, ptr [[TMP0]])
+; PWR7-NEXT:    store i128 [[DESIRE:%.*]], ptr [[TMP0]], align 8
 ; PWR7-NEXT:    [[TMP1:%.*]] = alloca i128, align 8
 ; PWR7-NEXT:    call void @llvm.lifetime.start.p0(i64 16, ptr [[TMP1]])
-; PWR7-NEXT:    store i128 [[DESIRE:%.*]], ptr [[TMP1]], align 8
-; PWR7-NEXT:    [[TMP3:%.*]] = alloca i128, align 8
-; PWR7-NEXT:    call void @llvm.lifetime.start.p0(i64 16, ptr [[TMP3]])
-; PWR7-NEXT:    store i128 [[NEW:%.*]], ptr [[TMP3]], align 8
-; PWR7-NEXT:    [[TMP5:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 16, ptr [[ADDR:%.*]], ptr [[TMP1]], ptr [[TMP3]], i32 5, i32 5)
-; PWR7-NEXT:    call void @llvm.lifetime.end.p0(i64 16, ptr [[TMP3]])
-; PWR7-NEXT:    [[TMP6:%.*]] = load i128, ptr [[TMP1]], align 8
+; PWR7-NEXT:    store i128 [[NEW:%.*]], ptr [[TMP1]], align 8
+; PWR7-NEXT:    [[TMP2:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 16, ptr [[ADDR:%.*]], ptr [[TMP0]], ptr [[TMP1]], i32 5, i32 5)
 ; PWR7-NEXT:    call void @llvm.lifetime.end.p0(i64 16, ptr [[TMP1]])
-; PWR7-NEXT:    [[TMP7:%.*]] = insertvalue { i128, i1 } poison, i128 [[TMP6]], 0
-; PWR7-NEXT:    [[TMP8:%.*]] = insertvalue { i128, i1 } [[TMP7]], i1 [[TMP5]], 1
-; PWR7-NEXT:    [[SUCC:%.*]] = extractvalue { i128, i1 } [[TMP8]], 1
+; PWR7-NEXT:    [[TMP3:%.*]] = load i128, ptr [[TMP0]], align 8
+; PWR7-NEXT:    call void @llvm.lifetime.end.p0(i64 16, ptr [[TMP0]])
+; PWR7-NEXT:    [[TMP4:%.*]] = insertvalue { i128, i1 } poison, i128 [[TMP3]], 0
+; PWR7-NEXT:    [[TMP5:%.*]] = insertvalue { i128, i1 } [[TMP4]], i1 [[TMP2]], 1
+; PWR7-NEXT:    [[SUCC:%.*]] = extractvalue { i128, i1 } [[TMP5]], 1
 ; PWR7-NEXT:    ret i1 [[SUCC]]
 ;
 entry:

diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
index 6343a0c697fd388..826b9be7a1e5d33 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-libcall.ll
@@ -4,12 +4,12 @@
 
 define i256 @atomic_load256_libcall(ptr %ptr) nounwind {
 ; CHECK-LABEL: @atomic_load256_libcall(
-; CHECK-NEXT:    [[TMP2:%.*]] = alloca i256, align 8
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr [[TMP2]])
-; CHECK-NEXT:    call void @__atomic_load(i64 32, ptr [[PTR:%.*]], ptr [[TMP2]], i32 0)
-; CHECK-NEXT:    [[TMP4:%.*]] = load i256, ptr [[TMP2]], align 8
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr [[TMP2]])
-; CHECK-NEXT:    ret i256 [[TMP4]]
+; CHECK-NEXT:    [[TMP1:%.*]] = alloca i256, align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr [[TMP1]])
+; CHECK-NEXT:    call void @__atomic_load(i64 32, ptr [[PTR:%.*]], ptr [[TMP1]], i32 0)
+; CHECK-NEXT:    [[TMP2:%.*]] = load i256, ptr [[TMP1]], align 8
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr [[TMP1]])
+; CHECK-NEXT:    ret i256 [[TMP2]]
 ;
   %result = load atomic i256, ptr %ptr unordered, align 16
   ret i256 %result
@@ -17,13 +17,13 @@ define i256 @atomic_load256_libcall(ptr %ptr) nounwind {
 
 define i256 @atomic_load256_libcall_as1(ptr addrspace(1) %ptr) nounwind {
 ; CHECK-LABEL: @atomic_load256_libcall_as1(
-; CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr addrspace(1) [[PTR:%.*]] to ptr
-; CHECK-NEXT:    [[TMP3:%.*]] = alloca i256, align 8
-; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr [[TMP3]])
-; CHECK-NEXT:    call void @__atomic_load(i64 32, ptr [[TMP2]], ptr [[TMP3]], i32 0)
-; CHECK-NEXT:    [[TMP5:%.*]] = load i256, ptr [[TMP3]], align 8
-; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr [[TMP3]])
-; CHECK-NEXT:    ret i256 [[TMP5]]
+; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr addrspace(1) [[PTR:%.*]] to ptr
+; CHECK-NEXT:    [[TMP2:%.*]] = alloca i256, align 8
+; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 32, ptr [[TMP2]])
+; CHECK-NEXT:    call void @__atomic_load(i64 32, ptr [[TMP1]], ptr [[TMP2]], i32 0)
+; CHECK-NEXT:    [[TMP3:%.*]] = load i256, ptr [[TMP2]], align 8
+; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 32, ptr [[TMP2]])
+; CHECK-NEXT:    ret i256 [[TMP3]]
 ;
   %result = load atomic i256, ptr addrspace(1) %ptr unordered, align 16
   ret i256 %result

diff --git a/llvm/test/Transforms/InstCombine/double-float-shrink-2.ll b/llvm/test/Transforms/InstCombine/double-float-shrink-2.ll
index 9f8dc9e8ac234c9..24ccad4e157b936 100644
--- a/llvm/test/Transforms/InstCombine/double-float-shrink-2.ll
+++ b/llvm/test/Transforms/InstCombine/double-float-shrink-2.ll
@@ -41,8 +41,8 @@ declare <2 x double> @llvm.trunc.v2f64(<2 x double>)
 
 define float @test_shrink_libcall_floor(float %C) {
 ; CHECK-LABEL: @test_shrink_libcall_floor(
-; CHECK-NEXT:    [[F:%.*]] = call float @llvm.floor.f32(float [[C:%.*]])
-; CHECK-NEXT:    ret float [[F]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.floor.f32(float [[C:%.*]])
+; CHECK-NEXT:    ret float [[TMP1]]
 ;
   %D = fpext float %C to double
   ; --> floorf
@@ -53,8 +53,8 @@ define float @test_shrink_libcall_floor(float %C) {
 
 define float @test_shrink_libcall_ceil(float %C) {
 ; CHECK-LABEL: @test_shrink_libcall_ceil(
-; CHECK-NEXT:    [[F:%.*]] = call float @llvm.ceil.f32(float [[C:%.*]])
-; CHECK-NEXT:    ret float [[F]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[C:%.*]])
+; CHECK-NEXT:    ret float [[TMP1]]
 ;
   %D = fpext float %C to double
   ; --> ceilf
@@ -65,8 +65,8 @@ define float @test_shrink_libcall_ceil(float %C) {
 
 define float @test_shrink_libcall_round(float %C) {
 ; CHECK-LABEL: @test_shrink_libcall_round(
-; CHECK-NEXT:    [[F:%.*]] = call float @llvm.round.f32(float [[C:%.*]])
-; CHECK-NEXT:    ret float [[F]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.round.f32(float [[C:%.*]])
+; CHECK-NEXT:    ret float [[TMP1]]
 ;
   %D = fpext float %C to double
   ; --> roundf
@@ -77,8 +77,8 @@ define float @test_shrink_libcall_round(float %C) {
 
 define float @test_shrink_libcall_roundeven(float %C) {
 ; CHECK-LABEL: @test_shrink_libcall_roundeven(
-; CHECK-NEXT:    [[F:%.*]] = call float @llvm.roundeven.f32(float [[C:%.*]])
-; CHECK-NEXT:    ret float [[F]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.roundeven.f32(float [[C:%.*]])
+; CHECK-NEXT:    ret float [[TMP1]]
 ;
   %D = fpext float %C to double
   ; --> roundeven
@@ -89,8 +89,8 @@ define float @test_shrink_libcall_roundeven(float %C) {
 
 define float @test_shrink_libcall_nearbyint(float %C) {
 ; CHECK-LABEL: @test_shrink_libcall_nearbyint(
-; CHECK-NEXT:    [[F:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
-; CHECK-NEXT:    ret float [[F]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.nearbyint.f32(float [[C:%.*]])
+; CHECK-NEXT:    ret float [[TMP1]]
 ;
   %D = fpext float %C to double
   ; --> nearbyintf
@@ -101,8 +101,8 @@ define float @test_shrink_libcall_nearbyint(float %C) {
 
 define float @test_shrink_libcall_trunc(float %C) {
 ; CHECK-LABEL: @test_shrink_libcall_trunc(
-; CHECK-NEXT:    [[F:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
-; CHECK-NEXT:    ret float [[F]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.trunc.f32(float [[C:%.*]])
+; CHECK-NEXT:    ret float [[TMP1]]
 ;
   %D = fpext float %C to double
   ; --> truncf
@@ -115,8 +115,8 @@ define float @test_shrink_libcall_trunc(float %C) {
 ; tested platforms.
 define float @test_shrink_libcall_fabs(float %C) {
 ; CHECK-LABEL: @test_shrink_libcall_fabs(
-; CHECK-NEXT:    [[F:%.*]] = call float @llvm.fabs.f32(float [[C:%.*]])
-; CHECK-NEXT:    ret float [[F]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[C:%.*]])
+; CHECK-NEXT:    ret float [[TMP1]]
 ;
   %D = fpext float %C to double
   %E = call double @fabs(double %D)
@@ -127,8 +127,8 @@ define float @test_shrink_libcall_fabs(float %C) {
 ; Make sure fast math flags are preserved
 define float @test_shrink_libcall_fabs_fast(float %C) {
 ; CHECK-LABEL: @test_shrink_libcall_fabs_fast(
-; CHECK-NEXT:    [[F:%.*]] = call fast float @llvm.fabs.f32(float [[C:%.*]])
-; CHECK-NEXT:    ret float [[F]]
+; CHECK-NEXT:    [[TMP1:%.*]] = call fast float @llvm.fabs.f32(float [[C:%.*]])
+; CHECK-NEXT:    ret float [[TMP1]]
 ;
   %D = fpext float %C to double
   %E = call fast double @fabs(double %D)

diff --git a/llvm/test/Transforms/InstCombine/ffs-i16.ll b/llvm/test/Transforms/InstCombine/ffs-i16.ll
index f86ae34e340fb1c..f567a9488d4985b 100644
--- a/llvm/test/Transforms/InstCombine/ffs-i16.ll
+++ b/llvm/test/Transforms/InstCombine/ffs-i16.ll
@@ -3,8 +3,8 @@
 ; Test that the ffs library call simplifier works correctly even for
 ; targets with 16-bit int.
 ;
-; RUN: opt < %s -mtriple=avr-linux -passes=instcombine -S | FileCheck %s
-; RUN: opt < %s -mtriple=msp430-linux -passes=instcombine -S | FileCheck %s
+; RUN: opt < %s -mtriple=avr-linux -passes=instcombine -S | FileCheck %s --check-prefix=AVR
+; RUN: opt < %s -mtriple=msp430-linux -passes=instcombine -S | FileCheck %s --check-prefix=MSP430
 
 declare i16 @ffs(i16)
 
@@ -12,15 +12,25 @@ declare void @sink(i16)
 
 
 define void @fold_ffs(i16 %x) {
-; CHECK-LABEL: @fold_ffs(
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    call void @sink(i16 1)
-; CHECK-NEXT:    [[CTTZ:%.*]] = call i16 @llvm.cttz.i16(i16 [[X:%.*]], i1 true), !range [[RNG0:![0-9]+]]
-; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i16 [[CTTZ]], 1
-; CHECK-NEXT:    [[DOTNOT:%.*]] = icmp eq i16 [[X]], 0
-; CHECK-NEXT:    [[NX:%.*]] = select i1 [[DOTNOT]], i16 0, i16 [[TMP1]]
-; CHECK-NEXT:    call void @sink(i16 [[NX]])
-; CHECK-NEXT:    ret void
+; AVR-LABEL: @fold_ffs(
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    call void @sink(i16 1)
+; AVR-NEXT:    [[CTTZ:%.*]] = call i16 @llvm.cttz.i16(i16 [[X:%.*]], i1 true), !range [[RNG0:![0-9]+]]
+; AVR-NEXT:    [[TMP1:%.*]] = add nuw nsw i16 [[CTTZ]], 1
+; AVR-NEXT:    [[DOTNOT:%.*]] = icmp eq i16 [[X]], 0
+; AVR-NEXT:    [[NX:%.*]] = select i1 [[DOTNOT]], i16 0, i16 [[TMP1]]
+; AVR-NEXT:    call void @sink(i16 [[NX]])
+; AVR-NEXT:    ret void
+;
+; MSP430-LABEL: @fold_ffs(
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    call void @sink(i16 1)
+; MSP430-NEXT:    [[CTTZ:%.*]] = call i16 @llvm.cttz.i16(i16 [[X:%.*]], i1 true), !range [[RNG0:![0-9]+]]
+; MSP430-NEXT:    [[TMP1:%.*]] = add nuw nsw i16 [[CTTZ]], 1
+; MSP430-NEXT:    [[DOTNOT:%.*]] = icmp eq i16 [[X]], 0
+; MSP430-NEXT:    [[NX:%.*]] = select i1 [[DOTNOT]], i16 0, i16 [[TMP1]]
+; MSP430-NEXT:    call void @sink(i16 [[NX]])
+; MSP430-NEXT:    ret void
 ;
   %n0 = call i16 @ffs(i16 0)
   call void @sink(i16 %n0)

diff --git a/llvm/test/Transforms/InstCombine/fls-i16.ll b/llvm/test/Transforms/InstCombine/fls-i16.ll
index e19b230d3163d50..ba0471665a48214 100644
--- a/llvm/test/Transforms/InstCombine/fls-i16.ll
+++ b/llvm/test/Transforms/InstCombine/fls-i16.ll
@@ -4,8 +4,8 @@
 ; targets with 16-bit int.  Although fls is available on a number of
 ; targets it's supported (hardcoded as available) only on FreeBSD.
 ;
-; RUN: opt < %s -mtriple=avr-freebsd -passes=instcombine -S | FileCheck %s
-; RUN: opt < %s -mtriple=msp430-freebsd -passes=instcombine -S | FileCheck %s
+; RUN: opt < %s -mtriple=avr-freebsd -passes=instcombine -S | FileCheck %s --check-prefix=AVR
+; RUN: opt < %s -mtriple=msp430-freebsd -passes=instcombine -S | FileCheck %s --check-prefix=MSP430
 
 declare i16 @fls(i16)
 
@@ -13,13 +13,21 @@ declare void @sink(i16)
 
 
 define void @fold_fls(i16 %x) {
-; CHECK-LABEL: @fold_fls(
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    call void @sink(i16 1)
-; CHECK-NEXT:    [[CTLZ:%.*]] = call i16 @llvm.ctlz.i16(i16 [[X:%.*]], i1 false), !range [[RNG0:![0-9]+]]
-; CHECK-NEXT:    [[NX:%.*]] = sub nuw nsw i16 16, [[CTLZ]]
-; CHECK-NEXT:    call void @sink(i16 [[NX]])
-; CHECK-NEXT:    ret void
+; AVR-LABEL: @fold_fls(
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    call void @sink(i16 1)
+; AVR-NEXT:    [[CTLZ:%.*]] = call i16 @llvm.ctlz.i16(i16 [[X:%.*]], i1 false), !range [[RNG0:![0-9]+]]
+; AVR-NEXT:    [[NX:%.*]] = sub nuw nsw i16 16, [[CTLZ]]
+; AVR-NEXT:    call void @sink(i16 [[NX]])
+; AVR-NEXT:    ret void
+;
+; MSP430-LABEL: @fold_fls(
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    call void @sink(i16 1)
+; MSP430-NEXT:    [[CTLZ:%.*]] = call i16 @llvm.ctlz.i16(i16 [[X:%.*]], i1 false), !range [[RNG0:![0-9]+]]
+; MSP430-NEXT:    [[NX:%.*]] = sub nuw nsw i16 16, [[CTLZ]]
+; MSP430-NEXT:    call void @sink(i16 [[NX]])
+; MSP430-NEXT:    ret void
 ;
   %n0 = call i16 @fls(i16 0)
   call void @sink(i16 %n0)

diff --git a/llvm/test/Transforms/InstCombine/isascii-i16.ll b/llvm/test/Transforms/InstCombine/isascii-i16.ll
index ef5d8c75d80c824..2cac57e09c37eb9 100644
--- a/llvm/test/Transforms/InstCombine/isascii-i16.ll
+++ b/llvm/test/Transforms/InstCombine/isascii-i16.ll
@@ -2,8 +2,8 @@
 ; Test that the isascii library call simplifier works correctly even for
 ; targets with 16-bit int.
 ;
-; RUN: opt < %s -mtriple=avr-freebsd -passes=instcombine -S | FileCheck %s
-; RUN: opt < %s -mtriple=msp430-linux -passes=instcombine -S | FileCheck %s
+; RUN: opt < %s -mtriple=avr-freebsd -passes=instcombine -S | FileCheck %s --check-prefix=AVR
+; RUN: opt < %s -mtriple=msp430-linux -passes=instcombine -S | FileCheck %s --check-prefix=MSP430
 
 declare i16 @isascii(i16)
 
@@ -11,19 +11,33 @@ declare void @sink(i16)
 
 
 define void @fold_isascii(i16 %c) {
-; CHECK-LABEL: @fold_isascii(
-; CHECK-NEXT:    call void @sink(i16 1)
-; CHECK-NEXT:    call void @sink(i16 1)
-; CHECK-NEXT:    call void @sink(i16 1)
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    [[ISASCII:%.*]] = icmp ult i16 [[C:%.*]], 128
-; CHECK-NEXT:    [[IC:%.*]] = zext i1 [[ISASCII]] to i16
-; CHECK-NEXT:    call void @sink(i16 [[IC]])
-; CHECK-NEXT:    ret void
+; AVR-LABEL: @fold_isascii(
+; AVR-NEXT:    call void @sink(i16 1)
+; AVR-NEXT:    call void @sink(i16 1)
+; AVR-NEXT:    call void @sink(i16 1)
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    [[ISASCII:%.*]] = icmp ult i16 [[C:%.*]], 128
+; AVR-NEXT:    [[IC:%.*]] = zext i1 [[ISASCII]] to i16
+; AVR-NEXT:    call void @sink(i16 [[IC]])
+; AVR-NEXT:    ret void
+;
+; MSP430-LABEL: @fold_isascii(
+; MSP430-NEXT:    call void @sink(i16 1)
+; MSP430-NEXT:    call void @sink(i16 1)
+; MSP430-NEXT:    call void @sink(i16 1)
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    [[ISASCII:%.*]] = icmp ult i16 [[C:%.*]], 128
+; MSP430-NEXT:    [[IC:%.*]] = zext i1 [[ISASCII]] to i16
+; MSP430-NEXT:    call void @sink(i16 [[IC]])
+; MSP430-NEXT:    ret void
 ;
   %i0 = call i16 @isascii(i16 0)
   call void @sink(i16 %i0)

diff --git a/llvm/test/Transforms/InstCombine/isdigit-i16.ll b/llvm/test/Transforms/InstCombine/isdigit-i16.ll
index 7e63798ca438cbd..69f7b7f2e1f702e 100644
--- a/llvm/test/Transforms/InstCombine/isdigit-i16.ll
+++ b/llvm/test/Transforms/InstCombine/isdigit-i16.ll
@@ -2,32 +2,51 @@
 ; Test that the isdigit library call simplifier works correctly even for
 ; targets with 16-bit int.
 ;
-; RUN: opt < %s -mtriple=avr-linux -passes=instcombine -S | FileCheck %s
-; RUN: opt < %s -mtriple=msp430-freebsd -passes=instcombine -S | FileCheck %s
+; RUN: opt < %s -mtriple=avr-linux -passes=instcombine -S | FileCheck %s --check-prefix=AVR
+; RUN: opt < %s -mtriple=msp430-freebsd -passes=instcombine -S | FileCheck %s  --check-prefix=MSP430
 
 declare i16 @isdigit(i16)
 
 declare void @sink(i16)
 
 define void @fold_isdigit(i16 %c) {
-; CHECK-LABEL: @fold_isdigit(
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    call void @sink(i16 1)
-; CHECK-NEXT:    call void @sink(i16 1)
-; CHECK-NEXT:    call void @sink(i16 1)
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    call void @sink(i16 0)
-; CHECK-NEXT:    [[ISDIGITTMP:%.*]] = add i16 [[C:%.*]], -48
-; CHECK-NEXT:    [[ISDIGIT:%.*]] = icmp ult i16 [[ISDIGITTMP]], 10
-; CHECK-NEXT:    [[IC:%.*]] = zext i1 [[ISDIGIT]] to i16
-; CHECK-NEXT:    call void @sink(i16 [[IC]])
-; CHECK-NEXT:    ret void
+; AVR-LABEL: @fold_isdigit(
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    call void @sink(i16 1)
+; AVR-NEXT:    call void @sink(i16 1)
+; AVR-NEXT:    call void @sink(i16 1)
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    call void @sink(i16 0)
+; AVR-NEXT:    [[ISDIGITTMP:%.*]] = add i16 [[C:%.*]], -48
+; AVR-NEXT:    [[ISDIGIT:%.*]] = icmp ult i16 [[ISDIGITTMP]], 10
+; AVR-NEXT:    [[IC:%.*]] = zext i1 [[ISDIGIT]] to i16
+; AVR-NEXT:    call void @sink(i16 [[IC]])
+; AVR-NEXT:    ret void
+;
+; MSP430-LABEL: @fold_isdigit(
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    call void @sink(i16 1)
+; MSP430-NEXT:    call void @sink(i16 1)
+; MSP430-NEXT:    call void @sink(i16 1)
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    call void @sink(i16 0)
+; MSP430-NEXT:    [[ISDIGITTMP:%.*]] = add i16 [[C:%.*]], -48
+; MSP430-NEXT:    [[ISDIGIT:%.*]] = icmp ult i16 [[ISDIGITTMP]], 10
+; MSP430-NEXT:    [[IC:%.*]] = zext i1 [[ISDIGIT]] to i16
+; MSP430-NEXT:    call void @sink(i16 [[IC]])
+; MSP430-NEXT:    ret void
 ;
   %i0 = call i16 @isdigit(i16 0)
   call void @sink(i16 %i0)

diff --git a/llvm/test/Transforms/InstCombine/pow_fp_int.ll b/llvm/test/Transforms/InstCombine/pow_fp_int.ll
index 8718df3e86638a9..e4d74f6bacb06a9 100644
--- a/llvm/test/Transforms/InstCombine/pow_fp_int.ll
+++ b/llvm/test/Transforms/InstCombine/pow_fp_int.ll
@@ -1,12 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
 ; RUN: opt -mtriple unknown -passes=instcombine -S < %s | FileCheck %s
 
 ; PR42190
 ; Can't generate test checks due to PR42740.
 
 define double @pow_sitofp_const_base_fast(i32 %x) {
-; CHECK-LABEL: @pow_sitofp_const_base_fast(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[X:%.*]])
-; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP1]] to double
+; CHECK-LABEL: define double @pow_sitofp_const_base_fast(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[POW:%.*]] = tail call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[X]])
+; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   %subfp = sitofp i32 %x to float
@@ -16,10 +18,11 @@ define double @pow_sitofp_const_base_fast(i32 %x) {
 }
 
 define double @pow_uitofp_const_base_fast(i31 %x) {
-; CHECK-LABEL: @pow_uitofp_const_base_fast(
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32
-; CHECK-NEXT:    [[TMP2:%.*]] = tail call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]])
-; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP2]] to double
+; CHECK-LABEL: define double @pow_uitofp_const_base_fast(
+; CHECK-SAME: i31 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X]] to i32
+; CHECK-NEXT:    [[POW:%.*]] = tail call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]])
+; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   %subfp = uitofp i31 %x to float
@@ -29,9 +32,10 @@ define double @pow_uitofp_const_base_fast(i31 %x) {
 }
 
 define double @pow_sitofp_double_const_base_fast(i32 %x) {
-; CHECK-LABEL: @pow_sitofp_double_const_base_fast(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call afn double @llvm.powi.f64.i32(double 7.000000e+00, i32 [[X:%.*]])
-; CHECK-NEXT:    ret double [[TMP1]]
+; CHECK-LABEL: define double @pow_sitofp_double_const_base_fast(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[POW:%.*]] = tail call afn double @llvm.powi.f64.i32(double 7.000000e+00, i32 [[X]])
+; CHECK-NEXT:    ret double [[POW]]
 ;
   %subfp = sitofp i32 %x to double
   %pow = tail call afn double @llvm.pow.f64(double 7.000000e+00, double %subfp)
@@ -39,10 +43,11 @@ define double @pow_sitofp_double_const_base_fast(i32 %x) {
 }
 
 define double @pow_uitofp_double_const_base_fast(i31 %x) {
-; CHECK-LABEL: @pow_uitofp_double_const_base_fast(
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32
-; CHECK-NEXT:    [[TMP2:%.*]] = tail call afn double @llvm.powi.f64.i32(double 7.000000e+00, i32 [[TMP1]])
-; CHECK-NEXT:    ret double [[TMP2]]
+; CHECK-LABEL: define double @pow_uitofp_double_const_base_fast(
+; CHECK-SAME: i31 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X]] to i32
+; CHECK-NEXT:    [[POW:%.*]] = tail call afn double @llvm.powi.f64.i32(double 7.000000e+00, i32 [[TMP1]])
+; CHECK-NEXT:    ret double [[POW]]
 ;
   %subfp = uitofp i31 %x to double
   %pow = tail call afn double @llvm.pow.f64(double 7.000000e+00, double %subfp)
@@ -50,8 +55,9 @@ define double @pow_uitofp_double_const_base_fast(i31 %x) {
 }
 
 define double @pow_sitofp_double_const_base_2_fast(i32 %x) {
-; CHECK-LABEL: @pow_sitofp_double_const_base_2_fast(
-; CHECK-NEXT:    [[LDEXPF:%.*]] = tail call afn float @ldexpf(float 1.000000e+00, i32 [[X:%.*]])
+; CHECK-LABEL: define double @pow_sitofp_double_const_base_2_fast(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[LDEXPF:%.*]] = tail call afn float @ldexpf(float 1.000000e+00, i32 [[X]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[LDEXPF]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
@@ -62,8 +68,9 @@ define double @pow_sitofp_double_const_base_2_fast(i32 %x) {
 }
 
 define double @pow_sitofp_double_const_base_power_of_2_fast(i32 %x) {
-; CHECK-LABEL: @pow_sitofp_double_const_base_power_of_2_fast(
-; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X:%.*]] to float
+; CHECK-LABEL: define double @pow_sitofp_double_const_base_power_of_2_fast(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X]] to float
 ; CHECK-NEXT:    [[MUL:%.*]] = fmul afn float [[SUBFP]], 4.000000e+00
 ; CHECK-NEXT:    [[EXP2:%.*]] = tail call afn float @llvm.exp2.f32(float [[MUL]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
@@ -76,8 +83,9 @@ define double @pow_sitofp_double_const_base_power_of_2_fast(i32 %x) {
 }
 
 define double @pow_uitofp_const_base_2_fast(i31 %x) {
-; CHECK-LABEL: @pow_uitofp_const_base_2_fast(
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32
+; CHECK-LABEL: define double @pow_uitofp_const_base_2_fast(
+; CHECK-SAME: i31 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X]] to i32
 ; CHECK-NEXT:    [[LDEXPF:%.*]] = tail call afn float @ldexpf(float 1.000000e+00, i32 [[TMP1]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[LDEXPF]] to double
 ; CHECK-NEXT:    ret double [[RES]]
@@ -89,8 +97,9 @@ define double @pow_uitofp_const_base_2_fast(i31 %x) {
 }
 
 define double @pow_uitofp_const_base_power_of_2_fast(i31 %x) {
-; CHECK-LABEL: @pow_uitofp_const_base_power_of_2_fast(
-; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i31 [[X:%.*]] to float
+; CHECK-LABEL: define double @pow_uitofp_const_base_power_of_2_fast(
+; CHECK-SAME: i31 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i31 [[X]] to float
 ; CHECK-NEXT:    [[MUL:%.*]] = fmul afn float [[SUBFP]], 4.000000e+00
 ; CHECK-NEXT:    [[EXP2:%.*]] = tail call afn float @llvm.exp2.f32(float [[MUL]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
@@ -103,9 +112,10 @@ define double @pow_uitofp_const_base_power_of_2_fast(i31 %x) {
 }
 
 define double @pow_sitofp_float_base_fast(float %base, i32 %x) {
-; CHECK-LABEL: @pow_sitofp_float_base_fast(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call afn float @llvm.powi.f32.i32(float [[BASE:%.*]], i32 [[X:%.*]])
-; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP1]] to double
+; CHECK-LABEL: define double @pow_sitofp_float_base_fast(
+; CHECK-SAME: float [[BASE:%.*]], i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[POW:%.*]] = tail call afn float @llvm.powi.f32.i32(float [[BASE]], i32 [[X]])
+; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   %subfp = sitofp i32 %x to float
@@ -115,10 +125,11 @@ define double @pow_sitofp_float_base_fast(float %base, i32 %x) {
 }
 
 define double @pow_uitofp_float_base_fast(float %base, i31 %x) {
-; CHECK-LABEL: @pow_uitofp_float_base_fast(
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32
-; CHECK-NEXT:    [[TMP2:%.*]] = tail call afn float @llvm.powi.f32.i32(float [[BASE:%.*]], i32 [[TMP1]])
-; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP2]] to double
+; CHECK-LABEL: define double @pow_uitofp_float_base_fast(
+; CHECK-SAME: float [[BASE:%.*]], i31 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X]] to i32
+; CHECK-NEXT:    [[POW:%.*]] = tail call afn float @llvm.powi.f32.i32(float [[BASE]], i32 [[TMP1]])
+; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   %subfp = uitofp i31 %x to float
@@ -128,9 +139,10 @@ define double @pow_uitofp_float_base_fast(float %base, i31 %x) {
 }
 
 define double @pow_sitofp_double_base_fast(double %base, i32 %x) {
-; CHECK-LABEL: @pow_sitofp_double_base_fast(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call afn double @llvm.powi.f64.i32(double [[BASE:%.*]], i32 [[X:%.*]])
-; CHECK-NEXT:    ret double [[TMP1]]
+; CHECK-LABEL: define double @pow_sitofp_double_base_fast(
+; CHECK-SAME: double [[BASE:%.*]], i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[RES:%.*]] = tail call afn double @llvm.powi.f64.i32(double [[BASE]], i32 [[X]])
+; CHECK-NEXT:    ret double [[RES]]
 ;
   %subfp = sitofp i32 %x to double
   %res = tail call afn double @llvm.pow.f64(double %base, double %subfp)
@@ -138,10 +150,11 @@ define double @pow_sitofp_double_base_fast(double %base, i32 %x) {
 }
 
 define double @pow_uitofp_double_base_fast(double %base, i31 %x) {
-; CHECK-LABEL: @pow_uitofp_double_base_fast(
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X:%.*]] to i32
-; CHECK-NEXT:    [[TMP2:%.*]] = tail call afn double @llvm.powi.f64.i32(double [[BASE:%.*]], i32 [[TMP1]])
-; CHECK-NEXT:    ret double [[TMP2]]
+; CHECK-LABEL: define double @pow_uitofp_double_base_fast(
+; CHECK-SAME: double [[BASE:%.*]], i31 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i31 [[X]] to i32
+; CHECK-NEXT:    [[RES:%.*]] = tail call afn double @llvm.powi.f64.i32(double [[BASE]], i32 [[TMP1]])
+; CHECK-NEXT:    ret double [[RES]]
 ;
   %subfp = uitofp i31 %x to double
   %res = tail call afn double @llvm.pow.f64(double %base, double %subfp)
@@ -149,10 +162,11 @@ define double @pow_uitofp_double_base_fast(double %base, i31 %x) {
 }
 
 define double @pow_sitofp_const_base_fast_i8(i8 %x) {
-; CHECK-LABEL: @pow_sitofp_const_base_fast_i8(
-; CHECK-NEXT:    [[TMP1:%.*]] = sext i8 [[X:%.*]] to i32
-; CHECK-NEXT:    [[TMP2:%.*]] = tail call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]])
-; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP2]] to double
+; CHECK-LABEL: define double @pow_sitofp_const_base_fast_i8(
+; CHECK-SAME: i8 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i8 [[X]] to i32
+; CHECK-NEXT:    [[POW:%.*]] = tail call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]])
+; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   %subfp = sitofp i8 %x to float
@@ -162,10 +176,11 @@ define double @pow_sitofp_const_base_fast_i8(i8 %x) {
 }
 
 define double @pow_sitofp_const_base_fast_i16(i16 %x) {
-; CHECK-LABEL: @pow_sitofp_const_base_fast_i16(
-; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[X:%.*]] to i32
-; CHECK-NEXT:    [[TMP2:%.*]] = tail call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]])
-; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP2]] to double
+; CHECK-LABEL: define double @pow_sitofp_const_base_fast_i16(
+; CHECK-SAME: i16 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i16 [[X]] to i32
+; CHECK-NEXT:    [[POW:%.*]] = tail call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]])
+; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   %subfp = sitofp i16 %x to float
@@ -176,10 +191,11 @@ define double @pow_sitofp_const_base_fast_i16(i16 %x) {
 
 
 define double @pow_uitofp_const_base_fast_i8(i8 %x) {
-; CHECK-LABEL: @pow_uitofp_const_base_fast_i8(
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[X:%.*]] to i32
-; CHECK-NEXT:    [[TMP2:%.*]] = tail call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]])
-; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP2]] to double
+; CHECK-LABEL: define double @pow_uitofp_const_base_fast_i8(
+; CHECK-SAME: i8 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[X]] to i32
+; CHECK-NEXT:    [[POW:%.*]] = tail call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]])
+; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   %subfp = uitofp i8 %x to float
@@ -189,10 +205,11 @@ define double @pow_uitofp_const_base_fast_i8(i8 %x) {
 }
 
 define double @pow_uitofp_const_base_fast_i16(i16 %x) {
-; CHECK-LABEL: @pow_uitofp_const_base_fast_i16(
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[X:%.*]] to i32
-; CHECK-NEXT:    [[TMP2:%.*]] = tail call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]])
-; CHECK-NEXT:    [[RES:%.*]] = fpext float [[TMP2]] to double
+; CHECK-LABEL: define double @pow_uitofp_const_base_fast_i16(
+; CHECK-SAME: i16 [[X:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[X]] to i32
+; CHECK-NEXT:    [[POW:%.*]] = tail call afn float @llvm.powi.f32.i32(float 7.000000e+00, i32 [[TMP1]])
+; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   %subfp = uitofp i16 %x to float
@@ -202,18 +219,20 @@ define double @pow_uitofp_const_base_fast_i16(i16 %x) {
 }
 
 define double @powf_exp_const_int_fast(double %base) {
-; CHECK-LABEL: @powf_exp_const_int_fast(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call fast double @llvm.powi.f64.i32(double [[BASE:%.*]], i32 40)
-; CHECK-NEXT:    ret double [[TMP1]]
+; CHECK-LABEL: define double @powf_exp_const_int_fast(
+; CHECK-SAME: double [[BASE:%.*]]) {
+; CHECK-NEXT:    [[RES:%.*]] = tail call fast double @llvm.powi.f64.i32(double [[BASE]], i32 40)
+; CHECK-NEXT:    ret double [[RES]]
 ;
   %res = tail call fast double @llvm.pow.f64(double %base, double 4.000000e+01)
   ret double %res
 }
 
 define double @powf_exp_const2_int_fast(double %base) {
-; CHECK-LABEL: @powf_exp_const2_int_fast(
-; CHECK-NEXT:    [[TMP1:%.*]] = tail call fast double @llvm.powi.f64.i32(double [[BASE:%.*]], i32 -40)
-; CHECK-NEXT:    ret double [[TMP1]]
+; CHECK-LABEL: define double @powf_exp_const2_int_fast(
+; CHECK-SAME: double [[BASE:%.*]]) {
+; CHECK-NEXT:    [[RES:%.*]] = tail call fast double @llvm.powi.f64.i32(double [[BASE]], i32 -40)
+; CHECK-NEXT:    ret double [[RES]]
 ;
   %res = tail call fast double @llvm.pow.f64(double %base, double -4.000000e+01)
   ret double %res
@@ -222,9 +241,10 @@ define double @powf_exp_const2_int_fast(double %base) {
 ; Negative tests
 
 define double @pow_uitofp_const_base_fast_i32(i32 %x) {
-; CHECK-LABEL: @pow_uitofp_const_base_fast_i32(
-; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to float
-; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[SUBFP]], 0x4006757{{.*}}
+; CHECK-LABEL: define double @pow_uitofp_const_base_fast_i32(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X]] to float
+; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[SUBFP]], 0x4006757680000000
 ; CHECK-NEXT:    [[EXP2:%.*]] = tail call fast float @llvm.exp2.f32(float [[MUL]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
 ; CHECK-NEXT:    ret double [[RES]]
@@ -236,8 +256,9 @@ define double @pow_uitofp_const_base_fast_i32(i32 %x) {
 }
 
 define double @pow_uitofp_const_base_2_fast_i32(i32 %x) {
-; CHECK-LABEL: @pow_uitofp_const_base_2_fast_i32(
-; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to float
+; CHECK-LABEL: define double @pow_uitofp_const_base_2_fast_i32(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X]] to float
 ; CHECK-NEXT:    [[EXP2:%.*]] = tail call fast float @llvm.exp2.f32(float [[SUBFP]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
 ; CHECK-NEXT:    ret double [[RES]]
@@ -249,8 +270,9 @@ define double @pow_uitofp_const_base_2_fast_i32(i32 %x) {
 }
 
 define double @pow_uitofp_const_base_power_of_2_fast_i32(i32 %x) {
-; CHECK-LABEL: @pow_uitofp_const_base_power_of_2_fast_i32(
-; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to float
+; CHECK-LABEL: define double @pow_uitofp_const_base_power_of_2_fast_i32(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X]] to float
 ; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[SUBFP]], 4.000000e+00
 ; CHECK-NEXT:    [[EXP2:%.*]] = tail call fast float @llvm.exp2.f32(float [[MUL]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
@@ -263,9 +285,10 @@ define double @pow_uitofp_const_base_power_of_2_fast_i32(i32 %x) {
 }
 
 define double @pow_uitofp_float_base_fast_i32(float %base, i32 %x) {
-; CHECK-LABEL: @pow_uitofp_float_base_fast_i32(
-; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to float
-; CHECK-NEXT:    [[POW:%.*]] = tail call fast float @llvm.pow.f32(float [[BASE:%.*]], float [[SUBFP]])
+; CHECK-LABEL: define double @pow_uitofp_float_base_fast_i32(
+; CHECK-SAME: float [[BASE:%.*]], i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X]] to float
+; CHECK-NEXT:    [[POW:%.*]] = tail call fast float @llvm.pow.f32(float [[BASE]], float [[SUBFP]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
@@ -276,9 +299,10 @@ define double @pow_uitofp_float_base_fast_i32(float %base, i32 %x) {
 }
 
 define double @pow_uitofp_double_base_fast_i32(double %base, i32 %x) {
-; CHECK-LABEL: @pow_uitofp_double_base_fast_i32(
-; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to double
-; CHECK-NEXT:    [[RES:%.*]] = tail call fast double @llvm.pow.f64(double [[BASE:%.*]], double [[SUBFP]])
+; CHECK-LABEL: define double @pow_uitofp_double_base_fast_i32(
+; CHECK-SAME: double [[BASE:%.*]], i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X]] to double
+; CHECK-NEXT:    [[RES:%.*]] = tail call fast double @llvm.pow.f64(double [[BASE]], double [[SUBFP]])
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   %subfp = uitofp i32 %x to double
@@ -287,14 +311,15 @@ define double @pow_uitofp_double_base_fast_i32(double %base, i32 %x) {
 }
 
 define double @pow_sitofp_const_base_fast_i64(i64 %x) {
-; CHECK-LABEL: @pow_sitofp_const_base_fast_i64(
-; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i64 [[X:%.*]] to float
-; Do not change 0x400675{{.*}} to the exact constant, see PR42740
-; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[SUBFP]], 0x400675{{.*}}
+; CHECK-LABEL: define double @pow_sitofp_const_base_fast_i64(
+; CHECK-SAME: i64 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i64 [[X]] to float
+; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[SUBFP]], 0x4006757680000000
 ; CHECK-NEXT:    [[EXP2:%.*]] = tail call fast float @llvm.exp2.f32(float [[MUL]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
+; Do not change 0x400675{{.*}} to the exact constant, see PR42740
   %subfp = sitofp i64 %x to float
   %pow = tail call fast float @llvm.pow.f32(float 7.000000e+00, float %subfp)
   %res = fpext float %pow to double
@@ -302,9 +327,10 @@ define double @pow_sitofp_const_base_fast_i64(i64 %x) {
 }
 
 define double @pow_uitofp_const_base_fast_i64(i64 %x) {
-; CHECK-LABEL: @pow_uitofp_const_base_fast_i64(
-; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i64 [[X:%.*]] to float
-; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[SUBFP]], 0x400675{{.*}}
+; CHECK-LABEL: define double @pow_uitofp_const_base_fast_i64(
+; CHECK-SAME: i64 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i64 [[X]] to float
+; CHECK-NEXT:    [[MUL:%.*]] = fmul fast float [[SUBFP]], 0x4006757680000000
 ; CHECK-NEXT:    [[EXP2:%.*]] = tail call fast float @llvm.exp2.f32(float [[MUL]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
 ; CHECK-NEXT:    ret double [[RES]]
@@ -316,8 +342,9 @@ define double @pow_uitofp_const_base_fast_i64(i64 %x) {
 }
 
 define double @pow_sitofp_const_base_no_fast(i32 %x) {
-; CHECK-LABEL: @pow_sitofp_const_base_no_fast(
-; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X:%.*]] to float
+; CHECK-LABEL: define double @pow_sitofp_const_base_no_fast(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X]] to float
 ; CHECK-NEXT:    [[POW:%.*]] = tail call float @llvm.pow.f32(float 7.000000e+00, float [[SUBFP]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
 ; CHECK-NEXT:    ret double [[RES]]
@@ -329,8 +356,9 @@ define double @pow_sitofp_const_base_no_fast(i32 %x) {
 }
 
 define double @pow_uitofp_const_base_no_fast(i32 %x) {
-; CHECK-LABEL: @pow_uitofp_const_base_no_fast(
-; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to float
+; CHECK-LABEL: define double @pow_uitofp_const_base_no_fast(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X]] to float
 ; CHECK-NEXT:    [[POW:%.*]] = tail call float @llvm.pow.f32(float 7.000000e+00, float [[SUBFP]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
 ; CHECK-NEXT:    ret double [[RES]]
@@ -342,8 +370,9 @@ define double @pow_uitofp_const_base_no_fast(i32 %x) {
 }
 
 define double @pow_sitofp_const_base_2_no_fast(i32 %x) {
-; CHECK-LABEL: @pow_sitofp_const_base_2_no_fast(
-; CHECK-NEXT:    [[LDEXPF:%.*]] = tail call float @ldexpf(float 1.000000e+00, i32 [[X:%.*]])
+; CHECK-LABEL: define double @pow_sitofp_const_base_2_no_fast(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[LDEXPF:%.*]] = tail call float @ldexpf(float 1.000000e+00, i32 [[X]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[LDEXPF]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
@@ -354,8 +383,9 @@ define double @pow_sitofp_const_base_2_no_fast(i32 %x) {
 }
 
 define double @pow_sitofp_const_base_power_of_2_no_fast(i32 %x) {
-; CHECK-LABEL: @pow_sitofp_const_base_power_of_2_no_fast(
-; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X:%.*]] to float
+; CHECK-LABEL: define double @pow_sitofp_const_base_power_of_2_no_fast(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X]] to float
 ; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[SUBFP]], 4.000000e+00
 ; CHECK-NEXT:    [[EXP2:%.*]] = tail call float @llvm.exp2.f32(float [[MUL]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
@@ -368,8 +398,9 @@ define double @pow_sitofp_const_base_power_of_2_no_fast(i32 %x) {
 }
 
 define double @pow_uitofp_const_base_2_no_fast(i32 %x) {
-; CHECK-LABEL: @pow_uitofp_const_base_2_no_fast(
-; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to float
+; CHECK-LABEL: define double @pow_uitofp_const_base_2_no_fast(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X]] to float
 ; CHECK-NEXT:    [[EXP2:%.*]] = tail call float @llvm.exp2.f32(float [[SUBFP]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
 ; CHECK-NEXT:    ret double [[RES]]
@@ -381,8 +412,9 @@ define double @pow_uitofp_const_base_2_no_fast(i32 %x) {
 }
 
 define double @pow_uitofp_const_base_power_of_2_no_fast(i32 %x) {
-; CHECK-LABEL: @pow_uitofp_const_base_power_of_2_no_fast(
-; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to float
+; CHECK-LABEL: define double @pow_uitofp_const_base_power_of_2_no_fast(
+; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X]] to float
 ; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[SUBFP]], 4.000000e+00
 ; CHECK-NEXT:    [[EXP2:%.*]] = tail call float @llvm.exp2.f32(float [[MUL]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[EXP2]] to double
@@ -395,9 +427,10 @@ define double @pow_uitofp_const_base_power_of_2_no_fast(i32 %x) {
 }
 
 define double @pow_sitofp_float_base_no_fast(float %base, i32 %x) {
-; CHECK-LABEL: @pow_sitofp_float_base_no_fast(
-; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X:%.*]] to float
-; CHECK-NEXT:    [[POW:%.*]] = tail call float @llvm.pow.f32(float [[BASE:%.*]], float [[SUBFP]])
+; CHECK-LABEL: define double @pow_sitofp_float_base_no_fast(
+; CHECK-SAME: float [[BASE:%.*]], i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X]] to float
+; CHECK-NEXT:    [[POW:%.*]] = tail call float @llvm.pow.f32(float [[BASE]], float [[SUBFP]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
@@ -408,9 +441,10 @@ define double @pow_sitofp_float_base_no_fast(float %base, i32 %x) {
 }
 
 define double @pow_uitofp_float_base_no_fast(float %base, i32 %x) {
-; CHECK-LABEL: @pow_uitofp_float_base_no_fast(
-; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to float
-; CHECK-NEXT:    [[POW:%.*]] = tail call float @llvm.pow.f32(float [[BASE:%.*]], float [[SUBFP]])
+; CHECK-LABEL: define double @pow_uitofp_float_base_no_fast(
+; CHECK-SAME: float [[BASE:%.*]], i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X]] to float
+; CHECK-NEXT:    [[POW:%.*]] = tail call float @llvm.pow.f32(float [[BASE]], float [[SUBFP]])
 ; CHECK-NEXT:    [[RES:%.*]] = fpext float [[POW]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
@@ -421,9 +455,10 @@ define double @pow_uitofp_float_base_no_fast(float %base, i32 %x) {
 }
 
 define double @pow_sitofp_double_base_no_fast(double %base, i32 %x) {
-; CHECK-LABEL: @pow_sitofp_double_base_no_fast(
-; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X:%.*]] to double
-; CHECK-NEXT:    [[POW:%.*]] = tail call double @llvm.pow.f64(double [[BASE:%.*]], double [[SUBFP]])
+; CHECK-LABEL: define double @pow_sitofp_double_base_no_fast(
+; CHECK-SAME: double [[BASE:%.*]], i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = sitofp i32 [[X]] to double
+; CHECK-NEXT:    [[POW:%.*]] = tail call double @llvm.pow.f64(double [[BASE]], double [[SUBFP]])
 ; CHECK-NEXT:    ret double [[POW]]
 ;
   %subfp = sitofp i32 %x to double
@@ -432,9 +467,10 @@ define double @pow_sitofp_double_base_no_fast(double %base, i32 %x) {
 }
 
 define double @pow_uitofp_double_base_no_fast(double %base, i32 %x) {
-; CHECK-LABEL: @pow_uitofp_double_base_no_fast(
-; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X:%.*]] to double
-; CHECK-NEXT:    [[POW:%.*]] = tail call double @llvm.pow.f64(double [[BASE:%.*]], double [[SUBFP]])
+; CHECK-LABEL: define double @pow_uitofp_double_base_no_fast(
+; CHECK-SAME: double [[BASE:%.*]], i32 [[X:%.*]]) {
+; CHECK-NEXT:    [[SUBFP:%.*]] = uitofp i32 [[X]] to double
+; CHECK-NEXT:    [[POW:%.*]] = tail call double @llvm.pow.f64(double [[BASE]], double [[SUBFP]])
 ; CHECK-NEXT:    ret double [[POW]]
 ;
   %subfp = uitofp i32 %x to double
@@ -445,8 +481,9 @@ define double @pow_uitofp_double_base_no_fast(double %base, i32 %x) {
 ; negative test - pow with no FMF is not the same as the loosely-specified powi
 
 define double @powf_exp_const_int_no_fast(double %base) {
-; CHECK-LABEL: @powf_exp_const_int_no_fast(
-; CHECK-NEXT:    [[RES:%.*]] = tail call double @llvm.pow.f64(double [[BASE:%.*]], double 4.000000e+01)
+; CHECK-LABEL: define double @powf_exp_const_int_no_fast(
+; CHECK-SAME: double [[BASE:%.*]]) {
+; CHECK-NEXT:    [[RES:%.*]] = tail call double @llvm.pow.f64(double [[BASE]], double 4.000000e+01)
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   %res = tail call double @llvm.pow.f64(double %base, double 4.000000e+01)
@@ -454,10 +491,11 @@ define double @powf_exp_const_int_no_fast(double %base) {
 }
 
 define double @powf_exp_const_not_int_fast(double %base) {
-; CHECK-LABEL: @powf_exp_const_not_int_fast(
-; CHECK-NEXT:    [[SQRT:%.*]] = call fast double @llvm.sqrt.f64(double [[BASE:%.*]])
-; CHECK-NEXT:    [[POWI:%.*]] = tail call fast double @llvm.powi.f64.i32(double [[BASE]], i32 37)
-; CHECK-NEXT:    [[RES:%.*]] = fmul fast double [[POWI]], [[SQRT]]
+; CHECK-LABEL: define double @powf_exp_const_not_int_fast(
+; CHECK-SAME: double [[BASE:%.*]]) {
+; CHECK-NEXT:    [[SQRT:%.*]] = call fast double @llvm.sqrt.f64(double [[BASE]])
+; CHECK-NEXT:    [[TMP1:%.*]] = tail call fast double @llvm.powi.f64.i32(double [[BASE]], i32 37)
+; CHECK-NEXT:    [[RES:%.*]] = fmul fast double [[TMP1]], [[SQRT]]
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   %res = tail call fast double @llvm.pow.f64(double %base, double 3.750000e+01)
@@ -465,8 +503,9 @@ define double @powf_exp_const_not_int_fast(double %base) {
 }
 
 define double @powf_exp_const_not_int_no_fast(double %base) {
-; CHECK-LABEL: @powf_exp_const_not_int_no_fast(
-; CHECK-NEXT:    [[RES:%.*]] = tail call double @llvm.pow.f64(double [[BASE:%.*]], double 3.750000e+01)
+; CHECK-LABEL: define double @powf_exp_const_not_int_no_fast(
+; CHECK-SAME: double [[BASE:%.*]]) {
+; CHECK-NEXT:    [[RES:%.*]] = tail call double @llvm.pow.f64(double [[BASE]], double 3.750000e+01)
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   %res = tail call double @llvm.pow.f64(double %base, double 3.750000e+01)
@@ -476,8 +515,9 @@ define double @powf_exp_const_not_int_no_fast(double %base) {
 ; negative test - pow with no FMF is not the same as the loosely-specified powi
 
 define double @powf_exp_const2_int_no_fast(double %base) {
-; CHECK-LABEL: @powf_exp_const2_int_no_fast(
-; CHECK-NEXT:    [[RES:%.*]] = tail call double @llvm.pow.f64(double [[BASE:%.*]], double -4.000000e+01)
+; CHECK-LABEL: define double @powf_exp_const2_int_no_fast(
+; CHECK-SAME: double [[BASE:%.*]]) {
+; CHECK-NEXT:    [[RES:%.*]] = tail call double @llvm.pow.f64(double [[BASE]], double -4.000000e+01)
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   %res = tail call double @llvm.pow.f64(double %base, double -4.000000e+01)
@@ -487,8 +527,9 @@ define double @powf_exp_const2_int_no_fast(double %base) {
 ; TODO: This could be transformed the same as scalar if there is an ldexp intrinsic.
 
 define <2 x float> @pow_sitofp_const_base_2_no_fast_vector(<2 x i8> %x) {
-; CHECK-LABEL: @pow_sitofp_const_base_2_no_fast_vector(
-; CHECK-NEXT:    [[S:%.*]] = sitofp <2 x i8> [[X:%.*]] to <2 x float>
+; CHECK-LABEL: define <2 x float> @pow_sitofp_const_base_2_no_fast_vector(
+; CHECK-SAME: <2 x i8> [[X:%.*]]) {
+; CHECK-NEXT:    [[S:%.*]] = sitofp <2 x i8> [[X]] to <2 x float>
 ; CHECK-NEXT:    [[EXP2:%.*]] = call <2 x float> @llvm.exp2.v2f32(<2 x float> [[S]])
 ; CHECK-NEXT:    ret <2 x float> [[EXP2]]
 ;

diff  --git a/llvm/test/Transforms/InstCombine/printf-i16.ll b/llvm/test/Transforms/InstCombine/printf-i16.ll
index c5868b5d5c4cd28..f9cc4ebfbcc3428 100644
--- a/llvm/test/Transforms/InstCombine/printf-i16.ll
+++ b/llvm/test/Transforms/InstCombine/printf-i16.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ;
-; RUN: opt < %s -mtriple=avr-freebsd -passes=instcombine -S | FileCheck %s
-; RUN: opt < %s -mtriple=msp430-linux -passes=instcombine -S | FileCheck %s
+; RUN: opt < %s -mtriple=avr-freebsd -passes=instcombine -S | FileCheck %s --check-prefix=AVR
+; RUN: opt < %s -mtriple=msp430-linux -passes=instcombine -S | FileCheck %s --check-prefix=MSP430
 ;
 ; Verify that the puts to putchar transformation works correctly even for
 ; targets with 16-bit int.
@@ -23,25 +23,42 @@ declare i16 @printf(ptr, ...)
 ; in the same output for calls with equivalent arguments.
 
 define void @xform_printf(i8 %c8, i16 %c16) {
-; CHECK-LABEL: @xform_printf(
-; CHECK-NEXT:    [[PUTCHAR:%.*]] = call i16 @putchar(i16 1)
-; CHECK-NEXT:    [[PUTCHAR1:%.*]] = call i16 @putchar(i16 1)
-; CHECK-NEXT:    [[PUTCHAR2:%.*]] = call i16 @putchar(i16 1)
-; CHECK-NEXT:    [[PUTCHAR3:%.*]] = call i16 @putchar(i16 127)
-; CHECK-NEXT:    [[PUTCHAR4:%.*]] = call i16 @putchar(i16 127)
-; CHECK-NEXT:    [[PUTCHAR5:%.*]] = call i16 @putchar(i16 127)
-; CHECK-NEXT:    [[PUTCHAR6:%.*]] = call i16 @putchar(i16 128)
-; CHECK-NEXT:    [[PUTCHAR7:%.*]] = call i16 @putchar(i16 128)
-; CHECK-NEXT:    [[PUTCHAR8:%.*]] = call i16 @putchar(i16 128)
-; CHECK-NEXT:    [[PUTCHAR9:%.*]] = call i16 @putchar(i16 255)
-; CHECK-NEXT:    [[PUTCHAR10:%.*]] = call i16 @putchar(i16 255)
-; CHECK-NEXT:    [[PUTCHAR11:%.*]] = call i16 @putchar(i16 255)
-; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[C8:%.*]] to i16
-; CHECK-NEXT:    [[PUTCHAR12:%.*]] = call i16 @putchar(i16 [[TMP1]])
-; CHECK-NEXT:    [[PUTCHAR13:%.*]] = call i16 @putchar(i16 [[C16:%.*]])
-; CHECK-NEXT:    ret void
+; AVR-LABEL: @xform_printf(
+; AVR-NEXT:    [[PUTCHAR:%.*]] = call i16 @putchar(i16 1)
+; AVR-NEXT:    [[PUTCHAR1:%.*]] = call i16 @putchar(i16 1)
+; AVR-NEXT:    [[PUTCHAR2:%.*]] = call i16 @putchar(i16 1)
+; AVR-NEXT:    [[PUTCHAR3:%.*]] = call i16 @putchar(i16 127)
+; AVR-NEXT:    [[PUTCHAR4:%.*]] = call i16 @putchar(i16 127)
+; AVR-NEXT:    [[PUTCHAR5:%.*]] = call i16 @putchar(i16 127)
+; AVR-NEXT:    [[PUTCHAR6:%.*]] = call i16 @putchar(i16 128)
+; AVR-NEXT:    [[PUTCHAR7:%.*]] = call i16 @putchar(i16 128)
+; AVR-NEXT:    [[PUTCHAR8:%.*]] = call i16 @putchar(i16 128)
+; AVR-NEXT:    [[PUTCHAR9:%.*]] = call i16 @putchar(i16 255)
+; AVR-NEXT:    [[PUTCHAR10:%.*]] = call i16 @putchar(i16 255)
+; AVR-NEXT:    [[PUTCHAR11:%.*]] = call i16 @putchar(i16 255)
+; AVR-NEXT:    [[TMP1:%.*]] = zext i8 [[C8:%.*]] to i16
+; AVR-NEXT:    [[PUTCHAR12:%.*]] = call i16 @putchar(i16 [[TMP1]])
+; AVR-NEXT:    [[PUTCHAR13:%.*]] = call i16 @putchar(i16 [[C16:%.*]])
+; AVR-NEXT:    ret void
+;
+; MSP430-LABEL: @xform_printf(
+; MSP430-NEXT:    [[PUTCHAR:%.*]] = call i16 @putchar(i16 1)
+; MSP430-NEXT:    [[PUTCHAR1:%.*]] = call i16 @putchar(i16 1)
+; MSP430-NEXT:    [[PUTCHAR2:%.*]] = call i16 @putchar(i16 1)
+; MSP430-NEXT:    [[PUTCHAR3:%.*]] = call i16 @putchar(i16 127)
+; MSP430-NEXT:    [[PUTCHAR4:%.*]] = call i16 @putchar(i16 127)
+; MSP430-NEXT:    [[PUTCHAR5:%.*]] = call i16 @putchar(i16 127)
+; MSP430-NEXT:    [[PUTCHAR6:%.*]] = call i16 @putchar(i16 128)
+; MSP430-NEXT:    [[PUTCHAR7:%.*]] = call i16 @putchar(i16 128)
+; MSP430-NEXT:    [[PUTCHAR8:%.*]] = call i16 @putchar(i16 128)
+; MSP430-NEXT:    [[PUTCHAR9:%.*]] = call i16 @putchar(i16 255)
+; MSP430-NEXT:    [[PUTCHAR10:%.*]] = call i16 @putchar(i16 255)
+; MSP430-NEXT:    [[PUTCHAR11:%.*]] = call i16 @putchar(i16 255)
+; MSP430-NEXT:    [[TMP1:%.*]] = zext i8 [[C8:%.*]] to i16
+; MSP430-NEXT:    [[PUTCHAR12:%.*]] = call i16 @putchar(i16 [[TMP1]])
+; MSP430-NEXT:    [[PUTCHAR13:%.*]] = call i16 @putchar(i16 [[C16:%.*]])
+; MSP430-NEXT:    ret void
 ;
-
   call i16 (ptr, ...) @printf(ptr @s1)
   call i16 (ptr, ...) @printf(ptr @pcnt_c, i16 1)
   call i16 (ptr, ...) @printf(ptr @pcnt_s, ptr @s1)

diff  --git a/llvm/test/Transforms/InstCombine/puts-i16.ll b/llvm/test/Transforms/InstCombine/puts-i16.ll
index 58d12ab9cd2ab74..92769083a8affe5 100644
--- a/llvm/test/Transforms/InstCombine/puts-i16.ll
+++ b/llvm/test/Transforms/InstCombine/puts-i16.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ;
-; RUN: opt < %s -mtriple=avr-linux -passes=instcombine -S | FileCheck %s
-; RUN: opt < %s -mtriple=msp430-freebsd -passes=instcombine -S | FileCheck %s
+; RUN: opt < %s -mtriple=avr-linux -passes=instcombine -S | FileCheck %s --check-prefix=AVR
+; RUN: opt < %s -mtriple=msp430-freebsd -passes=instcombine -S | FileCheck %s --check-prefix=MSP430
 ;
 ; Test that the puts to putchar transformation works correctly even for
 ; targets with 16-bit int.
@@ -12,11 +12,15 @@ declare i16 @puts(ptr)
 @empty = constant [1 x i8] c"\00"
 
 define void @xform_puts(i16 %c) {
-; CHECK-LABEL: @xform_puts(
-; CHECK-NEXT:    [[PUTCHAR:%.*]] = call i16 @putchar(i16 10)
-; CHECK-NEXT:    ret void
-;
 ; Transform puts("") to putchar("\n").
+; AVR-LABEL: @xform_puts(
+; AVR-NEXT:    [[PUTCHAR:%.*]] = call i16 @putchar(i16 10)
+; AVR-NEXT:    ret void
+;
+; MSP430-LABEL: @xform_puts(
+; MSP430-NEXT:    [[PUTCHAR:%.*]] = call i16 @putchar(i16 10)
+; MSP430-NEXT:    ret void
+;
   call i16 @puts(ptr @empty)
 
   ret void

diff  --git a/llvm/test/Transforms/LoopUnroll/ARM/upperbound.ll b/llvm/test/Transforms/LoopUnroll/ARM/upperbound.ll
index 21c08dc23148b76..ae23a2e9ce35231 100644
--- a/llvm/test/Transforms/LoopUnroll/ARM/upperbound.ll
+++ b/llvm/test/Transforms/LoopUnroll/ARM/upperbound.ll
@@ -75,8 +75,8 @@ define i32 @test2(i32 %l86) {
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[L86_OFF:%.*]] = add i32 [[L86:%.*]], -1
 ; CHECK-NEXT:    [[SWITCH:%.*]] = icmp ult i32 [[L86_OFF]], 24
-; CHECK-NEXT:    [[DOTNOT30:%.*]] = icmp ne i32 [[L86]], 25
-; CHECK-NEXT:    [[SPEC_SELECT:%.*]] = zext i1 [[DOTNOT30]] to i32
+; CHECK-NEXT:    [[OR_COND23_NOT:%.*]] = icmp ne i32 [[L86]], 25
+; CHECK-NEXT:    [[SPEC_SELECT:%.*]] = zext i1 [[OR_COND23_NOT]] to i32
 ; CHECK-NEXT:    [[COMMON_RET_OP:%.*]] = select i1 [[SWITCH]], i32 0, i32 [[SPEC_SELECT]]
 ; CHECK-NEXT:    ret i32 [[COMMON_RET_OP]]
 ;

diff  --git a/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-call.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-call.ll
index 64eb35ebe853447..240882bab636dd7 100644
--- a/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/widened-massv-call.ll
@@ -9,20 +9,20 @@ define dso_local double @test(ptr %Arr) {
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; CHECK:       vector.body:
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <2 x double> [ zeroinitializer, [[ENTRY]] ], [ [[TMP5:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <2 x double> [ zeroinitializer, [[ENTRY]] ], [ [[TMP4:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP0:%.*]] = zext i32 [[INDEX]] to i64
 ; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds float, ptr [[ARR:%.*]], i64 [[TMP0]]
 ; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x float>, ptr [[TMP1]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = fpext <2 x float> [[WIDE_LOAD]] to <2 x double>
-; CHECK-NEXT:    [[TMP4:%.*]] = tail call fast <2 x double> @__sind2(<2 x double> [[TMP3]])
-; CHECK-NEXT:    [[TMP5]] = fadd fast <2 x double> [[TMP4]], [[VEC_PHI]]
+; CHECK-NEXT:    [[TMP2:%.*]] = fpext <2 x float> [[WIDE_LOAD]] to <2 x double>
+; CHECK-NEXT:    [[TMP3:%.*]] = tail call fast <2 x double> @__sind2(<2 x double> [[TMP2]])
+; CHECK-NEXT:    [[TMP4]] = fadd fast <2 x double> [[TMP3]], [[VEC_PHI]]
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
-; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128
-; CHECK-NEXT:    br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128
+; CHECK-NEXT:    br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK:       middle.block:
-; CHECK-NEXT:    [[DOTLCSSA:%.*]] = phi <2 x double> [ [[TMP5]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[TMP7:%.*]] = tail call fast double @llvm.vector.reduce.fadd.v2f64(double -0.000000e+00, <2 x double> [[DOTLCSSA]])
-; CHECK-NEXT:    ret double [[TMP7]]
+; CHECK-NEXT:    [[DOTLCSSA:%.*]] = phi <2 x double> [ [[TMP4]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP6:%.*]] = tail call fast double @llvm.vector.reduce.fadd.v2f64(double -0.000000e+00, <2 x double> [[DOTLCSSA]])
+; CHECK-NEXT:    ret double [[TMP6]]
 ;
 entry:
   br label %for.cond

diff  --git a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
index 00cabd58de91394..fab924580cbb70d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
@@ -22,14 +22,14 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
 ; RV32-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 625, [[TMP2]]
 ; RV32-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
 ; RV32:       vector.memcheck:
-; RV32-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 79880
-; RV32-NEXT:    [[UGLYGEP1:%.*]] = getelementptr i8, ptr [[TRIGGER:%.*]], i64 39940
-; RV32-NEXT:    [[UGLYGEP2:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 159752
-; RV32-NEXT:    [[BOUND0:%.*]] = icmp ult ptr [[A]], [[UGLYGEP1]]
-; RV32-NEXT:    [[BOUND1:%.*]] = icmp ult ptr [[TRIGGER]], [[UGLYGEP]]
+; RV32-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 79880
+; RV32-NEXT:    [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[TRIGGER:%.*]], i64 39940
+; RV32-NEXT:    [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 159752
+; RV32-NEXT:    [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; RV32-NEXT:    [[BOUND1:%.*]] = icmp ult ptr [[TRIGGER]], [[SCEVGEP]]
 ; RV32-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
-; RV32-NEXT:    [[BOUND03:%.*]] = icmp ult ptr [[A]], [[UGLYGEP2]]
-; RV32-NEXT:    [[BOUND14:%.*]] = icmp ult ptr [[B]], [[UGLYGEP]]
+; RV32-NEXT:    [[BOUND03:%.*]] = icmp ult ptr [[A]], [[SCEVGEP2]]
+; RV32-NEXT:    [[BOUND14:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
 ; RV32-NEXT:    [[FOUND_CONFLICT5:%.*]] = and i1 [[BOUND03]], [[BOUND14]]
 ; RV32-NEXT:    [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]]
 ; RV32-NEXT:    br i1 [[CONFLICT_RDX]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
@@ -104,14 +104,14 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
 ; RV64-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 625, [[TMP2]]
 ; RV64-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
 ; RV64:       vector.memcheck:
-; RV64-NEXT:    [[UGLYGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 79880
-; RV64-NEXT:    [[UGLYGEP1:%.*]] = getelementptr i8, ptr [[TRIGGER:%.*]], i64 39940
-; RV64-NEXT:    [[UGLYGEP2:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 159752
-; RV64-NEXT:    [[BOUND0:%.*]] = icmp ult ptr [[A]], [[UGLYGEP1]]
-; RV64-NEXT:    [[BOUND1:%.*]] = icmp ult ptr [[TRIGGER]], [[UGLYGEP]]
+; RV64-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 79880
+; RV64-NEXT:    [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[TRIGGER:%.*]], i64 39940
+; RV64-NEXT:    [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 159752
+; RV64-NEXT:    [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; RV64-NEXT:    [[BOUND1:%.*]] = icmp ult ptr [[TRIGGER]], [[SCEVGEP]]
 ; RV64-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
-; RV64-NEXT:    [[BOUND03:%.*]] = icmp ult ptr [[A]], [[UGLYGEP2]]
-; RV64-NEXT:    [[BOUND14:%.*]] = icmp ult ptr [[B]], [[UGLYGEP]]
+; RV64-NEXT:    [[BOUND03:%.*]] = icmp ult ptr [[A]], [[SCEVGEP2]]
+; RV64-NEXT:    [[BOUND14:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
 ; RV64-NEXT:    [[FOUND_CONFLICT5:%.*]] = and i1 [[BOUND03]], [[BOUND14]]
 ; RV64-NEXT:    [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]]
 ; RV64-NEXT:    br i1 [[CONFLICT_RDX]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]

diff  --git a/llvm/test/Transforms/MergeICmps/X86/addressspaces.ll b/llvm/test/Transforms/MergeICmps/X86/addressspaces.ll
index 28464c795ff239f..fc85f57e020c103 100644
--- a/llvm/test/Transforms/MergeICmps/X86/addressspaces.ll
+++ b/llvm/test/Transforms/MergeICmps/X86/addressspaces.ll
@@ -11,7 +11,7 @@ define void @form_memcmp(ptr dereferenceable(16) %a, ptr dereferenceable(16) %b)
 ; CHECK-NEXT:    br label %"bb1+bb2"
 ; CHECK:       "bb1+bb2":
 ; CHECK-NEXT:    [[MEMCMP:%.*]] = call i32 @memcmp(ptr [[A]], ptr [[B]], i64 16)
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[MEMCMP]], 0
+; CHECK-NEXT:    [[TMP0:%.*]] = icmp eq i32 [[MEMCMP]], 0
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb3:
 ; CHECK-NEXT:    ret void

diff  --git a/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
index b3884368ef04e7a..8d41d9a486801f3 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
@@ -94,7 +94,7 @@ define i64 @sum_2_at_with_int_conversion(ptr %A, ptr %B, i64 %N) {
 ; CHECK-NEXT:    [[SUM_NEXT]] = add i64 [[ADD]], [[LV_I9]]
 ; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
 ; CHECK-NEXT:    [[C:%.*]] = icmp slt i64 [[IV]], [[N]]
-; CHECK-NEXT:    br i1 [[C]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT:    br i1 [[C]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP4:![0-9]+]]
 ; CHECK:       exit:
 ; CHECK-NEXT:    [[SUM_NEXT_LCSSA:%.*]] = phi i64 [ [[SUM_NEXT_PEEL]], [[AT_WITH_INT_CONVERSION_EXIT11_PEEL:%.*]] ], [ [[SUM_NEXT]], [[AT_WITH_INT_CONVERSION_EXIT11]] ]
 ; CHECK-NEXT:    ret i64 [[SUM_NEXT_LCSSA]]

diff  --git a/llvm/test/Transforms/SLPVectorizer/AArch64/splat-loads.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/splat-loads.ll
index 47347719d373a0d..111ca8abf8122fb 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/splat-loads.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/splat-loads.ll
@@ -11,15 +11,15 @@ define void @splat_loads_double(ptr %array1, ptr %array2, ptr %ptrA, ptr %ptrB)
 ; CHECK-NEXT:    [[GEP_2_1:%.*]] = getelementptr inbounds double, ptr [[ARRAY2:%.*]], i64 1
 ; CHECK-NEXT:    [[LD_2_0:%.*]] = load double, ptr [[ARRAY2]], align 8
 ; CHECK-NEXT:    [[LD_2_1:%.*]] = load double, ptr [[GEP_2_1]], align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, ptr [[ARRAY1:%.*]], align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> poison, double [[LD_2_0]], i32 0
-; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP3:%.*]] = fmul <2 x double> [[TMP1]], [[SHUFFLE]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x double>, ptr [[ARRAY1:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> poison, double [[LD_2_0]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul <2 x double> [[TMP0]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> poison, double [[LD_2_1]], i32 0
-; CHECK-NEXT:    [[SHUFFLE1:%.*]] = shufflevector <2 x double> [[TMP4]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x double> [[TMP1]], [[SHUFFLE1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = fadd <2 x double> [[TMP3]], [[TMP5]]
-; CHECK-NEXT:    store <2 x double> [[TMP6]], ptr [[ARRAY1]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x double> [[TMP4]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP6:%.*]] = fmul <2 x double> [[TMP0]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = fadd <2 x double> [[TMP3]], [[TMP6]]
+; CHECK-NEXT:    store <2 x double> [[TMP7]], ptr [[ARRAY1]], align 8
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -52,15 +52,15 @@ define void @splat_loads_float(ptr %array1, ptr %array2, ptr %ptrA, ptr %ptrB) {
 ; CHECK-NEXT:    [[GEP_2_1:%.*]] = getelementptr inbounds float, ptr [[ARRAY2:%.*]], i64 1
 ; CHECK-NEXT:    [[LD_2_0:%.*]] = load float, ptr [[ARRAY2]], align 8
 ; CHECK-NEXT:    [[LD_2_1:%.*]] = load float, ptr [[GEP_2_1]], align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x float>, ptr [[ARRAY1:%.*]], align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x float> poison, float [[LD_2_0]], i32 0
-; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP3:%.*]] = fmul <2 x float> [[TMP1]], [[SHUFFLE]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[ARRAY1:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x float> poison, float [[LD_2_0]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul <2 x float> [[TMP0]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x float> poison, float [[LD_2_1]], i32 0
-; CHECK-NEXT:    [[SHUFFLE1:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x float> [[TMP1]], [[SHUFFLE1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = fadd <2 x float> [[TMP3]], [[TMP5]]
-; CHECK-NEXT:    store <2 x float> [[TMP6]], ptr [[ARRAY1]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP6:%.*]] = fmul <2 x float> [[TMP0]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = fadd <2 x float> [[TMP3]], [[TMP6]]
+; CHECK-NEXT:    store <2 x float> [[TMP7]], ptr [[ARRAY1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -93,15 +93,15 @@ define void @splat_loads_i64(ptr %array1, ptr %array2, ptr %ptrA, ptr %ptrB) {
 ; CHECK-NEXT:    [[GEP_2_1:%.*]] = getelementptr inbounds i64, ptr [[ARRAY2:%.*]], i64 1
 ; CHECK-NEXT:    [[LD_2_0:%.*]] = load i64, ptr [[ARRAY2]], align 8
 ; CHECK-NEXT:    [[LD_2_1:%.*]] = load i64, ptr [[GEP_2_1]], align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[ARRAY1:%.*]], align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x i64> poison, i64 [[LD_2_0]], i32 0
-; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP1]], [[SHUFFLE]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i64>, ptr [[ARRAY1:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x i64> poison, i64 [[LD_2_0]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x i64> [[TMP1]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i64> [[TMP0]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> poison, i64 [[LD_2_1]], i32 0
-; CHECK-NEXT:    [[SHUFFLE1:%.*]] = shufflevector <2 x i64> [[TMP4]], <2 x i64> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP5:%.*]] = or <2 x i64> [[TMP1]], [[SHUFFLE1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = add <2 x i64> [[TMP3]], [[TMP5]]
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], ptr [[ARRAY1]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i64> [[TMP4]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP6:%.*]] = or <2 x i64> [[TMP0]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = add <2 x i64> [[TMP3]], [[TMP6]]
+; CHECK-NEXT:    store <2 x i64> [[TMP7]], ptr [[ARRAY1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -134,15 +134,15 @@ define void @splat_loads_i32(ptr %array1, ptr %array2, ptr %ptrA, ptr %ptrB) {
 ; CHECK-NEXT:    [[GEP_2_1:%.*]] = getelementptr inbounds i32, ptr [[ARRAY2:%.*]], i64 1
 ; CHECK-NEXT:    [[LD_2_0:%.*]] = load i32, ptr [[ARRAY2]], align 8
 ; CHECK-NEXT:    [[LD_2_1:%.*]] = load i32, ptr [[GEP_2_1]], align 8
-; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[ARRAY1:%.*]], align 8
-; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x i32> poison, i32 [[LD_2_0]], i32 0
-; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i32> [[TMP1]], [[SHUFFLE]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <2 x i32>, ptr [[ARRAY1:%.*]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x i32> poison, i32 [[LD_2_0]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x i32> [[TMP1]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = or <2 x i32> [[TMP0]], [[TMP2]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i32> poison, i32 [[LD_2_1]], i32 0
-; CHECK-NEXT:    [[SHUFFLE1:%.*]] = shufflevector <2 x i32> [[TMP4]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP5:%.*]] = or <2 x i32> [[TMP1]], [[SHUFFLE1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = add <2 x i32> [[TMP3]], [[TMP5]]
-; CHECK-NEXT:    store <2 x i32> [[TMP6]], ptr [[ARRAY1]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <2 x i32> [[TMP4]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP6:%.*]] = or <2 x i32> [[TMP0]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = add <2 x i32> [[TMP3]], [[TMP6]]
+; CHECK-NEXT:    store <2 x i32> [[TMP7]], ptr [[ARRAY1]], align 4
 ; CHECK-NEXT:    ret void
 ;
 entry:

diff  --git a/llvm/test/Transforms/SLPVectorizer/X86/control-dependence.ll b/llvm/test/Transforms/SLPVectorizer/X86/control-dependence.ll
index 178a67e35a33f2d..d1dd57fac495879 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/control-dependence.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/control-dependence.ll
@@ -8,10 +8,10 @@ declare i64 @may_throw() willreturn
 ; Base case with no interesting control dependencies
 define void @test_no_control(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: @test_no_control(
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x i64>, ptr [[C:%.*]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = add <2 x i64> [[TMP2]], [[TMP4]]
-; CHECK-NEXT:    store <2 x i64> [[TMP5]], ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[C:%.*]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = add <2 x i64> [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    store <2 x i64> [[TMP3]], ptr [[B:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %v1 = load i64, ptr %a
@@ -34,11 +34,11 @@ define void @test1(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: @test1(
 ; CHECK-NEXT:    [[C1:%.*]] = load i64, ptr [[C:%.*]], align 4
 ; CHECK-NEXT:    [[C2:%.*]] = call i64 @may_inf_loop_ro()
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> poison, i64 [[C1]], i32 0
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[C2]], i32 1
-; CHECK-NEXT:    [[TMP5:%.*]] = add <2 x i64> [[TMP2]], [[TMP4]]
-; CHECK-NEXT:    store <2 x i64> [[TMP5]], ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x i64> poison, i64 [[C1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[C2]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = add <2 x i64> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    store <2 x i64> [[TMP4]], ptr [[B:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %v1 = load i64, ptr %a
@@ -60,11 +60,11 @@ define void @test2(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: @test2(
 ; CHECK-NEXT:    [[C1:%.*]] = load i64, ptr [[C:%.*]], align 4
 ; CHECK-NEXT:    [[C2:%.*]] = call i64 @may_inf_loop_ro()
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> poison, i64 [[C1]], i32 0
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[C2]], i32 1
-; CHECK-NEXT:    [[TMP5:%.*]] = add <2 x i64> [[TMP2]], [[TMP4]]
-; CHECK-NEXT:    store <2 x i64> [[TMP5]], ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x i64> poison, i64 [[C1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[C2]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = add <2 x i64> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    store <2 x i64> [[TMP4]], ptr [[B:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %c1 = load i64, ptr %c
@@ -87,11 +87,11 @@ define void @test3(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: @test3(
 ; CHECK-NEXT:    [[C1:%.*]] = load i64, ptr [[C:%.*]], align 4
 ; CHECK-NEXT:    [[C2:%.*]] = call i64 @may_inf_loop_ro()
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> poison, i64 [[C1]], i32 0
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[C2]], i32 1
-; CHECK-NEXT:    [[TMP5:%.*]] = add <2 x i64> [[TMP2]], [[TMP4]]
-; CHECK-NEXT:    store <2 x i64> [[TMP5]], ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x i64> poison, i64 [[C1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[C2]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = add <2 x i64> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    store <2 x i64> [[TMP4]], ptr [[B:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %v1 = load i64, ptr %a
@@ -113,11 +113,11 @@ define void @test4(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: @test4(
 ; CHECK-NEXT:    [[C1:%.*]] = load i64, ptr [[C:%.*]], align 4
 ; CHECK-NEXT:    [[C2:%.*]] = call i64 @may_inf_loop_ro()
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> poison, i64 [[C1]], i32 0
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[C2]], i32 1
-; CHECK-NEXT:    [[TMP5:%.*]] = add <2 x i64> [[TMP2]], [[TMP4]]
-; CHECK-NEXT:    store <2 x i64> [[TMP5]], ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x i64> poison, i64 [[C1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[C2]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = add <2 x i64> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    store <2 x i64> [[TMP4]], ptr [[B:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %v1 = load i64, ptr %a
@@ -139,11 +139,11 @@ define void @test5(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: @test5(
 ; CHECK-NEXT:    [[C2:%.*]] = call i64 @may_inf_loop_ro()
 ; CHECK-NEXT:    [[C1:%.*]] = load i64, ptr [[C:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> poison, i64 [[C1]], i32 0
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[C2]], i32 1
-; CHECK-NEXT:    [[TMP5:%.*]] = add <2 x i64> [[TMP2]], [[TMP4]]
-; CHECK-NEXT:    store <2 x i64> [[TMP5]], ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x i64> poison, i64 [[C1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[C2]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = add <2 x i64> [[TMP1]], [[TMP3]]
+; CHECK-NEXT:    store <2 x i64> [[TMP4]], ptr [[B:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %a2 = getelementptr i64, ptr %a, i32 1
@@ -164,10 +164,10 @@ define void @test5(ptr %a, ptr %b, ptr %c) {
 define void @test6(ptr %a, ptr %b, ptr %c) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @may_inf_loop_ro()
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 4
-; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr [[C:%.*]], align 4
-; CHECK-NEXT:    [[TMP6:%.*]] = add <2 x i64> [[TMP3]], [[TMP5]]
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[A:%.*]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr [[C:%.*]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = add <2 x i64> [[TMP2]], [[TMP3]]
+; CHECK-NEXT:    store <2 x i64> [[TMP4]], ptr [[B:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %v1 = load i64, ptr %a
@@ -200,11 +200,11 @@ define void @test7(ptr %a, ptr %b, ptr %c) {
 ; CHECK-NEXT:    store i64 0, ptr [[A]], align 4
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @may_inf_loop_ro()
 ; CHECK-NEXT:    [[V2:%.*]] = load i64, ptr [[A2]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr [[C:%.*]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> poison, i64 [[V1]], i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[V2]], i32 1
-; CHECK-NEXT:    [[TMP6:%.*]] = add <2 x i64> [[TMP5]], [[TMP3]]
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[C:%.*]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> poison, i64 [[V1]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[V2]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = add <2 x i64> [[TMP4]], [[TMP2]]
+; CHECK-NEXT:    store <2 x i64> [[TMP5]], ptr [[B:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %v1 = load i64, ptr %a
@@ -233,11 +233,11 @@ define void @test8(ptr %a, ptr %b, ptr %c) {
 ; CHECK-NEXT:    store i64 0, ptr [[A]], align 4
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @may_throw() #[[ATTR4:[0-9]+]]
 ; CHECK-NEXT:    [[V2:%.*]] = load i64, ptr [[A2]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr [[C:%.*]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> poison, i64 [[V1]], i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[V2]], i32 1
-; CHECK-NEXT:    [[TMP6:%.*]] = add <2 x i64> [[TMP5]], [[TMP3]]
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[C:%.*]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> poison, i64 [[V1]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[V2]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = add <2 x i64> [[TMP4]], [[TMP2]]
+; CHECK-NEXT:    store <2 x i64> [[TMP5]], ptr [[B:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %v1 = load i64, ptr %a
@@ -266,11 +266,11 @@ define void @test9(ptr %a, ptr %b, ptr %c) {
 ; CHECK-NEXT:    store i64 0, ptr [[A]], align 4
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @may_throw()
 ; CHECK-NEXT:    [[V2:%.*]] = load i64, ptr [[A2]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr [[C:%.*]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> poison, i64 [[V1]], i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[V2]], i32 1
-; CHECK-NEXT:    [[TMP6:%.*]] = add <2 x i64> [[TMP5]], [[TMP3]]
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[C:%.*]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> poison, i64 [[V1]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[V2]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = add <2 x i64> [[TMP4]], [[TMP2]]
+; CHECK-NEXT:    store <2 x i64> [[TMP5]], ptr [[B:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %v1 = load i64, ptr %a
@@ -301,11 +301,11 @@ define void @test10(ptr %a, ptr %b, ptr %c) {
 ; CHECK-NEXT:    store i64 [[U1]], ptr [[A]], align 4
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @may_inf_loop_ro()
 ; CHECK-NEXT:    [[U2:%.*]] = udiv i64 200, [[V2]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr [[C:%.*]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> poison, i64 [[U1]], i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[U2]], i32 1
-; CHECK-NEXT:    [[TMP6:%.*]] = add <2 x i64> [[TMP5]], [[TMP3]]
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], ptr [[B:%.*]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[C:%.*]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> poison, i64 [[U1]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[U2]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = add <2 x i64> [[TMP4]], [[TMP2]]
+; CHECK-NEXT:    store <2 x i64> [[TMP5]], ptr [[B:%.*]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %v1 = load i64, ptr %a
@@ -337,11 +337,11 @@ define void @test11(i64 %x, i64 %y, ptr %b, ptr %c) {
 ; CHECK-NEXT:    store i64 [[U1]], ptr [[B:%.*]], align 4
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @may_inf_loop_ro()
 ; CHECK-NEXT:    [[U2:%.*]] = udiv i64 200, [[Y:%.*]]
-; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x i64>, ptr [[C:%.*]], align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> poison, i64 [[U1]], i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[U2]], i32 1
-; CHECK-NEXT:    [[TMP6:%.*]] = add <2 x i64> [[TMP5]], [[TMP3]]
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], ptr [[B]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x i64>, ptr [[C:%.*]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> poison, i64 [[U1]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[U2]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = add <2 x i64> [[TMP4]], [[TMP2]]
+; CHECK-NEXT:    store <2 x i64> [[TMP5]], ptr [[B]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %u1 = udiv i64 200, %x


        


More information about the llvm-commits mailing list