[llvm] 0248e24 - [X86][update_llc_test_checks] Use a less greedy regular expression for replacing constant pool labels in tests.
Craig Topper via llvm-commits
llvm-commits@lists.llvm.org
Sun Mar 28 11:40:00 PDT 2021
Author: Craig Topper
Date: 2021-03-28T11:39:46-07:00
New Revision: 0248e24071666a348f10cf49496ef5fde4c986d2
URL: https://github.com/llvm/llvm-project/commit/0248e24071666a348f10cf49496ef5fde4c986d2
DIFF: https://github.com/llvm/llvm-project/commit/0248e24071666a348f10cf49496ef5fde4c986d2.diff
LOG: [X86][update_llc_test_checks] Use a less greedy regular expression for replacing constant pool labels in tests.
While working on D97208, I noticed that these greedy regular
expressions prevent tests from failing when (%rip) appears after
a constant pool label where it didn't appear before: the greedy
{{\.LCPI.*}} pattern silently matches the trailing (%rip) as well.
Reviewed By: RKSimon, pengfei
Differential Revision: https://reviews.llvm.org/D99460
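The change to llvm/utils/UpdateTestChecks/asm.py boils down to
emitting an anchored FileCheck pattern for constant pool labels
instead of a greedy one. A minimal Python sketch of the idea follows;
the helper name and the sample assembly line are illustrative, not
the script's actual structure:

  import re

  # Scrub an X86 constant pool label out of an assembly line,
  # replacing it with a FileCheck regex.
  def scrub_constant_pool_labels(asm_line):
      # Old replacement: '{{\.LCPI.*}}'. The trailing '.*' also
      # swallowed anything after the label, e.g. '(%rip)', so a
      # codegen change that added '(%rip)' could not fail the test.
      # New replacement: match only the label digits.
      # (The doubled backslash survives re.sub's replacement-escape
      # handling, leaving a literal '\.' in the output.)
      return re.sub(r'\.LCPI[0-9]+_[0-9]+',
                    '{{\\.LCPI[0-9]+_[0-9]+}}', asm_line)

  print(scrub_constant_pool_labels('vandps .LCPI0_0, %ymm0, %ymm0'))
  # prints: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0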
Added:
Modified:
llvm/test/CodeGen/X86/WidenArith.ll
llvm/test/CodeGen/X86/addsub-constant-folding.ll
llvm/test/CodeGen/X86/atomic-fp.ll
llvm/test/CodeGen/X86/avx-cmp.ll
llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
llvm/test/CodeGen/X86/avx2-arith.ll
llvm/test/CodeGen/X86/avx2-conversions.ll
llvm/test/CodeGen/X86/avx2-intrinsics-x86.ll
llvm/test/CodeGen/X86/avx2-nontemporal.ll
llvm/test/CodeGen/X86/avx2-shift.ll
llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll
llvm/test/CodeGen/X86/avx2-vector-shifts.ll
llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
llvm/test/CodeGen/X86/avx512-intrinsics.ll
llvm/test/CodeGen/X86/avx512bw-intrinsics.ll
llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll
llvm/test/CodeGen/X86/avx512vbmi2-funnel-shifts.ll
llvm/test/CodeGen/X86/avx512vbmi2vl-funnel-shifts.ll
llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
llvm/test/CodeGen/X86/bitreverse.ll
llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
llvm/test/CodeGen/X86/cmov-fp.ll
llvm/test/CodeGen/X86/cmp.ll
llvm/test/CodeGen/X86/code-model-elf.ll
llvm/test/CodeGen/X86/combine-bextr.ll
llvm/test/CodeGen/X86/combine-bitreverse.ll
llvm/test/CodeGen/X86/combine-multiplies.ll
llvm/test/CodeGen/X86/extractelement-fp.ll
llvm/test/CodeGen/X86/fast-isel-fneg.ll
llvm/test/CodeGen/X86/fildll.ll
llvm/test/CodeGen/X86/fma-scalar-combine.ll
llvm/test/CodeGen/X86/fmf-flags.ll
llvm/test/CodeGen/X86/fp-cvt.ll
llvm/test/CodeGen/X86/fp-intrinsics.ll
llvm/test/CodeGen/X86/fp-stack-set-st1.ll
llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll
llvm/test/CodeGen/X86/fp-strict-scalar-inttofp.ll
llvm/test/CodeGen/X86/fp128-cast.ll
llvm/test/CodeGen/X86/fp128-i128.ll
llvm/test/CodeGen/X86/fp80-strict-scalar.ll
llvm/test/CodeGen/X86/fptosi-sat-scalar.ll
llvm/test/CodeGen/X86/fptoui-sat-scalar.ll
llvm/test/CodeGen/X86/funnel-shift-rot.ll
llvm/test/CodeGen/X86/haddsub-broadcast.ll
llvm/test/CodeGen/X86/half.ll
llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
llvm/test/CodeGen/X86/i64-to-float.ll
llvm/test/CodeGen/X86/insert-into-constant-vector.ll
llvm/test/CodeGen/X86/insertelement-var-index.ll
llvm/test/CodeGen/X86/known-bits-vector.ll
llvm/test/CodeGen/X86/known-signbits-vector.ll
llvm/test/CodeGen/X86/limited-prec.ll
llvm/test/CodeGen/X86/masked_gather_scatter.ll
llvm/test/CodeGen/X86/memcmp-minsize.ll
llvm/test/CodeGen/X86/memcmp-more-load-pairs.ll
llvm/test/CodeGen/X86/memcmp-optsize.ll
llvm/test/CodeGen/X86/memcmp-pgso.ll
llvm/test/CodeGen/X86/memcmp.ll
llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll
llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll
llvm/test/CodeGen/X86/mmx-arith.ll
llvm/test/CodeGen/X86/mmx-fold-zero.ll
llvm/test/CodeGen/X86/neg_fp.ll
llvm/test/CodeGen/X86/nontemporal.ll
llvm/test/CodeGen/X86/packss.ll
llvm/test/CodeGen/X86/peep-test-1.ll
llvm/test/CodeGen/X86/pointer-vector.ll
llvm/test/CodeGen/X86/popcnt.ll
llvm/test/CodeGen/X86/pr15309.ll
llvm/test/CodeGen/X86/pr34080-2.ll
llvm/test/CodeGen/X86/pr34605.ll
llvm/test/CodeGen/X86/pr40539.ll
llvm/test/CodeGen/X86/pr40891.ll
llvm/test/CodeGen/X86/pr46527.ll
llvm/test/CodeGen/X86/pr47299.ll
llvm/test/CodeGen/X86/rotate-extract-vector.ll
llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
llvm/test/CodeGen/X86/scalar-int-to-fp.ll
llvm/test/CodeGen/X86/select-of-fp-constants.ll
llvm/test/CodeGen/X86/select.ll
llvm/test/CodeGen/X86/setcc-lowering.ll
llvm/test/CodeGen/X86/shrink-fp-const2.ll
llvm/test/CodeGen/X86/shrink_vmul.ll
llvm/test/CodeGen/X86/sink-addsub-of-const.ll
llvm/test/CodeGen/X86/slow-pmulld.ll
llvm/test/CodeGen/X86/sse-fcopysign.ll
llvm/test/CodeGen/X86/sse-load-ret.ll
llvm/test/CodeGen/X86/sse1-fcopysign.ll
llvm/test/CodeGen/X86/sse1.ll
llvm/test/CodeGen/X86/sse2.ll
llvm/test/CodeGen/X86/sse3.ll
llvm/test/CodeGen/X86/uint64-to-float.ll
llvm/test/CodeGen/X86/uint_to_fp-2.ll
llvm/test/CodeGen/X86/uint_to_fp-3.ll
llvm/test/CodeGen/X86/urem-power-of-two.ll
llvm/test/CodeGen/X86/var-permute-256.ll
llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
llvm/test/CodeGen/X86/vec-strict-inttofp-512.ll
llvm/test/CodeGen/X86/vec_fabs.ll
llvm/test/CodeGen/X86/vec_fneg.ll
llvm/test/CodeGen/X86/vec_fpext.ll
llvm/test/CodeGen/X86/vec_fptrunc.ll
llvm/test/CodeGen/X86/vec_logical.ll
llvm/test/CodeGen/X86/vec_partial.ll
llvm/test/CodeGen/X86/vec_reassociate.ll
llvm/test/CodeGen/X86/vec_shift4.ll
llvm/test/CodeGen/X86/vector-fshl-128.ll
llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
llvm/test/CodeGen/X86/vector-fshr-128.ll
llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
llvm/test/CodeGen/X86/vector-gep.ll
llvm/test/CodeGen/X86/vector-idiv-v2i32.ll
llvm/test/CodeGen/X86/vector-lzcnt-128.ll
llvm/test/CodeGen/X86/vector-lzcnt-256.ll
llvm/test/CodeGen/X86/vector-mul.ll
llvm/test/CodeGen/X86/vector-rotate-128.ll
llvm/test/CodeGen/X86/vector-sext.ll
llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll
llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll
llvm/test/CodeGen/X86/vector-shift-shl-128.ll
llvm/test/CodeGen/X86/vector-shift-shl-256.ll
llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll
llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll
llvm/test/CodeGen/X86/vector-shuffle-combining-xop.ll
llvm/test/CodeGen/X86/vector-shuffle-combining.ll
llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll
llvm/test/CodeGen/X86/vshift-6.ll
llvm/test/CodeGen/X86/widen_load-2.ll
llvm/test/CodeGen/X86/x86-shifts.ll
llvm/test/CodeGen/X86/xop-mask-comments.ll
llvm/test/CodeGen/X86/xor.ll
llvm/utils/UpdateTestChecks/asm.py
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/WidenArith.ll b/llvm/test/CodeGen/X86/WidenArith.ll
index cb9bf03b64c2..8d1eedc96a6e 100644
--- a/llvm/test/CodeGen/X86/WidenArith.ll
+++ b/llvm/test/CodeGen/X86/WidenArith.ll
@@ -11,7 +11,7 @@ define <8 x i32> @test(<8 x float> %a, <8 x float> %b) {
; X86-NEXT: vcmpltps %ymm1, %ymm0, %ymm0
; X86-NEXT: vcmpltps %ymm3, %ymm2, %ymm1
; X86-NEXT: vandps %ymm1, %ymm0, %ymm0
-; X86-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: test:
diff --git a/llvm/test/CodeGen/X86/addsub-constant-folding.ll b/llvm/test/CodeGen/X86/addsub-constant-folding.ll
index c004e77f9ae5..e6af950dd273 100644
--- a/llvm/test/CodeGen/X86/addsub-constant-folding.ll
+++ b/llvm/test/CodeGen/X86/addsub-constant-folding.ll
@@ -64,7 +64,7 @@ define i32 @add_const_add_const_extrause(i32 %arg) {
define <4 x i32> @vec_add_const_add_const(<4 x i32> %arg) {
; X86-LABEL: vec_add_const_add_const:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_add_const_add_const:
@@ -87,7 +87,7 @@ define <4 x i32> @vec_add_const_add_const_extrause(<4 x i32> %arg) {
; X86-NEXT: paddd %xmm1, %xmm0
; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: addl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
@@ -115,7 +115,7 @@ define <4 x i32> @vec_add_const_add_const_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_add_const_add_const_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_add_const_add_const_nonsplat:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_add_const_add_const_nonsplat:
@@ -186,7 +186,7 @@ define i32 @add_const_sub_const_extrause(i32 %arg) {
define <4 x i32> @vec_add_const_sub_const(<4 x i32> %arg) {
; X86-LABEL: vec_add_const_sub_const:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_add_const_sub_const:
@@ -209,7 +209,7 @@ define <4 x i32> @vec_add_const_sub_const_extrause(<4 x i32> %arg) {
; X86-NEXT: paddd %xmm1, %xmm0
; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: addl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
@@ -237,7 +237,7 @@ define <4 x i32> @vec_add_const_sub_const_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_add_const_sub_const_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_add_const_sub_const_nonsplat:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_add_const_sub_const_nonsplat:
@@ -440,7 +440,7 @@ define i32 @sub_const_add_const_extrause(i32 %arg) {
define <4 x i32> @vec_sub_const_add_const(<4 x i32> %arg) {
; X86-LABEL: vec_sub_const_add_const:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_sub_const_add_const:
@@ -458,10 +458,10 @@ define <4 x i32> @vec_sub_const_add_const_extrause(<4 x i32> %arg) {
; X86-NEXT: subl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 32
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: addl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
@@ -487,7 +487,7 @@ define <4 x i32> @vec_sub_const_add_const_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_sub_const_add_const_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_sub_const_add_const_nonsplat:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_sub_const_add_const_nonsplat:
@@ -558,7 +558,7 @@ define i32 @sub_const_sub_const_extrause(i32 %arg) {
define <4 x i32> @vec_sub_const_sub_const(<4 x i32> %arg) {
; X86-LABEL: vec_sub_const_sub_const:
; X86: # %bb.0:
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_sub_const_sub_const:
@@ -576,10 +576,10 @@ define <4 x i32> @vec_sub_const_sub_const_extrause(<4 x i32> %arg) {
; X86-NEXT: subl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 32
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: addl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
@@ -605,7 +605,7 @@ define <4 x i32> @vec_sub_const_sub_const_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_sub_const_sub_const_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_sub_const_sub_const_nonsplat:
; X86: # %bb.0:
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_sub_const_sub_const_nonsplat:
@@ -698,7 +698,7 @@ define <4 x i32> @vec_sub_const_const_sub_extrause(<4 x i32> %arg) {
; X86: # %bb.0:
; X86-NEXT: subl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 32
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
; X86-NEXT: calll vec_use@PLT
; X86-NEXT: movdqa {{.*#+}} xmm0 = [2,2,2,2]
@@ -1074,7 +1074,7 @@ define i32 @const_sub_const_sub_extrause(i32 %arg) {
define <4 x i32> @vec_const_sub_const_sub(<4 x i32> %arg) {
; X86-LABEL: vec_const_sub_const_sub:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_const_sub_const_sub:
@@ -1126,7 +1126,7 @@ define <4 x i32> @vec_const_sub_const_sub_extrause(<4 x i32> %arg) {
define <4 x i32> @vec_const_sub_const_sub_nonsplat(<4 x i32> %arg) {
; X86-LABEL: vec_const_sub_const_sub_nonsplat:
; X86: # %bb.0:
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_const_sub_const_sub_nonsplat:
diff --git a/llvm/test/CodeGen/X86/atomic-fp.ll b/llvm/test/CodeGen/X86/atomic-fp.ll
index 62296169e06d..a87da8428e0a 100644
--- a/llvm/test/CodeGen/X86/atomic-fp.ll
+++ b/llvm/test/CodeGen/X86/atomic-fp.ll
@@ -200,7 +200,7 @@ define dso_local void @fadd_32g() nounwind {
; X86-SSE1-NEXT: movl glob32, %eax
; X86-SSE1-NEXT: movl %eax, (%esp)
; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: addss {{\.LCPI.*}}, %xmm0
+; X86-SSE1-NEXT: addss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE1-NEXT: movl %eax, glob32
@@ -296,7 +296,7 @@ define dso_local void @fadd_64g() nounwind {
; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $8, %esp
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-NEXT: addsd {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: movsd %xmm0, (%esp)
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movlps %xmm0, glob64
@@ -311,7 +311,7 @@ define dso_local void @fadd_64g() nounwind {
; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $8, %esp
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT: vaddsd {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vaddsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vmovlps %xmm0, glob64
@@ -361,7 +361,7 @@ define dso_local void @fadd_32imm() nounwind {
; X86-SSE1-NEXT: movl -559038737, %eax
; X86-SSE1-NEXT: movl %eax, (%esp)
; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: addss {{\.LCPI.*}}, %xmm0
+; X86-SSE1-NEXT: addss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE1-NEXT: movl %eax, -559038737
@@ -459,7 +459,7 @@ define dso_local void @fadd_64imm() nounwind {
; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $8, %esp
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-NEXT: addsd {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: movsd %xmm0, (%esp)
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movlps %xmm0, -559038737
@@ -474,7 +474,7 @@ define dso_local void @fadd_64imm() nounwind {
; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $8, %esp
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT: vaddsd {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vaddsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vmovlps %xmm0, -559038737
@@ -526,7 +526,7 @@ define dso_local void @fadd_32stack() nounwind {
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE1-NEXT: movl %eax, (%esp)
; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE1-NEXT: addss {{\.LCPI.*}}, %xmm0
+; X86-SSE1-NEXT: addss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE1-NEXT: movl %eax, {{[0-9]+}}(%esp)
@@ -628,7 +628,7 @@ define dso_local void @fadd_64stack() nounwind {
; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $16, %esp
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE2-NEXT: addsd {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: movsd %xmm0, (%esp)
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
@@ -643,7 +643,7 @@ define dso_local void @fadd_64stack() nounwind {
; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $16, %esp
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X86-AVX-NEXT: vaddsd {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vaddsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
diff --git a/llvm/test/CodeGen/X86/avx-cmp.ll b/llvm/test/CodeGen/X86/avx-cmp.ll
index 3398fcd7cc10..e22ebc82dda0 100644
--- a/llvm/test/CodeGen/X86/avx-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx-cmp.ll
@@ -49,7 +49,7 @@ define void @render(double %a0) nounwind {
; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
; CHECK-NEXT: vmovsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
-; CHECK-NEXT: vucomisd {{\.LCPI.*}}, %xmm0
+; CHECK-NEXT: vucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: jne .LBB2_5
; CHECK-NEXT: jnp .LBB2_2
; CHECK-NEXT: .LBB2_5: # %if.then
diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll b/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
index 709507e1ed0c..7fff7e117b83 100644
--- a/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
@@ -645,8 +645,8 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
-; X86-AVX-NEXT: vmovhpd {{\.LCPI.*}}, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: vmovhpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: # xmm1 = xmm1[0],mem[0]
; X86-AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x58,0xc1]
; X86-AVX-NEXT: vmovupd %xmm0, (%eax) # encoding: [0xc5,0xf9,0x11,0x00]
@@ -656,8 +656,8 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; X86-AVX512VL: # %bb.0:
; X86-AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
-; X86-AVX512VL-NEXT: vmovhpd {{\.LCPI.*}}, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vmovhpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: # xmm1 = xmm1[0],mem[0]
; X86-AVX512VL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
; X86-AVX512VL-NEXT: vmovupd %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x00]
@@ -667,7 +667,7 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x57,0xc9]
; X64-AVX-NEXT: vmovhpd {{.*}}(%rip), %xmm1, %xmm1 # encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: # xmm1 = xmm1[0],mem[0]
; X64-AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x58,0xc1]
; X64-AVX-NEXT: vmovupd %xmm0, (%rdi) # encoding: [0xc5,0xf9,0x11,0x07]
@@ -677,7 +677,7 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vxorpd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
; X64-AVX512VL-NEXT: vmovhpd {{.*}}(%rip), %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: # xmm1 = xmm1[0],mem[0]
; X64-AVX512VL-NEXT: vaddpd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
; X64-AVX512VL-NEXT: vmovupd %xmm0, (%rdi) # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x07]
diff --git a/llvm/test/CodeGen/X86/avx2-arith.ll b/llvm/test/CodeGen/X86/avx2-arith.ll
index b694b98d04c4..960fbf069bad 100644
--- a/llvm/test/CodeGen/X86/avx2-arith.ll
+++ b/llvm/test/CodeGen/X86/avx2-arith.ll
@@ -148,7 +148,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
; X32-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; X32-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X32-NEXT: vpmullw %ymm1, %ymm0, %ymm0
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
; X32-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; X32-NEXT: vzeroupper
@@ -303,7 +303,7 @@ define <8 x i32> @mul_const5(<8 x i32> %x) {
define <8 x i32> @mul_const6(<8 x i32> %x) {
; X32-LABEL: mul_const6:
; X32: # %bb.0:
-; X32-NEXT: vpmulld {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: mul_const6:
diff --git a/llvm/test/CodeGen/X86/avx2-conversions.ll b/llvm/test/CodeGen/X86/avx2-conversions.ll
index 0c9c552d0f76..68ea4deb9454 100644
--- a/llvm/test/CodeGen/X86/avx2-conversions.ll
+++ b/llvm/test/CodeGen/X86/avx2-conversions.ll
@@ -159,7 +159,7 @@ define <16 x i16> @sext_16i8_16i16(<16 x i8> %z) {
define <16 x i8> @trunc_16i16_16i8(<16 x i16> %z) {
; X32-LABEL: trunc_16i16_16i8:
; X32: # %bb.0:
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: vextracti128 $1, %ymm0, %xmm1
; X32-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; X32-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/avx2-intrinsics-x86.ll b/llvm/test/CodeGen/X86/avx2-intrinsics-x86.ll
index ec9517508ec4..c0f080e8fb9b 100644
--- a/llvm/test/CodeGen/X86/avx2-intrinsics-x86.ll
+++ b/llvm/test/CodeGen/X86/avx2-intrinsics-x86.ll
@@ -25,28 +25,28 @@ define <16 x i16> @test_x86_avx2_packssdw_fold() {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
+; X86-AVX512VL-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packssdw_fold:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packssdw_fold:
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,32767,65535,0,0,0,0,32769,32768,0,65280]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>)
ret <16 x i16> %res
@@ -74,28 +74,28 @@ define <32 x i8> @test_x86_avx2_packsswb_fold() {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
+; X86-AVX512VL-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packsswb_fold:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packsswb_fold:
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0,0,127,127,255,255,128,128,128,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
ret <32 x i8> %res
@@ -123,28 +123,28 @@ define <32 x i8> @test_x86_avx2_packuswb_fold() {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
+; X86-AVX512VL-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packuswb_fold:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packuswb_fold:
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16> <i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678, i16 0, i16 255, i16 256, i16 65535, i16 -1, i16 -255, i16 -256, i16 -32678>, <16 x i16> zeroinitializer)
ret <32 x i8> %res
@@ -753,28 +753,28 @@ define <16 x i16> @test_x86_avx2_packusdw_fold() {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X86-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
+; X86-AVX512VL-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_packusdw_fold:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X64-AVX-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_packusdw_fold:
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [0,0,0,0,255,32767,65535,0,0,0,0,0,0,0,0,0]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfc,0x28,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> zeroinitializer, <8 x i32> <i32 255, i32 32767, i32 65535, i32 -1, i32 -32767, i32 -65535, i32 0, i32 -256>)
ret <16 x i16> %res
@@ -1025,26 +1025,26 @@ define <4 x i32> @test_x86_avx2_psllv_d_const() {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsllvd {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1,1,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x47,0xc9]
; X86-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psllv_d_const:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm1 # EVEX TO VEX Compression xmm1 = [1,1,1,4294967295]
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 # EVEX TO VEX Compression xmm1 = [1,1,1,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x47,0xc9]
; X86-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
@@ -1053,12 +1053,12 @@ define <4 x i32> @test_x86_avx2_psllv_d_const() {
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295]
; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1,1,4294967295]
; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x47,0xc9]
; X64-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
; X64-AVX-NEXT: retq # encoding: [0xc3]
@@ -1067,12 +1067,12 @@ define <4 x i32> @test_x86_avx2_psllv_d_const() {
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x47,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm1 # EVEX TO VEX Compression xmm1 = [1,1,1,4294967295]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsllvd %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x47,0xc9]
; X64-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
@@ -1103,29 +1103,29 @@ define <8 x i32> @test_x86_avx2_psllv_d_256_const() {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsllvd {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsllvd {{\.LCPI.*}}, %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1]
; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psllv_d_256_const:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI.*}}, %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
;
@@ -1133,14 +1133,14 @@ define <8 x i32> @test_x86_avx2_psllv_d_256_const() {
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295]
; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsllvd {{.*}}(%rip), %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1]
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
@@ -1148,14 +1148,14 @@ define <8 x i32> @test_x86_avx2_psllv_d_256_const() {
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x47,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsllvd {{.*}}(%rip), %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x47,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res0 = call <8 x i32> @llvm.x86.avx2.psllv.d.256(<8 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0>, <8 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2>)
@@ -1184,36 +1184,36 @@ define <2 x i64> @test_x86_avx2_psllv_q_const() {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,0,4294967295,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsllvq {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psllv_q_const:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,0,4294967295,4294967295]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,0,4294967295,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsllvq {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_psllv_q_const:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,18446744073709551615]
; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_psllv_q_const:
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,18446744073709551615]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x47,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.psllv.q(<2 x i64> <i64 4, i64 -1>, <2 x i64> <i64 1, i64 -1>)
ret <2 x i64> %res
@@ -1240,36 +1240,36 @@ define <4 x i64> @test_x86_avx2_psllv_q_256_const() {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [4,0,4,0,4,0,4294967295,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsllvq {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psllv_q_256_const:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,0,4,0,4,0,4294967295,4294967295]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,0,4,0,4,0,4294967295,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsllvq {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_psllv_q_256_const:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [4,4,4,18446744073709551615]
; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_psllv_q_256_const:
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,18446744073709551615]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x47,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psllv.q.256(<4 x i64> <i64 4, i64 4, i64 4, i64 -1>, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
ret <4 x i64> %res
@@ -1296,29 +1296,29 @@ define <4 x i32> @test_x86_avx2_psrlv_d_const() {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsrlvd {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4,4,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsrlvd {{\.LCPI.*}}, %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_d_const:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm1 # EVEX TO VEX Compression xmm1 = [4,4,4,4294967295]
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm1 # EVEX TO VEX Compression xmm1 = [4,4,4,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI.*}}, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
;
@@ -1326,14 +1326,14 @@ define <4 x i32> @test_x86_avx2_psrlv_d_const() {
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,0,4294967295]
; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4,4,4294967295]
; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm1, %xmm1 # encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0xfe,0xc1]
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
@@ -1341,14 +1341,14 @@ define <4 x i32> @test_x86_avx2_psrlv_d_const() {
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,0,4294967295]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x45,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm1 # EVEX TO VEX Compression xmm1 = [4,4,4,4294967295]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x45,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res0 = call <4 x i32> @llvm.x86.avx2.psrlv.d(<4 x i32> <i32 2, i32 9, i32 0, i32 -1>, <4 x i32> <i32 1, i32 0, i32 33, i32 -1>)
@@ -1378,29 +1378,29 @@ define <8 x i32> @test_x86_avx2_psrlv_d_256_const() {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295]
; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1]
; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_d_256_const:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
;
@@ -1408,14 +1408,14 @@ define <8 x i32> @test_x86_avx2_psrlv_d_256_const() {
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4294967295]
; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsrlvd {{.*}}(%rip), %ymm1, %ymm1 # encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # encoding: [0xc5,0xfd,0xfe,0xc1]
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
@@ -1423,14 +1423,14 @@ define <8 x i32> @test_x86_avx2_psrlv_d_256_const() {
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,0,4294967295,3,7,4294967295,0]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x45,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm1 # EVEX TO VEX Compression ymm1 = [4,4,4,4,4,4,4,4294967295]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %ymm1, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0x45,0x0d,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpaddd %ymm1, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res0 = call <8 x i32> @llvm.x86.avx2.psrlv.d.256(<8 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0>, <8 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2>)
@@ -1460,36 +1460,36 @@ define <2 x i64> @test_x86_avx2_psrlv_q_const() {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,0,4,0]
; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsrlvq {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_q_const:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,0,4,0]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,0,4,0]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsrlvq {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_psrlv_q_const:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [4,4]
; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_q_const:
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,4]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0x45,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx2.psrlv.q(<2 x i64> <i64 4, i64 4>, <2 x i64> <i64 1, i64 -1>)
ret <2 x i64> %res
@@ -1517,36 +1517,36 @@ define <4 x i64> @test_x86_avx2_psrlv_q_256_const() {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [4,0,4,0,4,0,4,0]
; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsrlvq {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psrlv_q_256_const:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,0,4,0,4,0,4,0]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,0,4,0,4,0,4,0]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsrlvq {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_psrlv_q_256_const:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpbroadcastq {{.*#+}} ymm0 = [4,4,4,4]
; X64-AVX-NEXT: # encoding: [0xc4,0xe2,0x7d,0x59,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_psrlv_q_256_const:
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vpbroadcastq {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4]
; X64-AVX512VL-NEXT: # encoding: [0xc4,0xe2,0x7d,0x59,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0x45,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x i64> @llvm.x86.avx2.psrlv.q.256(<4 x i64> <i64 4, i64 4, i64 4, i64 4>, <4 x i64> <i64 1, i64 1, i64 1, i64 -1>)
ret <4 x i64> %res
@@ -1573,36 +1573,36 @@ define <4 x i32> @test_x86_avx2_psrav_d_const() {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
; X86-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsravd {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsravd {{\.LCPI.*}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_psrav_d_const:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [2,9,4294967284,23]
; X64-AVX-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_const:
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,9,4294967284,23]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x46,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <4 x i32> @llvm.x86.avx2.psrav.d(<4 x i32> <i32 2, i32 9, i32 -12, i32 23>, <4 x i32> <i32 1, i32 18, i32 35, i32 52>)
ret <4 x i32> %res
@@ -1628,36 +1628,36 @@ define <8 x i32> @test_x86_avx2_psrav_d_256_const() {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X86-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX-NEXT: vpsravd {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
-; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
+; X86-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX-NEXT: retl # encoding: [0xc3]
;
; X86-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
+; X86-AVX512VL-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X86-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-AVX512VL-NEXT: vpsravd {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
-; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-AVX512VL-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
+; X86-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-AVX-LABEL: test_x86_avx2_psrav_d_256_const:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X64-AVX-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: test_x86_avx2_psrav_d_256_const:
; X64-AVX512VL: # %bb.0:
; X64-AVX512VL-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx2.psrav.d.256(<8 x i32> <i32 2, i32 9, i32 -12, i32 23, i32 -26, i32 37, i32 -40, i32 51>, <8 x i32> <i32 1, i32 18, i32 35, i32 52, i32 69, i32 15, i32 32, i32 49>)
ret <8 x i32> %res
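
The effect of the tightened pattern, in miniature: the old check regex's greedy ".*" will swallow an addressing suffix such as "(%rip)", so a check line could keep passing after codegen switched a bare constant-pool operand to a rip-relative one; the new pattern stops at the label digits, so any trailing text must match literally. A small Python sketch of the difference (Python's re module is not FileCheck's regex engine, but greediness behaves the same way; the instruction and label below are made up for illustration):

    import re

    # Check patterns in the style update_llc_test_checks emits.
    old = re.compile(r"vpsrlvd \.LCPI.*, %xmm0, %xmm0")             # greedy
    new = re.compile(r"vpsrlvd \.LCPI[0-9]+_[0-9]+, %xmm0, %xmm0")  # tightened

    direct = "vpsrlvd .LCPI0_1, %xmm0, %xmm0"        # absolute label (32-bit)
    riprel = "vpsrlvd .LCPI0_1(%rip), %xmm0, %xmm0"  # rip-relative (64-bit)

    assert old.search(direct) and new.search(direct)  # both accept this form
    assert old.search(riprel)                         # ".*" hides "(%rip)"
    assert new.search(riprel) is None                 # tightened pattern catches it

The hunks below apply the same mechanical substitution across the affected tests.
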
diff --git a/llvm/test/CodeGen/X86/avx2-nontemporal.ll b/llvm/test/CodeGen/X86/avx2-nontemporal.ll
index dac8b0e704ef..f0d8f9c6931a 100644
--- a/llvm/test/CodeGen/X86/avx2-nontemporal.ll
+++ b/llvm/test/CodeGen/X86/avx2-nontemporal.ll
@@ -15,21 +15,21 @@ define i32 @f(<8 x float> %A, i8* %B, <4 x double> %C, <4 x i64> %E, <8 x i32> %
; X32-NEXT: movl 8(%ebp), %ecx
; X32-NEXT: movl 136(%ebp), %edx
; X32-NEXT: movl (%edx), %eax
-; X32-NEXT: vaddps {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vaddps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: vmovntps %ymm0, (%ecx)
-; X32-NEXT: vpaddq {{\.LCPI.*}}, %ymm2, %ymm0
+; X32-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm0
; X32-NEXT: addl (%edx), %eax
; X32-NEXT: vmovntdq %ymm0, (%ecx)
-; X32-NEXT: vaddpd {{\.LCPI.*}}, %ymm1, %ymm0
+; X32-NEXT: vaddpd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X32-NEXT: addl (%edx), %eax
; X32-NEXT: vmovntpd %ymm0, (%ecx)
-; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm5, %ymm0
+; X32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %ymm5, %ymm0
; X32-NEXT: addl (%edx), %eax
; X32-NEXT: vmovntdq %ymm0, (%ecx)
-; X32-NEXT: vpaddw {{\.LCPI.*}}, %ymm4, %ymm0
+; X32-NEXT: vpaddw {{\.LCPI[0-9]+_[0-9]+}}, %ymm4, %ymm0
; X32-NEXT: addl (%edx), %eax
; X32-NEXT: vmovntdq %ymm0, (%ecx)
-; X32-NEXT: vpaddb {{\.LCPI.*}}, %ymm3, %ymm0
+; X32-NEXT: vpaddb {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm0
; X32-NEXT: addl (%edx), %eax
; X32-NEXT: vmovntdq %ymm0, (%ecx)
; X32-NEXT: movl %ebp, %esp
diff --git a/llvm/test/CodeGen/X86/avx2-shift.ll b/llvm/test/CodeGen/X86/avx2-shift.ll
index a9338007ecec..bb6eceff081c 100644
--- a/llvm/test/CodeGen/X86/avx2-shift.ll
+++ b/llvm/test/CodeGen/X86/avx2-shift.ll
@@ -424,7 +424,7 @@ define <32 x i8> @shl9(<32 x i8> %A) nounwind {
; X32-LABEL: shl9:
; X32: # %bb.0:
; X32-NEXT: vpsllw $3, %ymm0, %ymm0
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: shl9:
@@ -440,7 +440,7 @@ define <32 x i8> @shr9(<32 x i8> %A) nounwind {
; X32-LABEL: shr9:
; X32: # %bb.0:
; X32-NEXT: vpsrlw $3, %ymm0, %ymm0
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: shr9:
@@ -472,7 +472,7 @@ define <32 x i8> @sra_v32i8(<32 x i8> %A) nounwind {
; X32-LABEL: sra_v32i8:
; X32: # %bb.0:
; X32-NEXT: vpsrlw $3, %ymm0, %ymm0
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; X32-NEXT: vpxor %ymm1, %ymm0, %ymm0
; X32-NEXT: vpsubb %ymm1, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll b/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll
index 02e0b96e9d99..d88d62dfd322 100644
--- a/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll
+++ b/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll
@@ -7,7 +7,7 @@ define <4 x double> @test_broadcast_2f64_4f64(<2 x double> *%p) nounwind {
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X32-NEXT: vaddpd {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vaddpd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2f64_4f64:
@@ -26,7 +26,7 @@ define <4 x i64> @test_broadcast_2i64_4i64(<2 x i64> *%p) nounwind {
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X32-NEXT: vpaddq {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_2i64_4i64:
@@ -45,7 +45,7 @@ define <8 x float> @test_broadcast_4f32_8f32(<4 x float> *%p) nounwind {
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X32-NEXT: vaddps {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vaddps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4f32_8f32:
@@ -64,7 +64,7 @@ define <8 x i32> @test_broadcast_4i32_8i32(<4 x i32> *%p) nounwind {
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_4i32_8i32:
@@ -83,7 +83,7 @@ define <16 x i16> @test_broadcast_8i16_16i16(<8 x i16> *%p) nounwind {
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X32-NEXT: vpaddw {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpaddw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_8i16_16i16:
@@ -102,7 +102,7 @@ define <32 x i8> @test_broadcast_16i8_32i8(<16 x i8> *%p) nounwind {
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X32-NEXT: vpaddb {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vpaddb {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_broadcast_16i8_32i8:
@@ -122,7 +122,7 @@ define <4 x double> @test_broadcast_2f64_4f64_reuse(<2 x double>* %p0, <2 x doub
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X32-NEXT: vaddpd {{\.LCPI.*}}, %ymm1, %ymm0
+; X32-NEXT: vaddpd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X32-NEXT: vmovapd %xmm1, (%eax)
; X32-NEXT: retl
;
@@ -145,7 +145,7 @@ define <4 x i64> @test_broadcast_2i64_4i64_reuse(<2 x i64>* %p0, <2 x i64>* %p1)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X32-NEXT: vpaddq {{\.LCPI.*}}, %ymm1, %ymm0
+; X32-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X32-NEXT: vmovdqa %xmm1, (%eax)
; X32-NEXT: retl
;
@@ -168,7 +168,7 @@ define <8 x float> @test_broadcast_4f32_8f32_reuse(<4 x float>* %p0, <4 x float>
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X32-NEXT: vaddps {{\.LCPI.*}}, %ymm1, %ymm0
+; X32-NEXT: vaddps {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X32-NEXT: vmovaps %xmm1, (%eax)
; X32-NEXT: retl
;
@@ -191,7 +191,7 @@ define <8 x i32> @test_broadcast_4i32_8i32_reuse(<4 x i32>* %p0, <4 x i32>* %p1)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X32-NEXT: vpaddd {{\.LCPI.*}}, %ymm1, %ymm0
+; X32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X32-NEXT: vmovdqa %xmm1, (%eax)
; X32-NEXT: retl
;
@@ -214,7 +214,7 @@ define <16 x i16> @test_broadcast_8i16_16i16_reuse(<8 x i16> *%p0, <8 x i16> *%p
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X32-NEXT: vpaddw {{\.LCPI.*}}, %ymm1, %ymm0
+; X32-NEXT: vpaddw {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X32-NEXT: vmovdqa %xmm1, (%eax)
; X32-NEXT: retl
;
@@ -237,7 +237,7 @@ define <32 x i8> @test_broadcast_16i8_32i8_reuse(<16 x i8> *%p0, <16 x i8> *%p1)
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT: vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X32-NEXT: vpaddb {{\.LCPI.*}}, %ymm1, %ymm0
+; X32-NEXT: vpaddb {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X32-NEXT: vmovdqa %xmm1, (%eax)
; X32-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/avx2-vector-shifts.ll b/llvm/test/CodeGen/X86/avx2-vector-shifts.ll
index d357150cde4e..c1de8cd0627b 100644
--- a/llvm/test/CodeGen/X86/avx2-vector-shifts.ll
+++ b/llvm/test/CodeGen/X86/avx2-vector-shifts.ll
@@ -486,10 +486,10 @@ define <32 x i8> @shl_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32: # %bb.0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpsllw $4, %ymm0, %ymm2
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpsllw $2, %ymm0, %ymm2
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpaddb %ymm0, %ymm0, %ymm2
@@ -692,14 +692,14 @@ define <32 x i8> @lshr_32i8(<32 x i8> %r, <32 x i8> %a) nounwind {
; X32: # %bb.0:
; X32-NEXT: vpsllw $5, %ymm1, %ymm1
; X32-NEXT: vpsrlw $4, %ymm0, %ymm2
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpsrlw $2, %ymm0, %ymm2
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: vpsrlw $1, %ymm0, %ymm2
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X32-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X32-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X32-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
index 120d0ec83c4a..e78ba264d7cc 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
@@ -1840,7 +1840,7 @@ define <2 x double> @test_mm_cvtu64_sd(<2 x double> %__A, i64 %__B) {
; X86-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm1, %xmm1
; X86-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
-; X86-NEXT: vsubpd {{\.LCPI.*}}, %xmm1, %xmm1
+; X86-NEXT: vsubpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X86-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; X86-NEXT: vaddsd %xmm1, %xmm2, %xmm1
; X86-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
@@ -1888,7 +1888,7 @@ define <4 x float> @test_mm_cvtu64_ss(<4 x float> %__A, i64 %__B) {
; X86-NEXT: vmovq %xmm1, {{[0-9]+}}(%esp)
; X86-NEXT: shrl $31, %eax
; X86-NEXT: fildll {{[0-9]+}}(%esp)
-; X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
@@ -3118,7 +3118,7 @@ entry:
define <8 x double> @test_mm512_fmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_fmsub_round_pd:
; X86: # %bb.0: # %entry
-; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm2, %zmm2
+; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm2, %zmm2
; X86-NEXT: vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: retl
;
@@ -3178,7 +3178,7 @@ entry:
define <8 x double> @test_mm512_fnmadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_fnmadd_round_pd:
; X86: # %bb.0: # %entry
-; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm0, %zmm0
+; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm0, %zmm0
; X86-NEXT: vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: retl
;
@@ -3349,7 +3349,7 @@ entry:
define <8 x double> @test_mm512_fmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_fmsub_pd:
; X86: # %bb.0: # %entry
-; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm2, %zmm2
+; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm2, %zmm2
; X86-NEXT: vfmadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
; X86-NEXT: retl
;
@@ -3409,7 +3409,7 @@ entry:
define <8 x double> @test_mm512_fnmadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_fnmadd_pd:
; X86: # %bb.0: # %entry
-; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm0, %zmm0
+; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm0, %zmm0
; X86-NEXT: vfmadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
; X86-NEXT: retl
;
@@ -3582,7 +3582,7 @@ entry:
define <16 x float> @test_mm512_fmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_fmsub_round_ps:
; X86: # %bb.0: # %entry
-; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm2, %zmm2
+; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm2, %zmm2
; X86-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: retl
;
@@ -3642,7 +3642,7 @@ entry:
define <16 x float> @test_mm512_fnmadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_fnmadd_round_ps:
; X86: # %bb.0: # %entry
-; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; X86-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: retl
;
@@ -3813,7 +3813,7 @@ entry:
define <16 x float> @test_mm512_fmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_fmsub_ps:
; X86: # %bb.0: # %entry
-; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm2, %zmm2
+; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm2, %zmm2
; X86-NEXT: vfmadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
; X86-NEXT: retl
;
@@ -3873,7 +3873,7 @@ entry:
define <16 x float> @test_mm512_fnmadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_fnmadd_ps:
; X86: # %bb.0: # %entry
-; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; X86-NEXT: vfmadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
; X86-NEXT: retl
;
@@ -4046,7 +4046,7 @@ entry:
define <8 x double> @test_mm512_fmsubadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_fmsubadd_round_pd:
; X86: # %bb.0: # %entry
-; X86-NEXT: vpxorq {{\.LCPI.*}}{1to8}, %zmm2, %zmm2
+; X86-NEXT: vpxorq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm2, %zmm2
; X86-NEXT: vfmaddsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: retl
;
@@ -4323,7 +4323,7 @@ entry:
define <16 x float> @test_mm512_fmsubadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_fmsubadd_round_ps:
; X86: # %bb.0: # %entry
-; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm2, %zmm2
+; X86-NEXT: vpxord {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm2, %zmm2
; X86-NEXT: vfmaddsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0
; X86-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics.ll b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
index c008cc094ada..818cd76946db 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics.ll
@@ -7118,9 +7118,9 @@ define <16 x i32> @test_x86_avx512_psllv_d_512_const() {
; X86-LABEL: test_x86_avx512_psllv_d_512_const:
; X86: # %bb.0:
; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0]
-; X86-NEXT: vpsllvd {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295]
-; X86-NEXT: vpsllvd {{\.LCPI.*}}, %zmm1, %zmm1
+; X86-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm1
; X86-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; X86-NEXT: retl
%res0 = call <16 x i32> @llvm.x86.avx512.psllv.d.512(<16 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0, i32 4, i32 5, i32 -2, i32 0, i32 5, i32 3, i32 -3, i32 0>, <16 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2, i32 3, i32 0, i32 35, i32 -1, i32 4, i32 0, i32 36, i32 -3>)
@@ -7191,9 +7191,9 @@ define <8 x i64> @test_x86_avx512_psllv_q_512_const() {
; X86-LABEL: test_x86_avx512_psllv_q_512_const:
; X86: # %bb.0:
; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,0,9,0,0,0,4294967295,4294967295,3,0,7,0,4294967295,4294967295,0,0]
-; X86-NEXT: vpsllvq {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,0,4,0,4,0,4,0,4,0,4,0,4,0,4294967295,4294967295]
-; X86-NEXT: vpsllvq {{\.LCPI.*}}, %zmm1, %zmm1
+; X86-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm1
; X86-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; X86-NEXT: retl
%res0 = call <8 x i64> @llvm.x86.avx512.psllv.q.512(<8 x i64> <i64 2, i64 9, i64 0, i64 -1, i64 3, i64 7, i64 -1, i64 0>, <8 x i64> <i64 1, i64 0, i64 33, i64 -1,i64 2, i64 0, i64 34, i64 -2>)
@@ -7366,9 +7366,9 @@ define <16 x i32> @test_x86_avx512_psrlv_d_512_const() {
; X86-LABEL: test_x86_avx512_psrlv_d_512_const:
; X86: # %bb.0:
; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,0,4294967295,3,7,4294967295,0,4,5,4294967294,0,5,3,4294967293,0]
-; X86-NEXT: vpsrlvd {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4294967295]
-; X86-NEXT: vpsrlvd {{\.LCPI.*}}, %zmm1, %zmm1
+; X86-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm1
; X86-NEXT: vpaddd %zmm1, %zmm0, %zmm0
; X86-NEXT: retl
%res0 = call <16 x i32> @llvm.x86.avx512.psrlv.d.512(<16 x i32> <i32 2, i32 9, i32 0, i32 -1, i32 3, i32 7, i32 -1, i32 0, i32 4, i32 5, i32 -2, i32 0, i32 5, i32 3, i32 -3, i32 0>, <16 x i32> <i32 1, i32 0, i32 33, i32 -1,i32 2, i32 0, i32 34, i32 -2, i32 3, i32 0, i32 35, i32 -1, i32 4, i32 0, i32 36, i32 -3>)
@@ -7439,9 +7439,9 @@ define <8 x i64> @test_x86_avx512_psrlv_q_512_const() {
; X86-LABEL: test_x86_avx512_psrlv_q_512_const:
; X86: # %bb.0:
; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,0,9,0,0,0,4294967295,4294967295,3,0,7,0,4294967295,4294967295,0,0]
-; X86-NEXT: vpsrlvq {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,0,4,0,4,0,4,0,4,0,4,0,4,0,4294967295,4294967295]
-; X86-NEXT: vpsrlvq {{\.LCPI.*}}, %zmm1, %zmm1
+; X86-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm1
; X86-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; X86-NEXT: retl
%res0 = call <8 x i64> @llvm.x86.avx512.psrlv.q.512(<8 x i64> <i64 2, i64 9, i64 0, i64 -1, i64 3, i64 7, i64 -1, i64 0>, <8 x i64> <i64 1, i64 0, i64 33, i64 -1,i64 2, i64 0, i64 34, i64 -2>)
diff --git a/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll b/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll
index 0ed73f142a84..462730a8f6c8 100644
--- a/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512bw-intrinsics.ll
@@ -1292,18 +1292,18 @@ define <32 x i16> @test_x86_avx512_psrlv_w_512_const() optsize {
; X86: # %bb.0:
; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X86-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-NEXT: vpsrlvw {{\.LCPI.*}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x10,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-NEXT: vpsrlvw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x10,0x05,A,A,A,A]
+; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_avx512_psrlv_w_512_const:
; X64: # %bb.0:
; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X64-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x10,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3]
%res1 = call <32 x i16> @llvm.x86.avx512.psrlv.w.512(<32 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
ret <32 x i16> %res1
@@ -1410,18 +1410,18 @@ define <32 x i16>@test_int_x86_avx512_mask_psrav32_hi_const(<32 x i16> %x0, <32
; X86: # %bb.0:
; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51]
; X86-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-NEXT: vpsravw {{\.LCPI.*}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x11,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-NEXT: vpsravw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x11,0x05,A,A,A,A]
+; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_psrav32_hi_const:
; X64: # %bb.0:
; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51,2,9,65524,23,65510,37,65496,51]
; X64-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsravw {{.*}}(%rip), %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x11,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3]
%1 = call <32 x i16> @llvm.x86.avx512.psrav.w.512(<32 x i16> <i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51, i16 2, i16 9, i16 -12, i16 23, i16 -26, i16 37, i16 -40, i16 51>, <32 x i16> <i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49, i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49, i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49, i16 1, i16 10, i16 35, i16 52, i16 69, i16 9, i16 16, i16 49>)
ret <32 x i16> %1
@@ -1575,18 +1575,18 @@ define <32 x i16> @test_x86_avx512_psllv_w_512_const() optsize {
; X86: # %bb.0:
; X86-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X86-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-NEXT: vpsllvw {{\.LCPI.*}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x12,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-NEXT: vpsllvw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x12,0x05,A,A,A,A]
+; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_x86_avx512_psllv_w_512_const:
; X64: # %bb.0:
; X64-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X64-NEXT: # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0 # encoding: [0x62,0xf2,0xfd,0x48,0x12,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3]
%res1 = call <32 x i16> @llvm.x86.avx512.psllv.w.512(<32 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <32 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
ret <32 x i16> %res1
diff --git a/llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll b/llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll
index ab1af2c44a5d..03f1a2d51330 100644
--- a/llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512bwvl-intrinsics.ll
@@ -2153,20 +2153,20 @@ define <8 x i16>@test_int_x86_avx512_maskz_psrlv8_hi(<8 x i16> %x0, <8 x i16> %x
define <8 x i16> @test_int_x86_avx512_psrlv_w_128_const() optsize {
; X86-LABEL: test_int_x86_avx512_psrlv_w_128_const:
; X86: # %bb.0:
-; X86-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
+; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-NEXT: vpsrlvw {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x10,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-NEXT: vpsrlvw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x10,0x05,A,A,A,A]
+; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_psrlv_w_128_const:
; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsrlvw {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x10,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
ret <8 x i16> %res
@@ -2177,20 +2177,20 @@ declare <8 x i16> @llvm.x86.avx512.psrlv.w.128(<8 x i16>, <8 x i16>)
define <16 x i16> @test_int_x86_avx512_psrlv_w_256_const() optsize {
; X86-LABEL: test_int_x86_avx512_psrlv_w_256_const:
; X86: # %bb.0:
-; X86-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
+; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-NEXT: vpsrlvw {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x10,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-NEXT: vpsrlvw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x10,0x05,A,A,A,A]
+; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_psrlv_w_256_const:
; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x10,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.psrlv.w.256(<16 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
ret <16 x i16> %res
@@ -2397,20 +2397,20 @@ define <8 x i16>@test_int_x86_avx512_maskz_psllv8_hi(<8 x i16> %x0, <8 x i16> %x
define <8 x i16> @test_int_x86_avx512_psllv_w_128_const() optsize {
; X86-LABEL: test_int_x86_avx512_psllv_w_128_const:
; X86: # %bb.0:
-; X86-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
+; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-NEXT: vpsllvw {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x12,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-NEXT: vpsllvw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x12,0x05,A,A,A,A]
+; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_psllv_w_128_const:
; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [4,4,4,4,4,4,4,65535]
; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsllvw {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x12,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
ret <8 x i16> %res
@@ -2422,20 +2422,20 @@ declare <8 x i16> @llvm.x86.avx512.psllv.w.128(<8 x i16>, <8 x i16>)
define <16 x i16> @test_int_x86_avx512_psllv_w_256_const() optsize {
; X86-LABEL: test_int_x86_avx512_psllv_w_256_const:
; X86: # %bb.0:
-; X86-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
+; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-NEXT: vpsllvw {{\.LCPI.*}}, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x12,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-NEXT: vpsllvw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x12,0x05,A,A,A,A]
+; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_psllv_w_256_const:
; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,65535]
; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsllvw {{.*}}(%rip), %ymm0, %ymm0 # encoding: [0x62,0xf2,0xfd,0x28,0x12,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3]
%res = call <16 x i16> @llvm.x86.avx512.psllv.w.256(<16 x i16> <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 -1>, <16 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 -1>)
ret <16 x i16> %res
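
On the generating side, update_llc_test_checks scrubs the concrete labels out of the llc output and substitutes a FileCheck regex block in their place. A hypothetical sketch of that substitution with the tighter pattern (the helper name and the exact replacement string here are illustrative, not the script's real code):

    import re

    # Hypothetical sketch: rewrite concrete labels such as ".LCPI0_1" into a
    # FileCheck regex block; any suffix like "(%rip)" is left in place and
    # therefore has to match literally from now on.
    LCPI_RE = re.compile(r"\.LCPI[0-9]+_[0-9]+")

    def scrub_lcpi_labels(line: str) -> str:
        return LCPI_RE.sub(r"{{\\.LCPI[0-9]+_[0-9]+}}", line)

    print(scrub_lcpi_labels("vpsrlvw .LCPI0_0(%rip), %ymm0, %ymm0"))
    # -> vpsrlvw {{\.LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0

Because the suffix survives the scrub, a newly rip-relative operand now shows up as a visible check failure instead of being absorbed by the pattern.
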
diff --git a/llvm/test/CodeGen/X86/avx512vbmi2-funnel-shifts.ll b/llvm/test/CodeGen/X86/avx512vbmi2-funnel-shifts.ll
index fe0d264e370b..d00297a408a6 100644
--- a/llvm/test/CodeGen/X86/avx512vbmi2-funnel-shifts.ll
+++ b/llvm/test/CodeGen/X86/avx512vbmi2-funnel-shifts.ll
@@ -5,7 +5,7 @@
define <8 x i64> @avx512_funnel_shift_q_512(<8 x i64> %a0, <8 x i64> %a1) {
; X86-LABEL: avx512_funnel_shift_q_512:
; X86: # %bb.0:
-; X86-NEXT: vpshldvq {{\.LCPI.*}}, %zmm1, %zmm0
+; X86-NEXT: vpshldvq {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm0
; X86-NEXT: retl
;
; X64-LABEL: avx512_funnel_shift_q_512:
@@ -32,7 +32,7 @@ define <8 x i64> @avx512_funnel_shift_q_512_splat(<8 x i64> %a0, <8 x i64> %a1)
define <16 x i32> @avx512_funnel_shift_d_512(<16 x i32> %a0, <16 x i32> %a1) {
; X86-LABEL: avx512_funnel_shift_d_512:
; X86: # %bb.0:
-; X86-NEXT: vpshldvd {{\.LCPI.*}}, %zmm1, %zmm0
+; X86-NEXT: vpshldvd {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm0
; X86-NEXT: retl
;
; X64-LABEL: avx512_funnel_shift_d_512:
@@ -59,7 +59,7 @@ define <16 x i32> @avx512_funnel_shift_d_512_splat(<16 x i32> %a0, <16 x i32> %a
define <32 x i16> @avx512_funnel_shift_w_512(<32 x i16> %a0, <32 x i16> %a1) {
; X86-LABEL: avx512_funnel_shift_w_512:
; X86: # %bb.0:
-; X86-NEXT: vpshldvw {{\.LCPI.*}}, %zmm1, %zmm0
+; X86-NEXT: vpshldvw {{\.LCPI[0-9]+_[0-9]+}}, %zmm1, %zmm0
; X86-NEXT: retl
;
; X64-LABEL: avx512_funnel_shift_w_512:
diff --git a/llvm/test/CodeGen/X86/avx512vbmi2vl-funnel-shifts.ll b/llvm/test/CodeGen/X86/avx512vbmi2vl-funnel-shifts.ll
index 465abad18020..588039ea78bf 100644
--- a/llvm/test/CodeGen/X86/avx512vbmi2vl-funnel-shifts.ll
+++ b/llvm/test/CodeGen/X86/avx512vbmi2vl-funnel-shifts.ll
@@ -5,7 +5,7 @@
define <2 x i64> @avx512_funnel_shift_q_128(<2 x i64> %a0, <2 x i64> %a1) {
; X86-LABEL: avx512_funnel_shift_q_128:
; X86: # %bb.0:
-; X86-NEXT: vpshldvq {{\.LCPI.*}}, %xmm1, %xmm0
+; X86-NEXT: vpshldvq {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: avx512_funnel_shift_q_128:
@@ -21,7 +21,7 @@ define <2 x i64> @avx512_funnel_shift_q_128(<2 x i64> %a0, <2 x i64> %a1) {
define <4 x i64> @avx512_funnel_shift_q_256(<4 x i64> %a0, <4 x i64> %a1) {
; X86-LABEL: avx512_funnel_shift_q_256:
; X86: # %bb.0:
-; X86-NEXT: vpshldvq {{\.LCPI.*}}, %ymm1, %ymm0
+; X86-NEXT: vpshldvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: avx512_funnel_shift_q_256:
@@ -59,7 +59,7 @@ define <4 x i64> @avx512_funnel_shift_q_256_splat(<4 x i64> %a0, <4 x i64> %a1)
define <4 x i32> @avx512_funnel_shift_d_128(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: avx512_funnel_shift_d_128:
; X86: # %bb.0:
-; X86-NEXT: vpshldvd {{\.LCPI.*}}, %xmm1, %xmm0
+; X86-NEXT: vpshldvd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: avx512_funnel_shift_d_128:
@@ -75,7 +75,7 @@ define <4 x i32> @avx512_funnel_shift_d_128(<4 x i32> %a0, <4 x i32> %a1) {
define <8 x i32> @avx512_funnel_shift_d_256(<8 x i32> %a0, <8 x i32> %a1) {
; X86-LABEL: avx512_funnel_shift_d_256:
; X86: # %bb.0:
-; X86-NEXT: vpshldvd {{\.LCPI.*}}, %ymm1, %ymm0
+; X86-NEXT: vpshldvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: avx512_funnel_shift_d_256:
@@ -113,7 +113,7 @@ define <8 x i32> @avx512_funnel_shift_d_256_splat(<8 x i32> %a0, <8 x i32> %a1)
define <8 x i16> @avx512_funnel_shift_w_128(<8 x i16> %a0, <8 x i16> %a1) {
; X86-LABEL: avx512_funnel_shift_w_128:
; X86: # %bb.0:
-; X86-NEXT: vpshldvw {{\.LCPI.*}}, %xmm1, %xmm0
+; X86-NEXT: vpshldvw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: avx512_funnel_shift_w_128:
@@ -129,7 +129,7 @@ define <8 x i16> @avx512_funnel_shift_w_128(<8 x i16> %a0, <8 x i16> %a1) {
define <16 x i16> @avx512_funnel_shift_w_256(<16 x i16> %a0, <16 x i16> %a1) {
; X86-LABEL: avx512_funnel_shift_w_256:
; X86: # %bb.0:
-; X86-NEXT: vpshldvw {{\.LCPI.*}}, %ymm1, %ymm0
+; X86-NEXT: vpshldvw {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: avx512_funnel_shift_w_256:
diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
index 83f39c1dd7da..fd90a9fd1baa 100644
--- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll
@@ -1905,7 +1905,7 @@ define <2 x i64> @test_mm_mask_set1_epi32(<2 x i64> %__O, i8 zeroext %__M) {
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm0 {%k1}
+; X86-NEXT: vpbroadcastd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 {%k1}
; X86-NEXT: retl
;
; X64-LABEL: test_mm_mask_set1_epi32:
@@ -1927,7 +1927,7 @@ define <2 x i64> @test_mm_maskz_set1_epi32(i8 zeroext %__M) {
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vpbroadcastd {{\.LCPI.*}}, %xmm0 {%k1} {z}
+; X86-NEXT: vpbroadcastd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 {%k1} {z}
; X86-NEXT: retl
;
; X64-LABEL: test_mm_maskz_set1_epi32:
@@ -1948,7 +1948,7 @@ define <4 x i64> @test_mm256_mask_set1_epi32(<4 x i64> %__O, i8 zeroext %__M) {
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm0 {%k1}
+; X86-NEXT: vpbroadcastd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 {%k1}
; X86-NEXT: retl
;
; X64-LABEL: test_mm256_mask_set1_epi32:
@@ -1969,7 +1969,7 @@ define <4 x i64> @test_mm256_maskz_set1_epi32(i8 zeroext %__M) {
; X86: # %bb.0: # %entry
; X86-NEXT: movb {{[0-9]+}}(%esp), %al
; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm0 {%k1} {z}
+; X86-NEXT: vpbroadcastd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 {%k1} {z}
; X86-NEXT: retl
;
; X64-LABEL: test_mm256_maskz_set1_epi32:
diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
index 15054e49b8ca..1fef7ad034e6 100644
--- a/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
@@ -7321,20 +7321,20 @@ define <8 x i32>@test_int_x86_avx512_maskz_psrav8_si(<8 x i32> %x0, <8 x i32> %x
define <8 x i32>@test_int_x86_avx512_mask_psrav8_si_const() {
; X86-LABEL: test_int_x86_avx512_mask_psrav8_si_const:
; X86: # %bb.0:
-; X86-NEXT: vmovdqa {{\.LCPI.*}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
+; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X86-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-NEXT: vpsravd {{\.LCPI.*}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
+; X86-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_psrav8_si_const:
; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*}}(%rip), %ymm0 # EVEX TO VEX Compression ymm0 = [2,9,4294967284,23,4294967270,37,4294967256,51]
; X64-NEXT: # encoding: [0xc5,0xfd,0x6f,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x46,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3]
%res = call <8 x i32> @llvm.x86.avx512.mask.psrav8.si(<8 x i32> <i32 2, i32 9, i32 -12, i32 23, i32 -26, i32 37, i32 -40, i32 51>, <8 x i32> <i32 1, i32 18, i32 35, i32 52, i32 69, i32 15, i32 32, i32 49>, <8 x i32> zeroinitializer, i8 -1)
ret <8 x i32> %res
@@ -8632,20 +8632,20 @@ define <2 x i64>@test_int_x86_avx512_maskz_psrav_q_128(<2 x i64> %x0, <2 x i64>
define <2 x i64>@test_int_x86_avx512_mask_psrav_q_128_const(i8 %x3) {
; X86-LABEL: test_int_x86_avx512_mask_psrav_q_128_const:
; X86: # %bb.0:
-; X86-NEXT: vmovdqa {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,0,4294967287,4294967295]
+; X86-NEXT: vmovdqa {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [2,0,4294967287,4294967295]
; X86-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
-; X86-NEXT: vpsravq {{\.LCPI.*}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x46,0x05,A,A,A,A]
-; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X86-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
+; X86-NEXT: vpsravq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x46,0x05,A,A,A,A]
+; X86-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask_psrav_q_128_const:
; X64: # %bb.0:
; X64-NEXT: vmovdqa {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [2,18446744073709551607]
; X64-NEXT: # encoding: [0xc5,0xf9,0x6f,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: vpsravq {{.*}}(%rip), %xmm0, %xmm0 # encoding: [0x62,0xf2,0xfd,0x08,0x46,0x05,A,A,A,A]
-; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-NEXT: # fixup A - offset: 6, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-NEXT: retq # encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.avx512.mask.psrav.q.128(<2 x i64> <i64 2, i64 -9>, <2 x i64> <i64 1, i64 90>, <2 x i64> zeroinitializer, i8 -1)
ret <2 x i64> %res
diff --git a/llvm/test/CodeGen/X86/bitreverse.ll b/llvm/test/CodeGen/X86/bitreverse.ll
index 8e2f6f9b463b..e10f67d14639 100644
--- a/llvm/test/CodeGen/X86/bitreverse.ll
+++ b/llvm/test/CodeGen/X86/bitreverse.ll
@@ -79,7 +79,7 @@ define <2 x i16> @test_bitreverse_v2i16(<2 x i16> %a) nounwind {
;
; X86XOP-LABEL: test_bitreverse_v2i16:
; X86XOP: # %bb.0:
-; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0
+; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86XOP-NEXT: retl
%b = call <2 x i16> @llvm.bitreverse.v2i16(<2 x i16> %a)
ret <2 x i16> %b
@@ -155,7 +155,7 @@ define i64 @test_bitreverse_i64(i64 %a) nounwind {
; X86XOP-LABEL: test_bitreverse_i64:
; X86XOP: # %bb.0:
; X86XOP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0
+; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86XOP-NEXT: vmovd %xmm0, %eax
; X86XOP-NEXT: vpextrd $1, %xmm0, %edx
; X86XOP-NEXT: retl
@@ -213,7 +213,7 @@ define i32 @test_bitreverse_i32(i32 %a) nounwind {
; X86XOP-LABEL: test_bitreverse_i32:
; X86XOP: # %bb.0:
; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0
+; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86XOP-NEXT: vmovd %xmm0, %eax
; X86XOP-NEXT: retl
%b = call i32 @llvm.bitreverse.i32(i32 %a)
@@ -272,7 +272,7 @@ define i24 @test_bitreverse_i24(i24 %a) nounwind {
; X86XOP-LABEL: test_bitreverse_i24:
; X86XOP: # %bb.0:
; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0
+; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86XOP-NEXT: vmovd %xmm0, %eax
; X86XOP-NEXT: shrl $8, %eax
; X86XOP-NEXT: retl
@@ -332,7 +332,7 @@ define i16 @test_bitreverse_i16(i16 %a) nounwind {
; X86XOP-LABEL: test_bitreverse_i16:
; X86XOP: # %bb.0:
; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0
+; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86XOP-NEXT: vmovd %xmm0, %eax
; X86XOP-NEXT: # kill: def $ax killed $ax killed $eax
; X86XOP-NEXT: retl
@@ -383,7 +383,7 @@ define i8 @test_bitreverse_i8(i8 %a) {
; X86XOP-LABEL: test_bitreverse_i8:
; X86XOP: # %bb.0:
; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0
+; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86XOP-NEXT: vmovd %xmm0, %eax
; X86XOP-NEXT: # kill: def $al killed $al killed $eax
; X86XOP-NEXT: retl
@@ -436,7 +436,7 @@ define i4 @test_bitreverse_i4(i4 %a) {
; X86XOP-LABEL: test_bitreverse_i4:
; X86XOP: # %bb.0:
; X86XOP-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86XOP-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0
+; X86XOP-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86XOP-NEXT: vmovd %xmm0, %eax
; X86XOP-NEXT: shrb $4, %al
; X86XOP-NEXT: # kill: def $al killed $al killed $eax
diff --git a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
index 4840369f7d93..6ce20f18d443 100644
--- a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
+++ b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
@@ -128,7 +128,7 @@ define <32 x i8> @f32xi8_i16(<32 x i8> %a) {
; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f32xi8_i16:
@@ -168,7 +168,7 @@ define <32 x i8> @f32xi8_i32(<32 x i8> %a) {
; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f32xi8_i32:
@@ -209,7 +209,7 @@ define <32 x i8> @f32xi8_i64(<32 x i8> %a) {
; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f32xi8_i64:
@@ -250,7 +250,7 @@ define <32 x i8> @f32xi8_i128(<32 x i8> %a) {
; AVX-NEXT: vpaddb %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddb %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f32xi8_i128:
@@ -716,7 +716,7 @@ define <16 x i16> @f16xi16_i32(<16 x i16> %a) {
; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f16xi16_i32:
@@ -757,7 +757,7 @@ define <16 x i16> @f16xi16_i64(<16 x i16> %a) {
; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f16xi16_i64:
@@ -798,7 +798,7 @@ define <16 x i16> @f16xi16_i128(<16 x i16> %a) {
; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f16xi16_i128:
@@ -1161,7 +1161,7 @@ define <8 x i32> @f8xi32_i64(<8 x i32> %a) {
; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f8xi32_i64:
@@ -1202,7 +1202,7 @@ define <8 x i32> @f8xi32_i128(<8 x i32> %a) {
; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f8xi32_i128:
@@ -1386,7 +1386,7 @@ define <4 x i64> @f4xi64_i128(<4 x i64> %a) {
; AVX-NEXT: vpaddq %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpaddq %xmm2, %xmm0, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX-NEXT: retl
;
; ALL32-LABEL: f4xi64_i128:
diff --git a/llvm/test/CodeGen/X86/cmov-fp.ll b/llvm/test/CodeGen/X86/cmov-fp.ll
index 756324bbdfdc..9af5483b95c0 100644
--- a/llvm/test/CodeGen/X86/cmov-fp.ll
+++ b/llvm/test/CodeGen/X86/cmov-fp.ll
@@ -32,7 +32,7 @@ define double @test1(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovnbe %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -43,7 +43,7 @@ define double @test1(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -54,7 +54,7 @@ define double @test1(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: ja .LBB0_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -95,7 +95,7 @@ define double @test2(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovnb %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -106,7 +106,7 @@ define double @test2(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnb %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -117,7 +117,7 @@ define double @test2(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jae .LBB1_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -158,7 +158,7 @@ define double @test3(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovb %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -169,7 +169,7 @@ define double @test3(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovb %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -180,7 +180,7 @@ define double @test3(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jb .LBB2_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -221,7 +221,7 @@ define double @test4(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovbe %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -232,7 +232,7 @@ define double @test4(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: fldl {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -243,7 +243,7 @@ define double @test4(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jbe .LBB3_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -286,7 +286,7 @@ define double @test5(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setg %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -299,7 +299,7 @@ define double @test5(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setg %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -310,7 +310,7 @@ define double @test5(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jg .LBB4_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -353,7 +353,7 @@ define double @test6(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setge %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -366,7 +366,7 @@ define double @test6(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setge %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -377,7 +377,7 @@ define double @test6(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jge .LBB5_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -420,7 +420,7 @@ define double @test7(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setl %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -433,7 +433,7 @@ define double @test7(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setl %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -444,7 +444,7 @@ define double @test7(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jl .LBB6_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -487,7 +487,7 @@ define double @test8(i32 %a, i32 %b, double %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setle %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -500,7 +500,7 @@ define double @test8(i32 %a, i32 %b, double %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setle %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -511,7 +511,7 @@ define double @test8(i32 %a, i32 %b, double %x) nounwind {
; NOCMOV-NEXT: fldl {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jle .LBB7_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -565,7 +565,7 @@ define float @test9(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: flds {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -576,7 +576,7 @@ define float @test9(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: ja .LBB8_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -630,7 +630,7 @@ define float @test10(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: flds {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnb %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -641,7 +641,7 @@ define float @test10(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jae .LBB9_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -695,7 +695,7 @@ define float @test11(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: flds {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovb %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -706,7 +706,7 @@ define float @test11(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jb .LBB10_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -760,7 +760,7 @@ define float @test12(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: flds {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -771,7 +771,7 @@ define float @test12(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jbe .LBB11_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -827,7 +827,7 @@ define float @test13(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setg %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -838,7 +838,7 @@ define float @test13(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jg .LBB12_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -894,7 +894,7 @@ define float @test14(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setge %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -905,7 +905,7 @@ define float @test14(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jge .LBB13_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -961,7 +961,7 @@ define float @test15(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setl %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -972,7 +972,7 @@ define float @test15(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jl .LBB14_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1028,7 +1028,7 @@ define float @test16(i32 %a, i32 %b, float %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setle %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1039,7 +1039,7 @@ define float @test16(i32 %a, i32 %b, float %x) nounwind {
; NOCMOV-NEXT: flds {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jle .LBB15_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1058,7 +1058,7 @@ define x86_fp80 @test17(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fcmovnbe %st(1), %st
@@ -1070,7 +1070,7 @@ define x86_fp80 @test17(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovnbe %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1081,7 +1081,7 @@ define x86_fp80 @test17(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1092,7 +1092,7 @@ define x86_fp80 @test17(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: ja .LBB16_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1111,7 +1111,7 @@ define x86_fp80 @test18(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fcmovnb %st(1), %st
@@ -1123,7 +1123,7 @@ define x86_fp80 @test18(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovnb %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1134,7 +1134,7 @@ define x86_fp80 @test18(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovnb %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1145,7 +1145,7 @@ define x86_fp80 @test18(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jae .LBB17_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1164,7 +1164,7 @@ define x86_fp80 @test19(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fcmovb %st(1), %st
@@ -1176,7 +1176,7 @@ define x86_fp80 @test19(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovb %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1187,7 +1187,7 @@ define x86_fp80 @test19(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovb %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1198,7 +1198,7 @@ define x86_fp80 @test19(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jb .LBB18_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1217,7 +1217,7 @@ define x86_fp80 @test20(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fcmovbe %st(1), %st
@@ -1229,7 +1229,7 @@ define x86_fp80 @test20(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovbe %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1240,7 +1240,7 @@ define x86_fp80 @test20(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: fldt {{[0-9]+}}(%esp)
; NOSSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovbe %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1251,7 +1251,7 @@ define x86_fp80 @test20(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jbe .LBB19_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1270,7 +1270,7 @@ define x86_fp80 @test21(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: setg %al
@@ -1286,7 +1286,7 @@ define x86_fp80 @test21(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setg %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1299,7 +1299,7 @@ define x86_fp80 @test21(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setg %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1310,7 +1310,7 @@ define x86_fp80 @test21(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jg .LBB20_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1330,7 +1330,7 @@ define x86_fp80 @test22(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: setge %al
@@ -1346,7 +1346,7 @@ define x86_fp80 @test22(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setge %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1359,7 +1359,7 @@ define x86_fp80 @test22(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setge %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1370,7 +1370,7 @@ define x86_fp80 @test22(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jge .LBB21_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1389,7 +1389,7 @@ define x86_fp80 @test23(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: setl %al
@@ -1405,7 +1405,7 @@ define x86_fp80 @test23(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setl %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1418,7 +1418,7 @@ define x86_fp80 @test23(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setl %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1429,7 +1429,7 @@ define x86_fp80 @test23(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jl .LBB22_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
@@ -1448,7 +1448,7 @@ define x86_fp80 @test24(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; SSE-NEXT: flds {{\.LCPI.*}}
+; SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; SSE-NEXT: fxch %st(1)
; SSE-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; SSE-NEXT: setle %al
@@ -1464,7 +1464,7 @@ define x86_fp80 @test24(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE2-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE2-NEXT: setle %al
; NOSSE2-NEXT: testb %al, %al
-; NOSSE2-NEXT: flds {{\.LCPI.*}}
+; NOSSE2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE2-NEXT: fxch %st(1)
; NOSSE2-NEXT: fcmovne %st(1), %st
; NOSSE2-NEXT: fstp %st(1)
@@ -1477,7 +1477,7 @@ define x86_fp80 @test24(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOSSE1-NEXT: cmpl {{[0-9]+}}(%esp), %eax
; NOSSE1-NEXT: setle %al
; NOSSE1-NEXT: testb %al, %al
-; NOSSE1-NEXT: flds {{\.LCPI.*}}
+; NOSSE1-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOSSE1-NEXT: fxch %st(1)
; NOSSE1-NEXT: fcmovne %st(1), %st
; NOSSE1-NEXT: fstp %st(1)
@@ -1488,7 +1488,7 @@ define x86_fp80 @test24(i32 %a, i32 %b, x86_fp80 %x) nounwind {
; NOCMOV-NEXT: fldt {{[0-9]+}}(%esp)
; NOCMOV-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOCMOV-NEXT: cmpl {{[0-9]+}}(%esp), %eax
-; NOCMOV-NEXT: flds {{\.LCPI.*}}
+; NOCMOV-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; NOCMOV-NEXT: jle .LBB23_2
; NOCMOV-NEXT: # %bb.1:
; NOCMOV-NEXT: fstp %st(0)
diff --git a/llvm/test/CodeGen/X86/cmp.ll b/llvm/test/CodeGen/X86/cmp.ll
index ce441bbcc388..bda450137d02 100644
--- a/llvm/test/CodeGen/X86/cmp.ll
+++ b/llvm/test/CodeGen/X86/cmp.ll
@@ -108,12 +108,12 @@ define dso_local i32 @test5(double %A) nounwind {
; CHECK-LABEL: test5:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: ucomisd {{.*}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A]
-; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT: ja .LBB5_3 # encoding: [0x77,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1
; CHECK-NEXT: # %bb.1: # %entry
; CHECK-NEXT: ucomisd {{.*}}(%rip), %xmm0 # encoding: [0x66,0x0f,0x2e,0x05,A,A,A,A]
-; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT: jb .LBB5_3 # encoding: [0x72,A]
; CHECK-NEXT: # fixup A - offset: 1, value: .LBB5_3-1, kind: FK_PCRel_1
; CHECK-NEXT: # %bb.2: # %bb12
diff --git a/llvm/test/CodeGen/X86/code-model-elf.ll b/llvm/test/CodeGen/X86/code-model-elf.ll
index ab25b64960a7..dee7605e541e 100644
--- a/llvm/test/CodeGen/X86/code-model-elf.ll
+++ b/llvm/test/CodeGen/X86/code-model-elf.ll
@@ -417,13 +417,13 @@ define dso_local float @load_constant_pool(float %x) #0 {
;
; MEDIUM-STATIC-LABEL: load_constant_pool:
; MEDIUM-STATIC: # %bb.0:
-; MEDIUM-STATIC-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; MEDIUM-STATIC-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; MEDIUM-STATIC-NEXT: addss (%rax), %xmm0
; MEDIUM-STATIC-NEXT: retq
;
; LARGE-STATIC-LABEL: load_constant_pool:
; LARGE-STATIC: # %bb.0:
-; LARGE-STATIC-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; LARGE-STATIC-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; LARGE-STATIC-NEXT: addss (%rax), %xmm0
; LARGE-STATIC-NEXT: retq
;
@@ -435,7 +435,7 @@ define dso_local float @load_constant_pool(float %x) #0 {
; MEDIUM-PIC-LABEL: load_constant_pool:
; MEDIUM-PIC: # %bb.0:
; MEDIUM-PIC-NEXT: leaq {{.*}}(%rip), %rax
-; MEDIUM-PIC-NEXT: movabsq ${{\.LCPI.*}}@GOTOFF, %rcx
+; MEDIUM-PIC-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}@GOTOFF, %rcx
; MEDIUM-PIC-NEXT: addss (%rax,%rcx), %xmm0
; MEDIUM-PIC-NEXT: retq
;
@@ -445,7 +445,7 @@ define dso_local float @load_constant_pool(float %x) #0 {
; LARGE-PIC-NEXT: leaq .L11${{.*}}(%rip), %rax
; LARGE-PIC-NEXT: movabsq $_GLOBAL_OFFSET_TABLE_-.L11$pb, %rcx
; LARGE-PIC-NEXT: addq %rax, %rcx
-; LARGE-PIC-NEXT: movabsq ${{\.LCPI.*}}@GOTOFF, %rax
+; LARGE-PIC-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}@GOTOFF, %rax
; LARGE-PIC-NEXT: addss (%rcx,%rax), %xmm0
; LARGE-PIC-NEXT: retq
%a = fadd float %x, 1.0
diff --git a/llvm/test/CodeGen/X86/combine-bextr.ll b/llvm/test/CodeGen/X86/combine-bextr.ll
index a6cf651a3992..8744beb1e20e 100644
--- a/llvm/test/CodeGen/X86/combine-bextr.ll
+++ b/llvm/test/CodeGen/X86/combine-bextr.ll
@@ -40,8 +40,8 @@ define float @bextr_uitofp(i32 %x, i32 %y) {
; X32-NEXT: movl $3855, %eax # imm = 0xF0F
; X32-NEXT: bextrl %eax, {{[0-9]+}}(%esp), %eax
; X32-NEXT: movd %eax, %xmm0
-; X32-NEXT: por {{\.LCPI.*}}, %xmm0
-; X32-NEXT: subsd {{\.LCPI.*}}, %xmm0
+; X32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X32-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: cvtsd2ss %xmm0, %xmm0
; X32-NEXT: movss %xmm0, (%esp)
; X32-NEXT: flds (%esp)
diff --git a/llvm/test/CodeGen/X86/combine-bitreverse.ll b/llvm/test/CodeGen/X86/combine-bitreverse.ll
index 8d268ddd75ee..1875c3f889f0 100644
--- a/llvm/test/CodeGen/X86/combine-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/combine-bitreverse.ll
@@ -51,23 +51,23 @@ define <4 x i32> @test_demandedbits_bitreverse(<4 x i32> %a0) nounwind {
; X86-NEXT: packuswb %xmm2, %xmm0
; X86-NEXT: movdqa %xmm0, %xmm1
; X86-NEXT: psllw $4, %xmm1
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-NEXT: psrlw $4, %xmm0
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: por %xmm1, %xmm0
; X86-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X86-NEXT: pand %xmm0, %xmm1
; X86-NEXT: psllw $2, %xmm1
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: psrlw $2, %xmm0
; X86-NEXT: por %xmm1, %xmm0
; X86-NEXT: movdqa {{.*#+}} xmm1 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; X86-NEXT: pand %xmm0, %xmm1
; X86-NEXT: paddb %xmm1, %xmm1
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: psrlw $1, %xmm0
; X86-NEXT: por %xmm1, %xmm0
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test_demandedbits_bitreverse:
diff --git a/llvm/test/CodeGen/X86/combine-multiplies.ll b/llvm/test/CodeGen/X86/combine-multiplies.ll
index 052c96fd2159..73f9642a2197 100644
--- a/llvm/test/CodeGen/X86/combine-multiplies.ll
+++ b/llvm/test/CodeGen/X86/combine-multiplies.ll
@@ -116,7 +116,7 @@ define void @testCombineMultiplies_splat(<4 x i32> %v1) nounwind {
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [242,242,242,242]
; CHECK-NEXT: paddd %xmm0, %xmm2
-; CHECK-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; CHECK-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: movdqa %xmm2, v2
; CHECK-NEXT: movdqa %xmm0, v3
; CHECK-NEXT: movdqa %xmm1, x
@@ -151,7 +151,7 @@ define void @testCombineMultiplies_non_splat(<4 x i32> %v1) nounwind {
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [242,726,1452,2420]
; CHECK-NEXT: paddd %xmm0, %xmm2
-; CHECK-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; CHECK-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: movdqa %xmm2, v2
; CHECK-NEXT: movdqa %xmm0, v3
; CHECK-NEXT: movdqa %xmm1, x
diff --git a/llvm/test/CodeGen/X86/extractelement-fp.ll b/llvm/test/CodeGen/X86/extractelement-fp.ll
index 137b98db28a3..89b3f4f7c0ec 100644
--- a/llvm/test/CodeGen/X86/extractelement-fp.ll
+++ b/llvm/test/CodeGen/X86/extractelement-fp.ll
@@ -328,7 +328,7 @@ define <3 x double> @extvselectsetcc_crash(<2 x double> %x) {
;
; X86-LABEL: extvselectsetcc_crash:
; X86: # %bb.0:
-; X86-NEXT: vcmpeqpd {{\.LCPI.*}}, %xmm0, %xmm1
+; X86-NEXT: vcmpeqpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
; X86-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; X86-NEXT: vandpd %xmm2, %xmm1, %xmm1
; X86-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
@@ -556,7 +556,7 @@ define double @fabs_v4f64(<4 x double> %x) nounwind {
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
-; X86-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: vmovlps %xmm0, (%esp)
; X86-NEXT: fldl (%esp)
; X86-NEXT: movl %ebp, %esp
@@ -830,8 +830,8 @@ define double @copysign_v4f64(<4 x double> %x, <4 x double> %y) nounwind {
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
-; X86-NEXT: vandps {{\.LCPI.*}}, %xmm1, %xmm1
-; X86-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
+; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: vorps %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovlps %xmm0, (%esp)
; X86-NEXT: fldl (%esp)
@@ -1111,7 +1111,7 @@ define double @round_v4f64(<4 x double> %x) nounwind {
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $8, %esp
-; X86-NEXT: vandpd {{\.LCPI.*}}, %xmm0, %xmm1
+; X86-NEXT: vandpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
; X86-NEXT: vmovddup {{.*#+}} xmm2 = [4.9999999999999994E-1,4.9999999999999994E-1]
; X86-NEXT: # xmm2 = mem[0,0]
; X86-NEXT: vorpd %xmm1, %xmm2, %xmm1
diff --git a/llvm/test/CodeGen/X86/fast-isel-fneg.ll b/llvm/test/CodeGen/X86/fast-isel-fneg.ll
index d575a277cf0f..28aabc8fadf5 100644
--- a/llvm/test/CodeGen/X86/fast-isel-fneg.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-fneg.ll
@@ -18,7 +18,7 @@ define double @fneg_f64(double %x) nounwind {
; SSE2-NEXT: andl $-8, %esp
; SSE2-NEXT: subl $8, %esp
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; SSE2-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2-NEXT: movlps %xmm0, (%esp)
; SSE2-NEXT: fldl (%esp)
; SSE2-NEXT: movl %ebp, %esp
@@ -40,7 +40,7 @@ define float @fneg_f32(float %x) nounwind {
; SSE2: # %bb.0:
; SSE2-NEXT: pushl %eax
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; SSE2-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2-NEXT: movss %xmm0, (%esp)
; SSE2-NEXT: flds (%esp)
; SSE2-NEXT: popl %eax
@@ -65,7 +65,7 @@ define void @fneg_f64_mem(double* %x, double* %y) nounwind {
; SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; SSE2-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2-NEXT: movsd %xmm0, (%eax)
; SSE2-NEXT: retl
%a = load double, double* %x
diff --git a/llvm/test/CodeGen/X86/fildll.ll b/llvm/test/CodeGen/X86/fildll.ll
index a91974fbdfa3..8499397fed2d 100644
--- a/llvm/test/CodeGen/X86/fildll.ll
+++ b/llvm/test/CodeGen/X86/fildll.ll
@@ -36,7 +36,7 @@ define fastcc double @uint64_to_fp(i64 %X) {
; CHECK-NEXT: movl %ecx, (%esp)
; CHECK-NEXT: shrl $31, %edx
; CHECK-NEXT: fildll (%esp)
-; CHECK-NEXT: fadds {{\.LCPI.*}}(,%edx,4)
+; CHECK-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%edx,4)
; CHECK-NEXT: fstpl {{[0-9]+}}(%esp)
; CHECK-NEXT: fldl {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %ebp, %esp
diff --git a/llvm/test/CodeGen/X86/fma-scalar-combine.ll b/llvm/test/CodeGen/X86/fma-scalar-combine.ll
index 7d9169ca2552..02e96ea73ceb 100644
--- a/llvm/test/CodeGen/X86/fma-scalar-combine.ll
+++ b/llvm/test/CodeGen/X86/fma-scalar-combine.ll
@@ -548,9 +548,9 @@ define float @fma_const_fmul(float %x) {
; CHECK-LABEL: fma_const_fmul:
; CHECK: # %bb.0:
; CHECK-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x59,0x0d,A,A,A,A]
-; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT: vfmadd132ss {{.*}}(%rip), %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0x99,0x05,A,A,A,A]
-; CHECK-NEXT: # fixup A - offset: 5, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; CHECK-NEXT: # fixup A - offset: 5, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; CHECK-NEXT: # xmm0 = (xmm0 * mem) + xmm1
; CHECK-NEXT: retq # encoding: [0xc3]
%mul1 = fmul contract float %x, 10.0
diff --git a/llvm/test/CodeGen/X86/fmf-flags.ll b/llvm/test/CodeGen/X86/fmf-flags.ll
index c8a85bdd390c..a4285ebc888b 100644
--- a/llvm/test/CodeGen/X86/fmf-flags.ll
+++ b/llvm/test/CodeGen/X86/fmf-flags.ll
@@ -38,7 +38,7 @@ define dso_local float @fast_fmuladd_opts(float %a , float %b , float %c) {
; X86-LABEL: fast_fmuladd_opts:
; X86: # %bb.0:
; X86-NEXT: flds {{[0-9]+}}(%esp)
-; X86-NEXT: fmuls {{\.LCPI.*}}
+; X86-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: retl
%res = call fast float @llvm.fmuladd.f32(float %a, float 2.0, float %a)
ret float %res
@@ -61,9 +61,9 @@ define dso_local double @not_so_fast_mul_add(double %x) {
; X86: # %bb.0:
; X86-NEXT: fldl {{[0-9]+}}(%esp)
; X86-NEXT: fld %st(0)
-; X86-NEXT: fmull {{\.LCPI.*}}
+; X86-NEXT: fmull {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fxch %st(1)
-; X86-NEXT: fmull {{\.LCPI.*}}
+; X86-NEXT: fmull {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fxch %st(1)
; X86-NEXT: fstpl mul1
; X86-NEXT: retl
@@ -127,7 +127,7 @@ define dso_local float @div_arcp_by_const(half %x) {
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: calll __gnu_h2f_ieee
-; X86-NEXT: fmuls {{\.LCPI.*}}
+; X86-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fstps (%esp)
; X86-NEXT: calll __gnu_f2h_ieee
; X86-NEXT: movzwl %ax, %eax
diff --git a/llvm/test/CodeGen/X86/fp-cvt.ll b/llvm/test/CodeGen/X86/fp-cvt.ll
index a7e20c5e8c5e..facb84d6f104 100644
--- a/llvm/test/CodeGen/X86/fp-cvt.ll
+++ b/llvm/test/CodeGen/X86/fp-cvt.ll
@@ -443,7 +443,7 @@ define i64 @fptoui_i64_fp80(x86_fp80 %a0) nounwind {
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: fldt 8(%ebp)
-; X86-NEXT: flds {{\.LCPI.*}}
+; X86-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fucom %st(1)
; X86-NEXT: fnstsw %ax
; X86-NEXT: xorl %edx, %edx
@@ -523,7 +523,7 @@ define i64 @fptoui_i64_fp80_ld(x86_fp80 *%a0) nounwind {
; X86-NEXT: subl $16, %esp
; X86-NEXT: movl 8(%ebp), %eax
; X86-NEXT: fldt (%eax)
-; X86-NEXT: flds {{\.LCPI.*}}
+; X86-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fucom %st(1)
; X86-NEXT: fnstsw %ax
; X86-NEXT: xorl %edx, %edx
@@ -825,7 +825,7 @@ define x86_fp80 @uitofp_fp80_i64(i64 %a0) nounwind {
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: shrl $31, %ecx
; X86-NEXT: fildll (%esp)
-; X86-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
@@ -837,7 +837,7 @@ define x86_fp80 @uitofp_fp80_i64(i64 %a0) nounwind {
; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: sets %al
; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
-; X64-NEXT: fadds {{\.LCPI.*}}(,%rax,4)
+; X64-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%rax,4)
; X64-NEXT: retq
%1 = uitofp i64 %a0 to x86_fp80
ret x86_fp80 %1
@@ -857,7 +857,7 @@ define x86_fp80 @uitofp_fp80_i64_ld(i64 *%a0) nounwind {
; X86-NEXT: movl %ecx, (%esp)
; X86-NEXT: shrl $31, %eax
; X86-NEXT: fildll (%esp)
-; X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: retl
@@ -870,7 +870,7 @@ define x86_fp80 @uitofp_fp80_i64_ld(i64 *%a0) nounwind {
; X64-NEXT: testq %rax, %rax
; X64-NEXT: sets %cl
; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
-; X64-NEXT: fadds {{\.LCPI.*}}(,%rcx,4)
+; X64-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%rcx,4)
; X64-NEXT: retq
%1 = load i64, i64 *%a0
%2 = uitofp i64 %1 to x86_fp80
diff --git a/llvm/test/CodeGen/X86/fp-intrinsics.ll b/llvm/test/CodeGen/X86/fp-intrinsics.ll
index 7fe25c97d2c2..e2fdb904dabc 100644
--- a/llvm/test/CodeGen/X86/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/fp-intrinsics.ll
@@ -18,7 +18,7 @@ define double @f1() #0 {
; X87-LABEL: f1:
; X87: # %bb.0: # %entry
; X87-NEXT: fld1
-; X87-NEXT: fdivs {{\.LCPI.*}}
+; X87-NEXT: fdivs {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: wait
; X87-NEXT: retl
;
@@ -27,7 +27,7 @@ define double @f1() #0 {
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE-NEXT: divsd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: divsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: fldl (%esp)
; X86-SSE-NEXT: wait
@@ -209,7 +209,7 @@ define double @f4(i32 %n, double %a) #0 {
; X86-SSE-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X86-SSE-NEXT: jle .LBB3_2
; X86-SSE-NEXT: # %bb.1: # %if.then
-; X86-SSE-NEXT: addsd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: addsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: .LBB3_2: # %if.end
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: fldl (%esp)
@@ -255,7 +255,7 @@ if.end:
define double @f5() #0 {
; X87-LABEL: f5:
; X87: # %bb.0: # %entry
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fsqrt
; X87-NEXT: wait
; X87-NEXT: retl
@@ -297,9 +297,9 @@ define double @f6() #0 {
; X87: # %bb.0: # %entry
; X87-NEXT: subl $28, %esp
; X87-NEXT: .cfi_def_cfa_offset 32
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl {{[0-9]+}}(%esp)
-; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll pow
@@ -355,7 +355,7 @@ define double @f7() #0 {
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: movl $3, {{[0-9]+}}(%esp)
@@ -411,7 +411,7 @@ define double @f8() #0 {
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll sin
@@ -462,7 +462,7 @@ define double @f9() #0 {
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll cos
@@ -513,7 +513,7 @@ define double @f10() #0 {
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll exp
@@ -564,7 +564,7 @@ define double @f11() #0 {
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll exp2
@@ -615,7 +615,7 @@ define double @f12() #0 {
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll log
@@ -666,7 +666,7 @@ define double @f13() #0 {
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll log10
@@ -717,7 +717,7 @@ define double @f14() #0 {
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll log2
@@ -768,7 +768,7 @@ define double @f15() #0 {
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll rint
@@ -816,7 +816,7 @@ define double @f16() #0 {
; X87: # %bb.0: # %entry
; X87-NEXT: subl $12, %esp
; X87-NEXT: .cfi_def_cfa_offset 16
-; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl (%esp)
; X87-NEXT: wait
; X87-NEXT: calll nearbyint
@@ -863,7 +863,7 @@ define double @f19() #0 {
; X87: # %bb.0: # %entry
; X87-NEXT: subl $28, %esp
; X87-NEXT: .cfi_def_cfa_offset 32
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstpl {{[0-9]+}}(%esp)
; X87-NEXT: wait
; X87-NEXT: movl $1072693248, {{[0-9]+}}(%esp) # imm = 0x3FF00000
@@ -1356,7 +1356,7 @@ define i64 @f20u64(double %x) #0 {
; X87-NEXT: subl $20, %esp
; X87-NEXT: .cfi_def_cfa_offset 24
; X87-NEXT: fldl {{[0-9]+}}(%esp)
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: wait
; X87-NEXT: xorl %edx, %edx
; X87-NEXT: fcomi %st(1), %st
@@ -1541,7 +1541,7 @@ define float @f21() #0 {
; X87: # %bb.0: # %entry
; X87-NEXT: pushl %eax
; X87-NEXT: .cfi_def_cfa_offset 8
-; X87-NEXT: fldl {{\.LCPI.*}}
+; X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fstps (%esp)
; X87-NEXT: flds (%esp)
; X87-NEXT: wait
@@ -2437,8 +2437,8 @@ define double @uifdi(i32 %x) #0 {
; X86-SSE-NEXT: subl $12, %esp
; X86-SSE-NEXT: .cfi_def_cfa_offset 16
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: orpd {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: subsd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movsd %xmm0, (%esp)
; X86-SSE-NEXT: fldl (%esp)
; X86-SSE-NEXT: wait
@@ -2480,7 +2480,7 @@ define double @uifdl(i64 %x) #0 {
; X87-NEXT: movl %eax, (%esp)
; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll (%esp)
-; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstpl {{[0-9]+}}(%esp)
; X87-NEXT: fldl {{[0-9]+}}(%esp)
; X87-NEXT: wait
@@ -2497,7 +2497,7 @@ define double @uifdl(i64 %x) #0 {
; X86-SSE-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE-NEXT: shrl $31, %eax
; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp)
-; X86-SSE-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; X86-SSE-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-SSE-NEXT: fstpl {{[0-9]+}}(%esp)
; X86-SSE-NEXT: wait
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -2658,8 +2658,8 @@ define float @uiffi(i32 %x) #0 {
; X86-SSE-NEXT: pushl %eax
; X86-SSE-NEXT: .cfi_def_cfa_offset 8
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: orpd {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: subsd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvtsd2ss %xmm0, %xmm0
; X86-SSE-NEXT: movss %xmm0, (%esp)
; X86-SSE-NEXT: flds (%esp)
@@ -2702,7 +2702,7 @@ define float @uiffl(i64 %x) #0 {
; X87-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll {{[0-9]+}}(%esp)
-; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstps {{[0-9]+}}(%esp)
; X87-NEXT: flds {{[0-9]+}}(%esp)
; X87-NEXT: wait
@@ -2719,7 +2719,7 @@ define float @uiffl(i64 %x) #0 {
; X86-SSE-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE-NEXT: shrl $31, %eax
; X86-SSE-NEXT: fildll {{[0-9]+}}(%esp)
-; X86-SSE-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; X86-SSE-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-SSE-NEXT: fstps {{[0-9]+}}(%esp)
; X86-SSE-NEXT: wait
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
diff --git a/llvm/test/CodeGen/X86/fp-stack-set-st1.ll b/llvm/test/CodeGen/X86/fp-stack-set-st1.ll
index 065f84d34b8a..1e41c869332b 100644
--- a/llvm/test/CodeGen/X86/fp-stack-set-st1.ll
+++ b/llvm/test/CodeGen/X86/fp-stack-set-st1.ll
@@ -4,8 +4,8 @@
define i32 @main() nounwind {
; CHECK-LABEL: main:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: fldl {{\.LCPI.*}}
-; CHECK-NEXT: fldl {{\.LCPI.*}}
+; CHECK-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
+; CHECK-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: fxch %st(1)
; CHECK-NEXT: #APP
; CHECK-NEXT: fmul %st(1), %st
diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll
index 156ee617e72a..478f2796c5a9 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-fptoint.ll
@@ -679,7 +679,7 @@ define i64 @fptoui_f32toi64(float %x) #0 {
; X87-NEXT: andl $-8, %esp
; X87-NEXT: subl $16, %esp
; X87-NEXT: flds 8(%ebp)
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fcom %st(1)
; X87-NEXT: wait
; X87-NEXT: fnstsw %ax
@@ -1319,7 +1319,7 @@ define i64 @fptoui_f64toi64(double %x) #0 {
; X87-NEXT: andl $-8, %esp
; X87-NEXT: subl $16, %esp
; X87-NEXT: fldl 8(%ebp)
-; X87-NEXT: flds {{\.LCPI.*}}
+; X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-NEXT: fcom %st(1)
; X87-NEXT: wait
; X87-NEXT: fnstsw %ax
diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar-inttofp.ll b/llvm/test/CodeGen/X86/fp-strict-scalar-inttofp.ll
index 49e238df4aa2..b060f3cc8067 100644
--- a/llvm/test/CodeGen/X86/fp-strict-scalar-inttofp.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar-inttofp.ll
@@ -488,8 +488,8 @@ define float @uitofp_i32tof32(i32 %x) #0 {
; SSE-X86-NEXT: pushl %eax
; SSE-X86-NEXT: .cfi_def_cfa_offset 8
; SSE-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-X86-NEXT: orpd {{\.LCPI.*}}, %xmm0
-; SSE-X86-NEXT: subsd {{\.LCPI.*}}, %xmm0
+; SSE-X86-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE-X86-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-X86-NEXT: cvtsd2ss %xmm0, %xmm0
; SSE-X86-NEXT: movss %xmm0, (%esp)
; SSE-X86-NEXT: flds (%esp)
@@ -509,8 +509,8 @@ define float @uitofp_i32tof32(i32 %x) #0 {
; AVX1-X86-NEXT: pushl %eax
; AVX1-X86-NEXT: .cfi_def_cfa_offset 8
; AVX1-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-X86-NEXT: vorpd {{\.LCPI.*}}, %xmm0, %xmm0
-; AVX1-X86-NEXT: vsubsd {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-X86-NEXT: vorpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; AVX1-X86-NEXT: vsubsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-X86-NEXT: vcvtsd2ss %xmm0, %xmm0, %xmm0
; AVX1-X86-NEXT: vmovss %xmm0, (%esp)
; AVX1-X86-NEXT: flds (%esp)
@@ -581,7 +581,7 @@ define float @uitofp_i64tof32(i64 %x) #0 {
; SSE-X86-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; SSE-X86-NEXT: shrl $31, %eax
; SSE-X86-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE-X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE-X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-X86-NEXT: fstps {{[0-9]+}}(%esp)
; SSE-X86-NEXT: wait
; SSE-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -623,7 +623,7 @@ define float @uitofp_i64tof32(i64 %x) #0 {
; AVX-X86-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-X86-NEXT: shrl $31, %eax
; AVX-X86-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-X86-NEXT: fstps {{[0-9]+}}(%esp)
; AVX-X86-NEXT: wait
; AVX-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -671,7 +671,7 @@ define float @uitofp_i64tof32(i64 %x) #0 {
; X87-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll {{[0-9]+}}(%esp)
-; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstps {{[0-9]+}}(%esp)
; X87-NEXT: flds {{[0-9]+}}(%esp)
; X87-NEXT: wait
@@ -1164,8 +1164,8 @@ define double @uitofp_i32tof64(i32 %x) #0 {
; SSE-X86-NEXT: andl $-8, %esp
; SSE-X86-NEXT: subl $8, %esp
; SSE-X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE-X86-NEXT: orpd {{\.LCPI.*}}, %xmm0
-; SSE-X86-NEXT: subsd {{\.LCPI.*}}, %xmm0
+; SSE-X86-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE-X86-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-X86-NEXT: movsd %xmm0, (%esp)
; SSE-X86-NEXT: fldl (%esp)
; SSE-X86-NEXT: wait
@@ -1190,8 +1190,8 @@ define double @uitofp_i32tof64(i32 %x) #0 {
; AVX1-X86-NEXT: andl $-8, %esp
; AVX1-X86-NEXT: subl $8, %esp
; AVX1-X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-X86-NEXT: vorpd {{\.LCPI.*}}, %xmm0, %xmm0
-; AVX1-X86-NEXT: vsubsd {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-X86-NEXT: vorpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; AVX1-X86-NEXT: vsubsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-X86-NEXT: vmovsd %xmm0, (%esp)
; AVX1-X86-NEXT: fldl (%esp)
; AVX1-X86-NEXT: wait
@@ -1268,7 +1268,7 @@ define double @uitofp_i64tof64(i64 %x) #0 {
; SSE-X86-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; SSE-X86-NEXT: shrl $31, %eax
; SSE-X86-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE-X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE-X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-X86-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE-X86-NEXT: wait
; SSE-X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -1310,7 +1310,7 @@ define double @uitofp_i64tof64(i64 %x) #0 {
; AVX-X86-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX-X86-NEXT: shrl $31, %eax
; AVX-X86-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-X86-NEXT: fstpl {{[0-9]+}}(%esp)
; AVX-X86-NEXT: wait
; AVX-X86-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -1358,7 +1358,7 @@ define double @uitofp_i64tof64(i64 %x) #0 {
; X87-NEXT: movl %eax, (%esp)
; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll (%esp)
-; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstpl {{[0-9]+}}(%esp)
; X87-NEXT: fldl {{[0-9]+}}(%esp)
; X87-NEXT: wait
diff --git a/llvm/test/CodeGen/X86/fp128-cast.ll b/llvm/test/CodeGen/X86/fp128-cast.ll
index af269449486f..ef5c7711aaca 100644
--- a/llvm/test/CodeGen/X86/fp128-cast.ll
+++ b/llvm/test/CodeGen/X86/fp128-cast.ll
@@ -1287,8 +1287,8 @@ define fp128 @TestTruncCopysign(fp128 %x, i32 %n) nounwind {
; X32-NEXT: addl $16, %esp
; X32-NEXT: fstpl {{[0-9]+}}(%esp)
; X32-NEXT: testb $-128, {{[0-9]+}}(%esp)
-; X32-NEXT: flds {{\.LCPI.*}}
-; X32-NEXT: flds {{\.LCPI.*}}
+; X32-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
+; X32-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X32-NEXT: jne .LBB26_3
; X32-NEXT: # %bb.2: # %if.then
; X32-NEXT: fstp %st(1)
diff --git a/llvm/test/CodeGen/X86/fp128-i128.ll b/llvm/test/CodeGen/X86/fp128-i128.ll
index 6a70bc24fc6c..a9e932304dad 100644
--- a/llvm/test/CodeGen/X86/fp128-i128.ll
+++ b/llvm/test/CodeGen/X86/fp128-i128.ll
@@ -144,7 +144,7 @@ define fp128 @TestI128_1(fp128 %x) #0 {
; SSE-NEXT: testl %eax, %eax
; SSE-NEXT: sets %cl
; SSE-NEXT: shlq $4, %rcx
-; SSE-NEXT: movaps {{\.LCPI.*}}(%rcx), %xmm0
+; SSE-NEXT: movaps {{\.LCPI[0-9]+_[0-9]+}}(%rcx), %xmm0
; SSE-NEXT: addq $40, %rsp
; SSE-NEXT: retq
;
@@ -164,7 +164,7 @@ define fp128 @TestI128_1(fp128 %x) #0 {
; AVX-NEXT: testl %eax, %eax
; AVX-NEXT: sets %cl
; AVX-NEXT: shlq $4, %rcx
-; AVX-NEXT: vmovaps {{\.LCPI.*}}(%rcx), %xmm0
+; AVX-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}(%rcx), %xmm0
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: retq
entry:
diff --git a/llvm/test/CodeGen/X86/fp80-strict-scalar.ll b/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
index e55e3903c0dc..da638c2e4ff2 100644
--- a/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
+++ b/llvm/test/CodeGen/X86/fp80-strict-scalar.ll
@@ -588,7 +588,7 @@ define i64 @fp80_to_uint64(x86_fp80 %x) #0 {
; X86-NEXT: andl $-8, %esp
; X86-NEXT: subl $16, %esp
; X86-NEXT: fldt 8(%ebp)
-; X86-NEXT: flds {{\.LCPI.*}}
+; X86-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-NEXT: fcom %st(1)
; X86-NEXT: wait
; X86-NEXT: fnstsw %ax
@@ -905,7 +905,7 @@ define x86_fp80 @uint64_to_fp80(i64 %x) #0 {
; X86-NEXT: movl %eax, (%esp)
; X86-NEXT: shrl $31, %ecx
; X86-NEXT: fildll (%esp)
-; X86-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X86-NEXT: wait
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
@@ -919,7 +919,7 @@ define x86_fp80 @uint64_to_fp80(i64 %x) #0 {
; X64-NEXT: testq %rdi, %rdi
; X64-NEXT: sets %al
; X64-NEXT: fildll -{{[0-9]+}}(%rsp)
-; X64-NEXT: fadds {{\.LCPI.*}}(,%rax,4)
+; X64-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%rax,4)
; X64-NEXT: wait
; X64-NEXT: retq
%result = call x86_fp80 @llvm.experimental.constrained.uitofp.f80.i64(i64 %x,
diff --git a/llvm/test/CodeGen/X86/fptosi-sat-scalar.ll b/llvm/test/CodeGen/X86/fptosi-sat-scalar.ll
index 23035a2f7e40..37d8ace804e5 100644
--- a/llvm/test/CodeGen/X86/fptosi-sat-scalar.ll
+++ b/llvm/test/CodeGen/X86/fptosi-sat-scalar.ll
@@ -105,7 +105,7 @@ define i8 @test_signed_i8_f32(float %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -117,7 +117,7 @@ define i8 @test_signed_i8_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl
; X86-X87-NEXT: .LBB1_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -176,7 +176,7 @@ define i13 @test_signed_i13_f32(float %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -188,7 +188,7 @@ define i13 @test_signed_i13_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB2_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -248,7 +248,7 @@ define i16 @test_signed_i16_f32(float %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -260,7 +260,7 @@ define i16 @test_signed_i16_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB3_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -320,7 +320,7 @@ define i19 @test_signed_i19_f32(float %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -332,7 +332,7 @@ define i19 @test_signed_i19_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB4_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -362,8 +362,8 @@ define i19 @test_signed_i19_f32(float %f) nounwind {
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorl %eax, %eax
; X86-SSE-NEXT: ucomiss %xmm0, %xmm0
-; X86-SSE-NEXT: maxss {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: minss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: maxss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: minss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttss2si %xmm0, %ecx
; X86-SSE-NEXT: cmovnpl %ecx, %eax
; X86-SSE-NEXT: retl
@@ -393,7 +393,7 @@ define i32 @test_signed_i32_f32(float %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -405,7 +405,7 @@ define i32 @test_signed_i32_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB5_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -434,7 +434,7 @@ define i32 @test_signed_i32_f32(float %f) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: cvttss2si %xmm0, %eax
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %ecx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %ecx
; X86-SSE-NEXT: xorl %eax, %eax
@@ -471,7 +471,7 @@ define i50 @test_signed_i50_f32(float %f) nounwind {
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -489,7 +489,7 @@ define i50 @test_signed_i50_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB6_4:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -537,12 +537,12 @@ define i50 @test_signed_i50_f32(float %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $131071, %edx # imm = 0x1FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -586,7 +586,7 @@ define i64 @test_signed_i64_f32(float %f) nounwind {
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -604,7 +604,7 @@ define i64 @test_signed_i64_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB7_4:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -652,12 +652,12 @@ define i64 @test_signed_i64_f32(float %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -695,7 +695,7 @@ define i100 @test_signed_i100_f32(float %f) nounwind {
; X86-X87-NEXT: fsts {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-X87-NEXT: fucompp
@@ -723,7 +723,7 @@ define i100 @test_signed_i100_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB8_6:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -789,7 +789,7 @@ define i100 @test_signed_i100_f32(float %f) nounwind {
; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorl %ebp, %ebp
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-8, %ebx
; X86-SSE-NEXT: movl $0, %ecx
; X86-SSE-NEXT: movl $0, %edx
@@ -801,7 +801,7 @@ define i100 @test_signed_i100_f32(float %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB8_2:
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %eax
; X86-SSE-NEXT: cmoval %eax, %edi
; X86-SSE-NEXT: cmoval %eax, %edx
@@ -864,7 +864,7 @@ define i128 @test_signed_i128_f32(float %f) nounwind {
; X86-X87-NEXT: fsts {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-X87-NEXT: fucompp
@@ -888,7 +888,7 @@ define i128 @test_signed_i128_f32(float %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB9_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -964,7 +964,7 @@ define i128 @test_signed_i128_f32(float %f) nounwind {
; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: cmovbl %ecx, %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -973,7 +973,7 @@ define i128 @test_signed_i128_f32(float %f) nounwind {
; X86-SSE-NEXT: cmovbl %ecx, %edi
; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %ebp # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %ebx, %ebp
; X86-SSE-NEXT: movl $-1, %ebx
@@ -1125,7 +1125,7 @@ define i8 @test_signed_i8_f64(double %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1137,7 +1137,7 @@ define i8 @test_signed_i8_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl
; X86-X87-NEXT: .LBB11_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1196,7 +1196,7 @@ define i13 @test_signed_i13_f64(double %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1208,7 +1208,7 @@ define i13 @test_signed_i13_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB12_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1268,7 +1268,7 @@ define i16 @test_signed_i16_f64(double %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1280,7 +1280,7 @@ define i16 @test_signed_i16_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB13_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1340,7 +1340,7 @@ define i19 @test_signed_i19_f64(double %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1352,7 +1352,7 @@ define i19 @test_signed_i19_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB14_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1382,8 +1382,8 @@ define i19 @test_signed_i19_f64(double %f) nounwind {
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: xorl %eax, %eax
; X86-SSE-NEXT: ucomisd %xmm0, %xmm0
-; X86-SSE-NEXT: maxsd {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: minsd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: maxsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: minsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttsd2si %xmm0, %ecx
; X86-SSE-NEXT: cmovnpl %ecx, %eax
; X86-SSE-NEXT: retl
@@ -1413,7 +1413,7 @@ define i32 @test_signed_i32_f64(double %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1425,7 +1425,7 @@ define i32 @test_signed_i32_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB15_2:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1455,8 +1455,8 @@ define i32 @test_signed_i32_f64(double %f) nounwind {
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: xorl %eax, %eax
; X86-SSE-NEXT: ucomisd %xmm0, %xmm0
-; X86-SSE-NEXT: maxsd {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: minsd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: maxsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: minsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttsd2si %xmm0, %ecx
; X86-SSE-NEXT: cmovnpl %ecx, %eax
; X86-SSE-NEXT: retl
@@ -1489,7 +1489,7 @@ define i50 @test_signed_i50_f64(double %f) nounwind {
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1507,7 +1507,7 @@ define i50 @test_signed_i50_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB16_4:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1555,12 +1555,12 @@ define i50 @test_signed_i50_f64(double %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $131071, %edx # imm = 0x1FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -1600,7 +1600,7 @@ define i64 @test_signed_i64_f64(double %f) nounwind {
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1618,7 +1618,7 @@ define i64 @test_signed_i64_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB17_4:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1666,12 +1666,12 @@ define i64 @test_signed_i64_f64(double %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -1709,7 +1709,7 @@ define i100 @test_signed_i100_f64(double %f) nounwind {
; X86-X87-NEXT: fstl {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fstl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; X86-X87-NEXT: fucompp
@@ -1737,7 +1737,7 @@ define i100 @test_signed_i100_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB18_6:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1803,7 +1803,7 @@ define i100 @test_signed_i100_f64(double %f) nounwind {
; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: xorl %ebp, %ebp
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-8, %ebx
; X86-SSE-NEXT: movl $0, %ecx
; X86-SSE-NEXT: movl $0, %edx
@@ -1815,7 +1815,7 @@ define i100 @test_signed_i100_f64(double %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB18_2:
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %eax
; X86-SSE-NEXT: cmoval %eax, %edi
; X86-SSE-NEXT: cmoval %eax, %edx
@@ -1878,7 +1878,7 @@ define i128 @test_signed_i128_f64(double %f) nounwind {
; X86-X87-NEXT: fstl {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fstl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Spill
; X86-X87-NEXT: fucompp
@@ -1902,7 +1902,7 @@ define i128 @test_signed_i128_f64(double %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB19_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -1978,7 +1978,7 @@ define i128 @test_signed_i128_f64(double %f) nounwind {
; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: cmovbl %ecx, %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -1987,7 +1987,7 @@ define i128 @test_signed_i128_f64(double %f) nounwind {
; X86-SSE-NEXT: cmovbl %ecx, %edi
; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %ebp # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %ebx, %ebp
; X86-SSE-NEXT: movl $-1, %ebx
@@ -2153,7 +2153,7 @@ define i8 @test_signed_i8_f16(half %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2165,7 +2165,7 @@ define i8 @test_signed_i8_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl
; X86-X87-NEXT: .LBB21_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2236,7 +2236,7 @@ define i13 @test_signed_i13_f16(half %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2248,7 +2248,7 @@ define i13 @test_signed_i13_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB22_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2320,7 +2320,7 @@ define i16 @test_signed_i16_f16(half %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2332,7 +2332,7 @@ define i16 @test_signed_i16_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB23_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2404,7 +2404,7 @@ define i19 @test_signed_i19_f16(half %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2416,7 +2416,7 @@ define i19 @test_signed_i19_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB24_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2451,8 +2451,8 @@ define i19 @test_signed_i19_f16(half %f) nounwind {
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorl %eax, %eax
; X86-SSE-NEXT: ucomiss %xmm0, %xmm0
-; X86-SSE-NEXT: maxss {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: minss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: maxss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: minss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttss2si %xmm0, %ecx
; X86-SSE-NEXT: cmovnpl %ecx, %eax
; X86-SSE-NEXT: addl $12, %esp
@@ -2489,7 +2489,7 @@ define i32 @test_signed_i32_f16(half %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2501,7 +2501,7 @@ define i32 @test_signed_i32_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB25_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2535,7 +2535,7 @@ define i32 @test_signed_i32_f16(half %f) nounwind {
; X86-SSE-NEXT: fstps {{[0-9]+}}(%esp)
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: cvttss2si %xmm0, %eax
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %ecx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %ecx
; X86-SSE-NEXT: xorl %eax, %eax
@@ -2579,7 +2579,7 @@ define i50 @test_signed_i50_f16(half %f) nounwind {
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2597,7 +2597,7 @@ define i50 @test_signed_i50_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB26_4:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2649,12 +2649,12 @@ define i50 @test_signed_i50_f16(half %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $131071, %edx # imm = 0x1FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -2704,7 +2704,7 @@ define i64 @test_signed_i64_f16(half %f) nounwind {
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2722,7 +2722,7 @@ define i64 @test_signed_i64_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB27_4:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2774,12 +2774,12 @@ define i64 @test_signed_i64_f16(half %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -2823,7 +2823,7 @@ define i100 @test_signed_i100_f16(half %f) nounwind {
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: fsts {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-X87-NEXT: fucompp
@@ -2851,7 +2851,7 @@ define i100 @test_signed_i100_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB28_6:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -2923,7 +2923,7 @@ define i100 @test_signed_i100_f16(half %f) nounwind {
; X86-SSE-NEXT: movss {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 4-byte Reload
; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorl %ebp, %ebp
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-8, %ebx
; X86-SSE-NEXT: movl $0, %ecx
; X86-SSE-NEXT: movl $0, %edx
@@ -2935,7 +2935,7 @@ define i100 @test_signed_i100_f16(half %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB28_2:
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %eax
; X86-SSE-NEXT: cmoval %eax, %edi
; X86-SSE-NEXT: cmoval %eax, %edx
@@ -3002,7 +3002,7 @@ define i128 @test_signed_i128_f16(half %f) nounwind {
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: fsts {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fsts {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-X87-NEXT: fucompp
@@ -3026,7 +3026,7 @@ define i128 @test_signed_i128_f16(half %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB29_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -3108,7 +3108,7 @@ define i128 @test_signed_i128_f16(half %f) nounwind {
; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: movss {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 4-byte Reload
; X86-SSE-NEXT: # xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: cmovbl %ecx, %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
@@ -3117,7 +3117,7 @@ define i128 @test_signed_i128_f16(half %f) nounwind {
; X86-SSE-NEXT: cmovbl %ecx, %edi
; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $2147483647, %ebp # imm = 0x7FFFFFFF
; X86-SSE-NEXT: cmovbel %ebx, %ebp
; X86-SSE-NEXT: movl $-1, %ebx
@@ -3311,7 +3311,7 @@ define i8 @test_signed_i8_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -3323,7 +3323,7 @@ define i8 @test_signed_i8_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movb {{[0-9]+}}(%esp), %dl
; X86-X87-NEXT: .LBB31_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -3359,13 +3359,13 @@ define i8 @test_signed_i8_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fists {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
; X86-SSE-NEXT: movl $128, %ecx
; X86-SSE-NEXT: cmovael %eax, %ecx
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -3422,7 +3422,7 @@ define i13 @test_signed_i13_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -3434,7 +3434,7 @@ define i13 @test_signed_i13_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB32_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -3470,7 +3470,7 @@ define i13 @test_signed_i13_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fists {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -3479,7 +3479,7 @@ define i13 @test_signed_i13_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: .LBB32_2:
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -3538,7 +3538,7 @@ define i16 @test_signed_i16_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fists {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -3550,7 +3550,7 @@ define i16 @test_signed_i16_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB33_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -3586,7 +3586,7 @@ define i16 @test_signed_i16_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fists {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -3595,7 +3595,7 @@ define i16 @test_signed_i16_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: .LBB33_2:
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -3654,7 +3654,7 @@ define i19 @test_signed_i19_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -3666,7 +3666,7 @@ define i19 @test_signed_i19_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB34_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -3702,7 +3702,7 @@ define i19 @test_signed_i19_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fistl {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw (%esp)
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -3711,7 +3711,7 @@ define i19 @test_signed_i19_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: .LBB34_2:
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -3768,7 +3768,7 @@ define i32 @test_signed_i32_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-X87-NEXT: fistl {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -3780,7 +3780,7 @@ define i32 @test_signed_i32_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB35_2:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -3816,7 +3816,7 @@ define i32 @test_signed_i32_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fistl {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw (%esp)
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -3825,7 +3825,7 @@ define i32 @test_signed_i32_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: .LBB35_2:
-; X86-SSE-NEXT: fldl {{\.LCPI.*}}
+; X86-SSE-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -3885,7 +3885,7 @@ define i50 @test_signed_i50_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -3903,7 +3903,7 @@ define i50 @test_signed_i50_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB36_4:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -3950,7 +3950,7 @@ define i50 @test_signed_i50_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -3958,7 +3958,7 @@ define i50 @test_signed_i50_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-131072, %eax # imm = 0xFFFE0000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: fldl {{\.LCPI.*}}
+; X86-SSE-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -4024,7 +4024,7 @@ define i64 @test_signed_i64_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fld %st(0)
; X86-X87-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-X87-NEXT: fldcw {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -4042,7 +4042,7 @@ define i64 @test_signed_i64_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.3:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB37_4:
-; X86-X87-NEXT: fldt {{\.LCPI.*}}
+; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -4089,7 +4089,7 @@ define i64 @test_signed_i64_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE-NEXT: fldcw {{[0-9]+}}(%esp)
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -4097,7 +4097,7 @@ define i64 @test_signed_i64_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: cmovbl %ecx, %esi
; X86-SSE-NEXT: movl $-2147483648, %eax # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: fldt {{\.LCPI.*}}
+; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -4161,7 +4161,7 @@ define i100 @test_signed_i100_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fstpt {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fld %st(1)
; X86-X87-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-X87-NEXT: fxch %st(1)
@@ -4190,7 +4190,7 @@ define i100 @test_signed_i100_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-X87-NEXT: .LBB38_6:
-; X86-X87-NEXT: fldt {{\.LCPI.*}}
+; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -4258,7 +4258,7 @@ define i100 @test_signed_i100_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-SSE-NEXT: xorl %ebp, %ebp
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -4273,7 +4273,7 @@ define i100 @test_signed_i100_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB38_2:
-; X86-SSE-NEXT: fldt {{\.LCPI.*}}
+; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -4347,7 +4347,7 @@ define i128 @test_signed_i128_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: fstpt {{[0-9]+}}(%esp)
; X86-X87-NEXT: leal {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fld %st(1)
; X86-X87-NEXT: fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-X87-NEXT: fxch %st(1)
@@ -4372,7 +4372,7 @@ define i128 @test_signed_i128_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB39_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
-; X86-X87-NEXT: fldt {{\.LCPI.*}}
+; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fstp %st(1)
@@ -4449,7 +4449,7 @@ define i128 @test_signed_i128_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: calll __fixxfti
; X86-SSE-NEXT: subl $4, %esp
; X86-SSE-NEXT: xorl %ecx, %ecx
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
@@ -4461,7 +4461,7 @@ define i128 @test_signed_i128_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: cmovbl %ecx, %edi
; X86-SSE-NEXT: movl $-2147483648, %ebx # imm = 0x80000000
; X86-SSE-NEXT: cmovael {{[0-9]+}}(%esp), %ebx
-; X86-SSE-NEXT: fldt {{\.LCPI.*}}
+; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
diff --git a/llvm/test/CodeGen/X86/fptoui-sat-scalar.ll b/llvm/test/CodeGen/X86/fptoui-sat-scalar.ll
index 294189815c49..18bc6400c583 100644
--- a/llvm/test/CodeGen/X86/fptoui-sat-scalar.ll
+++ b/llvm/test/CodeGen/X86/fptoui-sat-scalar.ll
@@ -107,7 +107,7 @@ define i8 @test_unsigned_i8_f32(float %f) nounwind {
; X86-X87-NEXT: .LBB1_1:
; X86-X87-NEXT: xorl %ecx, %ecx
; X86-X87-NEXT: .LBB1_3:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -168,7 +168,7 @@ define i13 @test_unsigned_i13_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB2_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -230,7 +230,7 @@ define i16 @test_unsigned_i16_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB3_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -293,7 +293,7 @@ define i19 @test_unsigned_i19_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB4_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -312,7 +312,7 @@ define i19 @test_unsigned_i19_f32(float %f) nounwind {
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorps %xmm1, %xmm1
; X86-SSE-NEXT: maxss %xmm1, %xmm0
-; X86-SSE-NEXT: minss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: minss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttss2si %xmm0, %eax
; X86-SSE-NEXT: retl
;
@@ -352,7 +352,7 @@ define i32 @test_unsigned_i32_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB5_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -381,7 +381,7 @@ define i32 @test_unsigned_i32_f32(float %f) nounwind {
; X86-SSE-NEXT: xorps %xmm1, %xmm1
; X86-SSE-NEXT: ucomiss %xmm1, %xmm0
; X86-SSE-NEXT: cmovael %ecx, %edx
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %eax
; X86-SSE-NEXT: cmovbel %edx, %eax
; X86-SSE-NEXT: retl
@@ -407,7 +407,7 @@ define i50 @test_unsigned_i50_f32(float %f) nounwind {
; X86-X87-NEXT: pushl %esi
; X86-X87-NEXT: subl $16, %esp
; X86-X87-NEXT: flds {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax
; X86-X87-NEXT: xorl %ecx, %ecx
@@ -449,7 +449,7 @@ define i50 @test_unsigned_i50_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB6_6:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -503,7 +503,7 @@ define i50 @test_unsigned_i50_f32(float %f) nounwind {
; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: .LBB6_4:
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $262143, %edx # imm = 0x3FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -534,7 +534,7 @@ define i64 @test_unsigned_i64_f32(float %f) nounwind {
; X86-X87-NEXT: pushl %esi
; X86-X87-NEXT: subl $20, %esp
; X86-X87-NEXT: flds {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax
; X86-X87-NEXT: xorl %ecx, %ecx
@@ -576,7 +576,7 @@ define i64 @test_unsigned_i64_f32(float %f) nounwind {
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-X87-NEXT: .LBB7_6:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -627,7 +627,7 @@ define i64 @test_unsigned_i64_f32(float %f) nounwind {
; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: .LBB7_4:
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %ecx
; X86-SSE-NEXT: cmoval %ecx, %edx
; X86-SSE-NEXT: cmoval %ecx, %eax
@@ -698,7 +698,7 @@ define i100 @test_unsigned_i100_f32(float %f) nounwind {
; X86-X87-NEXT: .LBB8_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -759,7 +759,7 @@ define i100 @test_unsigned_i100_f32(float %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB8_2:
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $15, %ebx
; X86-SSE-NEXT: cmovbel %edi, %ebx
; X86-SSE-NEXT: movl $-1, %edi
@@ -843,7 +843,7 @@ define i128 @test_unsigned_i128_f32(float %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB9_6:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -900,7 +900,7 @@ define i128 @test_unsigned_i128_f32(float %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB9_2:
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %ebx
; X86-SSE-NEXT: cmoval %ebx, %edi
; X86-SSE-NEXT: cmoval %ebx, %edx
@@ -1043,7 +1043,7 @@ define i8 @test_unsigned_i8_f64(double %f) nounwind {
; X86-X87-NEXT: .LBB11_1:
; X86-X87-NEXT: xorl %ecx, %ecx
; X86-X87-NEXT: .LBB11_3:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -1104,7 +1104,7 @@ define i13 @test_unsigned_i13_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB12_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -1166,7 +1166,7 @@ define i16 @test_unsigned_i16_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB13_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -1229,7 +1229,7 @@ define i19 @test_unsigned_i19_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB14_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -1248,7 +1248,7 @@ define i19 @test_unsigned_i19_f64(double %f) nounwind {
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: xorpd %xmm1, %xmm1
; X86-SSE-NEXT: maxsd %xmm1, %xmm0
-; X86-SSE-NEXT: minsd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: minsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttsd2si %xmm0, %eax
; X86-SSE-NEXT: retl
;
@@ -1288,7 +1288,7 @@ define i32 @test_unsigned_i32_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB15_2:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -1307,7 +1307,7 @@ define i32 @test_unsigned_i32_f64(double %f) nounwind {
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE-NEXT: xorpd %xmm1, %xmm1
; X86-SSE-NEXT: maxsd %xmm1, %xmm0
-; X86-SSE-NEXT: minsd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: minsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttsd2si %xmm0, %ecx
; X86-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X86-SSE-NEXT: movapd %xmm0, %xmm2
@@ -1337,7 +1337,7 @@ define i50 @test_unsigned_i50_f64(double %f) nounwind {
; X86-X87-NEXT: pushl %esi
; X86-X87-NEXT: subl $16, %esp
; X86-X87-NEXT: fldl {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax
; X86-X87-NEXT: xorl %ecx, %ecx
@@ -1379,7 +1379,7 @@ define i50 @test_unsigned_i50_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB16_6:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -1433,7 +1433,7 @@ define i50 @test_unsigned_i50_f64(double %f) nounwind {
; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: .LBB16_4:
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $262143, %edx # imm = 0x3FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -1460,7 +1460,7 @@ define i64 @test_unsigned_i64_f64(double %f) nounwind {
; X86-X87-NEXT: pushl %esi
; X86-X87-NEXT: subl $20, %esp
; X86-X87-NEXT: fldl {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax
; X86-X87-NEXT: xorl %ecx, %ecx
@@ -1502,7 +1502,7 @@ define i64 @test_unsigned_i64_f64(double %f) nounwind {
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-X87-NEXT: .LBB17_6:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -1553,7 +1553,7 @@ define i64 @test_unsigned_i64_f64(double %f) nounwind {
; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: .LBB17_4:
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %ecx
; X86-SSE-NEXT: cmoval %ecx, %edx
; X86-SSE-NEXT: cmoval %ecx, %eax
@@ -1624,7 +1624,7 @@ define i100 @test_unsigned_i100_f64(double %f) nounwind {
; X86-X87-NEXT: .LBB18_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -1685,7 +1685,7 @@ define i100 @test_unsigned_i100_f64(double %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB18_2:
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $15, %ebx
; X86-SSE-NEXT: cmovbel %edi, %ebx
; X86-SSE-NEXT: movl $-1, %edi
@@ -1769,7 +1769,7 @@ define i128 @test_unsigned_i128_f64(double %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB19_6:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldl {{[-0-9]+}}(%e{{[sb]}}p) # 8-byte Folded Reload
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -1826,7 +1826,7 @@ define i128 @test_unsigned_i128_f64(double %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB19_2:
-; X86-SSE-NEXT: ucomisd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomisd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %ebx
; X86-SSE-NEXT: cmoval %ebx, %edi
; X86-SSE-NEXT: cmoval %ebx, %edx
@@ -1983,7 +1983,7 @@ define i8 @test_unsigned_i8_f16(half %f) nounwind {
; X86-X87-NEXT: .LBB21_1:
; X86-X87-NEXT: xorl %ecx, %ecx
; X86-X87-NEXT: .LBB21_3:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -2056,7 +2056,7 @@ define i13 @test_unsigned_i13_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB22_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -2130,7 +2130,7 @@ define i16 @test_unsigned_i16_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB23_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -2205,7 +2205,7 @@ define i19 @test_unsigned_i19_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB24_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -2229,7 +2229,7 @@ define i19 @test_unsigned_i19_f16(half %f) nounwind {
; X86-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: xorps %xmm1, %xmm1
; X86-SSE-NEXT: maxss %xmm1, %xmm0
-; X86-SSE-NEXT: minss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: minss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvttss2si %xmm0, %eax
; X86-SSE-NEXT: addl $12, %esp
; X86-SSE-NEXT: retl
@@ -2276,7 +2276,7 @@ define i32 @test_unsigned_i32_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB25_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -2310,7 +2310,7 @@ define i32 @test_unsigned_i32_f16(half %f) nounwind {
; X86-SSE-NEXT: xorps %xmm1, %xmm1
; X86-SSE-NEXT: ucomiss %xmm1, %xmm0
; X86-SSE-NEXT: cmovael %ecx, %edx
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %eax
; X86-SSE-NEXT: cmovbel %edx, %eax
; X86-SSE-NEXT: addl $12, %esp
@@ -2343,7 +2343,7 @@ define i50 @test_unsigned_i50_f16(half %f) nounwind {
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: calll __gnu_h2f_ieee
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax
@@ -2385,7 +2385,7 @@ define i50 @test_unsigned_i50_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB26_6:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -2443,7 +2443,7 @@ define i50 @test_unsigned_i50_f16(half %f) nounwind {
; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-SSE-NEXT: .LBB26_4:
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $262143, %edx # imm = 0x3FFFF
; X86-SSE-NEXT: cmovbel %eax, %edx
; X86-SSE-NEXT: movl $-1, %eax
@@ -2480,7 +2480,7 @@ define i64 @test_unsigned_i64_f16(half %f) nounwind {
; X86-X87-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-X87-NEXT: movl %eax, (%esp)
; X86-X87-NEXT: calll __gnu_h2f_ieee
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax
@@ -2522,7 +2522,7 @@ define i64 @test_unsigned_i64_f16(half %f) nounwind {
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-X87-NEXT: .LBB27_6:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -2577,7 +2577,7 @@ define i64 @test_unsigned_i64_f16(half %f) nounwind {
; X86-SSE-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: .LBB27_4:
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %ecx
; X86-SSE-NEXT: cmoval %ecx, %edx
; X86-SSE-NEXT: cmoval %ecx, %eax
@@ -2654,7 +2654,7 @@ define i100 @test_unsigned_i100_f16(half %f) nounwind {
; X86-X87-NEXT: .LBB28_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -2721,7 +2721,7 @@ define i100 @test_unsigned_i100_f16(half %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB28_2:
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $15, %ebx
; X86-SSE-NEXT: cmovbel %edi, %ebx
; X86-SSE-NEXT: movl $-1, %edi
@@ -2809,7 +2809,7 @@ define i128 @test_unsigned_i128_f16(half %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB29_6:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: flds {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -2872,7 +2872,7 @@ define i128 @test_unsigned_i128_f16(half %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB29_2:
-; X86-SSE-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movl $-1, %ebx
; X86-SSE-NEXT: cmoval %ebx, %edi
; X86-SSE-NEXT: cmoval %ebx, %edx
@@ -3051,7 +3051,7 @@ define i8 @test_unsigned_i8_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: .LBB31_1:
; X86-X87-NEXT: xorl %ecx, %ecx
; X86-X87-NEXT: .LBB31_3:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -3083,7 +3083,7 @@ define i8 @test_unsigned_i8_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: fstp %st(1)
; X86-SSE-NEXT: cmovael %eax, %ecx
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0)
@@ -3146,7 +3146,7 @@ define i13 @test_unsigned_i13_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB32_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -3181,7 +3181,7 @@ define i13 @test_unsigned_i13_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: .LBB32_2:
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0)
@@ -3246,7 +3246,7 @@ define i16 @test_unsigned_i16_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB33_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -3281,7 +3281,7 @@ define i16 @test_unsigned_i16_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: .LBB33_2:
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0)
@@ -3347,7 +3347,7 @@ define i19 @test_unsigned_i19_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB34_2:
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -3382,7 +3382,7 @@ define i19 @test_unsigned_i19_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: .LBB34_2:
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0)
@@ -3447,7 +3447,7 @@ define i32 @test_unsigned_i32_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.1:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-X87-NEXT: .LBB35_2:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -3482,7 +3482,7 @@ define i32 @test_unsigned_i32_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: # %bb.1:
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: .LBB35_2:
-; X86-SSE-NEXT: fldl {{\.LCPI.*}}
+; X86-SSE-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0)
@@ -3528,7 +3528,7 @@ define i50 @test_unsigned_i50_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: pushl %esi
; X86-X87-NEXT: subl $16, %esp
; X86-X87-NEXT: fldt {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax
; X86-X87-NEXT: xorl %ecx, %ecx
@@ -3570,7 +3570,7 @@ define i50 @test_unsigned_i50_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB36_6:
-; X86-X87-NEXT: fldl {{\.LCPI.*}}
+; X86-X87-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -3595,7 +3595,7 @@ define i50 @test_unsigned_i50_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: pushl %esi
; X86-SSE-NEXT: subl $16, %esp
; X86-SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: xorl %eax, %eax
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: setbe %cl
@@ -3625,7 +3625,7 @@ define i50 @test_unsigned_i50_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl %eax, %esi
; X86-SSE-NEXT: .LBB36_2:
-; X86-SSE-NEXT: fldl {{\.LCPI.*}}
+; X86-SSE-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0)
@@ -3682,7 +3682,7 @@ define i64 @test_unsigned_i64_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: pushl %esi
; X86-X87-NEXT: subl $20, %esp
; X86-X87-NEXT: fldt {{[0-9]+}}(%esp)
-; X86-X87-NEXT: flds {{\.LCPI.*}}
+; X86-X87-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fucom %st(1)
; X86-X87-NEXT: fnstsw %ax
; X86-X87-NEXT: xorl %ecx, %ecx
@@ -3724,7 +3724,7 @@ define i64 @test_unsigned_i64_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: # %bb.5:
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-X87-NEXT: .LBB37_6:
-; X86-X87-NEXT: fldt {{\.LCPI.*}}
+; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fxch %st(1)
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -3747,7 +3747,7 @@ define i64 @test_unsigned_i64_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: pushl %ebx
; X86-SSE-NEXT: subl $16, %esp
; X86-SSE-NEXT: fldt {{[0-9]+}}(%esp)
-; X86-SSE-NEXT: flds {{\.LCPI.*}}
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: xorl %ecx, %ecx
; X86-SSE-NEXT: fucomi %st(1), %st
; X86-SSE-NEXT: setbe %bl
@@ -3777,7 +3777,7 @@ define i64 @test_unsigned_i64_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movl %ecx, %edx
; X86-SSE-NEXT: .LBB37_2:
-; X86-SSE-NEXT: fldt {{\.LCPI.*}}
+; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0)
@@ -3869,7 +3869,7 @@ define i100 @test_unsigned_i100_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: .LBB38_6:
; X86-X87-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-X87-NEXT: fldt {{\.LCPI.*}}
+; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -3933,7 +3933,7 @@ define i100 @test_unsigned_i100_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB38_2:
-; X86-SSE-NEXT: fldt {{\.LCPI.*}}
+; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0)
@@ -4028,7 +4028,7 @@ define i128 @test_unsigned_i128_f80(x86_fp80 %f) nounwind {
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-X87-NEXT: .LBB39_6:
-; X86-X87-NEXT: fldt {{\.LCPI.*}}
+; X86-X87-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-X87-NEXT: fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-X87-NEXT: fucompp
; X86-X87-NEXT: fnstsw %ax
@@ -4088,7 +4088,7 @@ define i128 @test_unsigned_i128_f80(x86_fp80 %f) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: .LBB39_2:
-; X86-SSE-NEXT: fldt {{\.LCPI.*}}
+; X86-SSE-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE-NEXT: fxch %st(1)
; X86-SSE-NEXT: fucompi %st(1), %st
; X86-SSE-NEXT: fstp %st(0)
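
(Aside, for readers skimming the hunks above: the practical difference between the two patterns only shows up when the label has trailing context on the CHECK line, as in "pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1". A minimal Python sketch of the idea, with Python's re standing in for FileCheck's regex engine and a hypothetical label/operand pair:

    import re

    # Patterns as they would appear inside a CHECK line, with the
    # operand ", %xmm1" giving trailing context after the label.
    greedy = re.compile(r"pand \.LCPI.*, %xmm1")
    tight  = re.compile(r"pand \.LCPI[0-9]+_[0-9]+, %xmm1")

    expected   = "pand .LCPI0_0, %xmm1"        # line the test was generated from
    regression = "pand .LCPI0_0(%rip), %xmm1"  # same line after a codegen change

    assert greedy.search(expected) and tight.search(expected)
    assert greedy.search(regression)           # ".*" silently absorbs "(%rip)"
    assert tight.search(regression) is None    # tight pattern flags the change

Where the label ends the line (e.g. the bare "flds {{\.LCPI[0-9]+_[0-9]+}}" loads above), both patterns behave alike, since FileCheck matches the pattern as a substring of the line; the tightened regex only pays off when something follows the label.)
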
diff --git a/llvm/test/CodeGen/X86/funnel-shift-rot.ll b/llvm/test/CodeGen/X86/funnel-shift-rot.ll
index b5e17ab80208..a777a0d0adc9 100644
--- a/llvm/test/CodeGen/X86/funnel-shift-rot.ll
+++ b/llvm/test/CodeGen/X86/funnel-shift-rot.ll
@@ -130,9 +130,9 @@ define i32 @rotl_i32(i32 %x, i32 %z) nounwind {
define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind {
; X32-SSE2-LABEL: rotl_v4i32:
; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-SSE2-NEXT: pslld $23, %xmm1
-; X32-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X32-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X32-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -324,9 +324,9 @@ define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind {
; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: pxor %xmm2, %xmm2
; X32-SSE2-NEXT: psubd %xmm1, %xmm2
-; X32-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X32-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X32-SSE2-NEXT: pslld $23, %xmm2
-; X32-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm2
+; X32-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X32-SSE2-NEXT: cvttps2dq %xmm2, %xmm1
; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X32-SSE2-NEXT: pmuludq %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/haddsub-broadcast.ll b/llvm/test/CodeGen/X86/haddsub-broadcast.ll
index ec617bb2b03a..8290d63cdccf 100644
--- a/llvm/test/CodeGen/X86/haddsub-broadcast.ll
+++ b/llvm/test/CodeGen/X86/haddsub-broadcast.ll
@@ -8,7 +8,7 @@ define <4 x double> @PR43402(i64 %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; CHECK-NEXT: vsubpd {{\.LCPI.*}}, %xmm0, %xmm0
+; CHECK-NEXT: vsubpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; CHECK-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; CHECK-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/half.ll b/llvm/test/CodeGen/X86/half.ll
index d9629730fce2..9f59af1ec6d5 100644
--- a/llvm/test/CodeGen/X86/half.ll
+++ b/llvm/test/CodeGen/X86/half.ll
@@ -364,7 +364,7 @@ define void @test_uitofp_i64(i64 %a, half* %p) #0 {
; CHECK-I686-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; CHECK-I686-NEXT: shrl $31, %eax
; CHECK-I686-NEXT: fildll {{[0-9]+}}(%esp)
-; CHECK-I686-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; CHECK-I686-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; CHECK-I686-NEXT: fstps (%esp)
; CHECK-I686-NEXT: calll __gnu_f2h_ieee
; CHECK-I686-NEXT: movw %ax, (%esi)
diff --git a/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll b/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
index 29994c41f0b9..4752e63c9f4a 100644
--- a/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/X86/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -499,7 +499,7 @@ define <4 x i1> @vec_4xi32_splat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -508,7 +508,7 @@ define <4 x i1> @vec_4xi32_splat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-NEXT: pmuludq %xmm3, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pcmpeqd %xmm2, %xmm0
; X86-SSE2-NEXT: retl
;
@@ -586,7 +586,7 @@ define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwi
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -595,7 +595,7 @@ define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwi
; X86-SSE2-NEXT: pmuludq %xmm3, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pcmpeqd %xmm2, %xmm0
; X86-SSE2-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll b/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
index 5175ccadd9e7..29667b4f728c 100644
--- a/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
+++ b/llvm/test/CodeGen/X86/hoist-and-by-const-from-shl-in-eqcmp-zero.ll
@@ -461,7 +461,7 @@ define <4 x i1> @vec_4xi32_splat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_splat_eq:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
@@ -510,7 +510,7 @@ define <4 x i1> @vec_4xi32_nonsplat_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-LABEL: vec_4xi32_nonsplat_eq:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,1,16776960,2147483648]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
@@ -563,12 +563,12 @@ define <4 x i1> @vec_4xi32_nonsplat_undef0_eq(<4 x i32> %x, <4 x i32> %y) nounwi
; X86-SSE2-NEXT: movl $1, %eax
; X86-SSE2-NEXT: movd %eax, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X86-SSE2-NEXT: pand %xmm2, %xmm0
@@ -611,7 +611,7 @@ define <4 x i1> @vec_4xi32_nonsplat_undef1_eq(<4 x i32> %x, <4 x i32> %y) nounwi
; X86-SSE2-LABEL: vec_4xi32_nonsplat_undef1_eq:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [1,1,1,1]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
@@ -661,12 +661,12 @@ define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwi
; X86-SSE2-NEXT: movl $1, %eax
; X86-SSE2-NEXT: movd %eax, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X86-SSE2-NEXT: pmuludq {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmuludq {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X86-SSE2-NEXT: pand %xmm2, %xmm0
diff --git a/llvm/test/CodeGen/X86/i64-to-float.ll b/llvm/test/CodeGen/X86/i64-to-float.ll
index c181f71e0d81..f7a2d330206e 100644
--- a/llvm/test/CodeGen/X86/i64-to-float.ll
+++ b/llvm/test/CodeGen/X86/i64-to-float.ll
@@ -14,7 +14,7 @@ define <2 x double> @mask_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X86-SSE-LABEL: mask_sitofp_2i64_2f64:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; X86-SSE-NEXT: retl
;
@@ -32,7 +32,7 @@ define <2 x double> @mask_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
;
; X86-AVX512DQ-LABEL: mask_sitofp_2i64_2f64:
; X86-AVX512DQ: # %bb.0:
-; X86-AVX512DQ-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX512DQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX512DQ-NEXT: vcvtqq2pd %xmm0, %xmm0
; X86-AVX512DQ-NEXT: retl
;
@@ -69,7 +69,7 @@ define <2 x double> @mask_uitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X86-SSE-LABEL: mask_uitofp_2i64_2f64:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvtdq2pd %xmm0, %xmm0
; X86-SSE-NEXT: retl
;
@@ -87,7 +87,7 @@ define <2 x double> @mask_uitofp_2i64_2f64(<2 x i64> %a) nounwind {
;
; X86-AVX512DQ-LABEL: mask_uitofp_2i64_2f64:
; X86-AVX512DQ: # %bb.0:
-; X86-AVX512DQ-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX512DQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX512DQ-NEXT: vcvtqq2pd %xmm0, %xmm0
; X86-AVX512DQ-NEXT: retl
;
@@ -124,7 +124,7 @@ define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X86-SSE-LABEL: mask_sitofp_4i64_4f32:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; X86-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X86-SSE-NEXT: retl
;
@@ -132,7 +132,7 @@ define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X86-AVX-NEXT: vzeroupper
; X86-AVX-NEXT: retl
@@ -140,14 +140,14 @@ define <4 x float> @mask_sitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X86-AVX512F-LABEL: mask_sitofp_4i64_4f32:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: vpmovqd %ymm0, %xmm0
-; X86-AVX512F-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX512F-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX512F-NEXT: vcvtdq2ps %xmm0, %xmm0
; X86-AVX512F-NEXT: vzeroupper
; X86-AVX512F-NEXT: retl
;
; X86-AVX512DQ-LABEL: mask_sitofp_4i64_4f32:
; X86-AVX512DQ: # %bb.0:
-; X86-AVX512DQ-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX512DQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX512DQ-NEXT: vcvtqq2ps %ymm0, %xmm0
; X86-AVX512DQ-NEXT: vzeroupper
; X86-AVX512DQ-NEXT: retl
@@ -191,7 +191,7 @@ define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X86-SSE-LABEL: mask_uitofp_4i64_4f32:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; X86-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X86-SSE-NEXT: retl
;
@@ -199,7 +199,7 @@ define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vextractf128 $1, %ymm0, %xmm1
; X86-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X86-AVX-NEXT: vzeroupper
; X86-AVX-NEXT: retl
@@ -207,14 +207,14 @@ define <4 x float> @mask_uitofp_4i64_4f32(<4 x i64> %a) nounwind {
; X86-AVX512F-LABEL: mask_uitofp_4i64_4f32:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: vpmovqd %ymm0, %xmm0
-; X86-AVX512F-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX512F-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX512F-NEXT: vcvtdq2ps %xmm0, %xmm0
; X86-AVX512F-NEXT: vzeroupper
; X86-AVX512F-NEXT: retl
;
; X86-AVX512DQ-LABEL: mask_uitofp_4i64_4f32:
; X86-AVX512DQ: # %bb.0:
-; X86-AVX512DQ-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX512DQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX512DQ-NEXT: vcvtqq2ps %ymm0, %xmm0
; X86-AVX512DQ-NEXT: vzeroupper
; X86-AVX512DQ-NEXT: retl
@@ -270,7 +270,7 @@ define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
; X86-SSE-NEXT: por %xmm2, %xmm3
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pandn {{\.LCPI.*}}, %xmm3
+; X86-SSE-NEXT: pandn {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE-NEXT: por %xmm0, %xmm3
; X86-SSE-NEXT: pxor %xmm3, %xmm1
; X86-SSE-NEXT: movdqa {{.*#+}} xmm0 = [2147483903,0,2147483903,0]
@@ -283,7 +283,7 @@ define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
; X86-SSE-NEXT: por %xmm0, %xmm1
; X86-SSE-NEXT: pand %xmm1, %xmm3
-; X86-SSE-NEXT: pandn {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: pandn {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: por %xmm3, %xmm1
; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; X86-SSE-NEXT: cvtdq2pd %xmm0, %xmm0
@@ -305,16 +305,16 @@ define <2 x double> @clamp_sitofp_2i64_2f64(<2 x i64> %a) nounwind {
;
; X86-AVX512F-LABEL: clamp_sitofp_2i64_2f64:
; X86-AVX512F: # %bb.0:
-; X86-AVX512F-NEXT: vpmaxsq {{\.LCPI.*}}, %xmm0, %xmm0
-; X86-AVX512F-NEXT: vpminsq {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX512F-NEXT: vpmaxsq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX512F-NEXT: vpminsq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX512F-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-AVX512F-NEXT: vcvtdq2pd %xmm0, %xmm0
; X86-AVX512F-NEXT: retl
;
; X86-AVX512DQ-LABEL: clamp_sitofp_2i64_2f64:
; X86-AVX512DQ: # %bb.0:
-; X86-AVX512DQ-NEXT: vpmaxsq {{\.LCPI.*}}, %xmm0, %xmm0
-; X86-AVX512DQ-NEXT: vpminsq {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX512DQ-NEXT: vpmaxsq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX512DQ-NEXT: vpminsq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX512DQ-NEXT: vcvtqq2pd %xmm0, %xmm0
; X86-AVX512DQ-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/insert-into-constant-vector.ll b/llvm/test/CodeGen/X86/insert-into-constant-vector.ll
index 37c73e07a17b..3aa1abae99f3 100644
--- a/llvm/test/CodeGen/X86/insert-into-constant-vector.ll
+++ b/llvm/test/CodeGen/X86/insert-into-constant-vector.ll
@@ -16,7 +16,7 @@ define <16 x i8> @elt0_v16i8(i8 %x) {
; X86-SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-SSE2-NEXT: movaps {{.*#+}} xmm0 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; X86-SSE2-NEXT: andnps %xmm1, %xmm0
-; X86-SSE2-NEXT: orps {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X64-SSE2-LABEL: elt0_v16i8:
@@ -393,7 +393,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
; X86-AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX1-NEXT: vmovaps {{.*#+}} xmm1 = [4,0,0,0]
; X86-AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; X86-AVX1-NEXT: vinsertf128 $1, {{\.LCPI.*}}, %ymm0, %ymm1
+; X86-AVX1-NEXT: vinsertf128 $1, {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm1
; X86-AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [42,0,1,0,2,0,3,0]
; X86-AVX1-NEXT: retl
;
@@ -410,7 +410,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
; X86-AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX2-NEXT: vmovaps {{.*#+}} xmm1 = [4,0,0,0]
; X86-AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
-; X86-AVX2-NEXT: vinsertf128 $1, {{\.LCPI.*}}, %ymm0, %ymm1
+; X86-AVX2-NEXT: vinsertf128 $1, {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm1
; X86-AVX2-NEXT: vmovaps {{.*#+}} ymm0 = [42,0,1,0,2,0,3,0]
; X86-AVX2-NEXT: retl
;
@@ -428,7 +428,7 @@ define <8 x i64> @elt5_v8i64(i64 %x) {
; X86-AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X86-AVX512F-NEXT: vmovaps {{.*#+}} xmm2 = [4,0,0,0]
; X86-AVX512F-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
-; X86-AVX512F-NEXT: vinsertf128 $1, {{\.LCPI.*}}, %ymm1, %ymm1
+; X86-AVX512F-NEXT: vinsertf128 $1, {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1
; X86-AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/insertelement-var-index.ll b/llvm/test/CodeGen/X86/insertelement-var-index.ll
index a0b7df81d580..133d6c75029c 100644
--- a/llvm/test/CodeGen/X86/insertelement-var-index.ll
+++ b/llvm/test/CodeGen/X86/insertelement-var-index.ll
@@ -1380,7 +1380,7 @@ define <8 x float> @arg_f32_v8f32(<8 x float> %v, float %x, i32 %y) nounwind {
; AVX1-NEXT: vmovd %edi, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vpcmpeqd {{.*}}(%rip), %xmm2, %xmm3
-; AVX1-NEXT: vpcmpeqd {{\.LCPI.*}}+{{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvps %ymm2, %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
@@ -1424,7 +1424,7 @@ define <4 x double> @arg_f64_v4f64(<4 x double> %v, double %x, i32 %y) nounwind
; AVX1-NEXT: vmovq %rax, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
; AVX1-NEXT: vpcmpeqq {{.*}}(%rip), %xmm2, %xmm3
-; AVX1-NEXT: vpcmpeqq {{\.LCPI.*}}+{{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vblendvpd %ymm2, %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
@@ -1661,7 +1661,7 @@ define <8 x float> @load_f32_v8f32(<8 x float> %v, float* %p, i32 %y) nounwind {
; AVX1-NEXT: vmovd %esi, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-NEXT: vpcmpeqd {{.*}}(%rip), %xmm1, %xmm2
-; AVX1-NEXT: vpcmpeqd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vbroadcastss (%rdi), %ymm2
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
@@ -1706,7 +1706,7 @@ define <4 x double> @load_f64_v4f64(<4 x double> %v, double* %p, i32 %y) nounwin
; AVX1-NEXT: vmovq %rax, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-NEXT: vpcmpeqq {{.*}}(%rip), %xmm1, %xmm2
-; AVX1-NEXT: vpcmpeqq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpcmpeqq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vbroadcastsd (%rdi), %ymm2
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/known-bits-vector.ll b/llvm/test/CodeGen/X86/known-bits-vector.ll
index 79c56b10afce..2a8ea368edb0 100644
--- a/llvm/test/CodeGen/X86/known-bits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-bits-vector.ll
@@ -74,7 +74,7 @@ define <4 x float> @knownbits_insert_uitofp(<4 x i32> %a0, i16 %a1, i16 %a2) nou
define <4 x i32> @knownbits_mask_shuffle_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_sext:
; X32: # %bb.0:
-; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X32-NEXT: retl
@@ -94,7 +94,7 @@ define <4 x i32> @knownbits_mask_shuffle_sext(<8 x i16> %a0) nounwind {
define <4 x i32> @knownbits_mask_shuffle_shuffle_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_shuffle_sext:
; X32: # %bb.0:
-; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X32-NEXT: retl
@@ -115,7 +115,7 @@ define <4 x i32> @knownbits_mask_shuffle_shuffle_sext(<8 x i16> %a0) nounwind {
define <4 x i32> @knownbits_mask_shuffle_shuffle_undef_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_shuffle_undef_sext:
; X32: # %bb.0:
-; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; X32-NEXT: vpmovsxwd %xmm0, %xmm0
; X32-NEXT: retl
@@ -136,7 +136,7 @@ define <4 x i32> @knownbits_mask_shuffle_shuffle_undef_sext(<8 x i16> %a0) nounw
define <4 x float> @knownbits_mask_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_uitofp:
; X32: # %bb.0:
-; X32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl
@@ -173,8 +173,8 @@ define <4 x float> @knownbits_mask_or_shuffle_uitofp(<4 x i32> %a0) nounwind {
define <4 x float> @knownbits_mask_xor_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_xor_shuffle_uitofp:
; X32: # %bb.0:
-; X32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT: vxorps {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X32-NEXT: vxorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl
@@ -384,8 +384,8 @@ declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
define <8 x float> @knownbits_mask_concat_uitofp(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: knownbits_mask_concat_uitofp:
; X32: # %bb.0:
-; X32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT: vandps {{\.LCPI.*}}, %xmm1, %xmm1
+; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,0,2]
; X32-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[1,3,1,3]
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -432,8 +432,8 @@ define <4 x float> @knownbits_lshr_bitcast_shuffle_uitofp(<2 x i64> %a0, <4 x i3
define <4 x float> @knownbits_smax_smin_shuffle_uitofp(<4 x i32> %a0) {
; X32-LABEL: knownbits_smax_smin_shuffle_uitofp:
; X32: # %bb.0:
-; X32-NEXT: vpminsd {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT: vpmaxsd {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vpminsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X32-NEXT: vpmaxsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl
@@ -457,7 +457,7 @@ declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x float> @knownbits_umin_shuffle_uitofp(<4 x i32> %a0) {
; X32-LABEL: knownbits_umin_shuffle_uitofp:
; X32: # %bb.0:
-; X32-NEXT: vpminud {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vpminud {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl
@@ -495,8 +495,8 @@ declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
define <4 x float> @knownbits_mask_umax_shuffle_uitofp(<4 x i32> %a0) {
; X32-LABEL: knownbits_mask_umax_shuffle_uitofp:
; X32: # %bb.0:
-; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT: vpmaxud {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X32-NEXT: vpmaxud {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-NEXT: retl
@@ -540,7 +540,7 @@ define <4 x float> @knownbits_abs_uitofp(<4 x i32> %a0) {
; X32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; X32-NEXT: vpsrld $16, %xmm0, %xmm0
; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X32-NEXT: vsubps {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vsubps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vaddps %xmm0, %xmm1, %xmm0
; X32-NEXT: retl
;
@@ -563,7 +563,7 @@ define <4 x float> @knownbits_abs_uitofp(<4 x i32> %a0) {
define <4 x float> @knownbits_or_abs_uitofp(<4 x i32> %a0) {
; X32-LABEL: knownbits_or_abs_uitofp:
; X32: # %bb.0:
-; X32-NEXT: vpor {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vpor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2]
; X32-NEXT: vpabsd %xmm0, %xmm0
; X32-NEXT: vcvtdq2ps %xmm0, %xmm0
@@ -593,8 +593,8 @@ define <4 x float> @knownbits_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x i32>
; X32-NEXT: andl $-16, %esp
; X32-NEXT: subl $16, %esp
; X32-NEXT: vmovaps 8(%ebp), %xmm3
-; X32-NEXT: vandps {{\.LCPI.*}}, %xmm2, %xmm2
-; X32-NEXT: vandps {{\.LCPI.*}}, %xmm3, %xmm3
+; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm2, %xmm2
+; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm3, %xmm3
; X32-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; X32-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2]
@@ -630,7 +630,7 @@ define <4 x float> @knownbits_lshr_and_select_shuffle_uitofp(<4 x i32> %a0, <4 x
; X32-NEXT: subl $16, %esp
; X32-NEXT: vmovaps 8(%ebp), %xmm3
; X32-NEXT: vpsrld $5, %xmm2, %xmm2
-; X32-NEXT: vandps {{\.LCPI.*}}, %xmm3, %xmm3
+; X32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm3, %xmm3
; X32-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; X32-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
; X32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,2,2]
diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll
index bed0abf5a26b..3f22dbe316e7 100644
--- a/llvm/test/CodeGen/X86/known-signbits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll
@@ -515,7 +515,7 @@ define <4 x i32> @signbits_mask_ashr_smax(<4 x i32> %a0, <4 x i32> %a1) {
; X86-NEXT: vpsrad $25, %xmm1, %xmm1
; X86-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-AVX1-LABEL: signbits_mask_ashr_smax:
@@ -553,7 +553,7 @@ define <4 x i32> @signbits_mask_ashr_smin(<4 x i32> %a0, <4 x i32> %a1) {
; X86-NEXT: vpsrad $25, %xmm1, %xmm1
; X86-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-AVX1-LABEL: signbits_mask_ashr_smin:
@@ -591,7 +591,7 @@ define <4 x i32> @signbits_mask_ashr_umax(<4 x i32> %a0, <4 x i32> %a1) {
; X86-NEXT: vpsrad $25, %xmm1, %xmm1
; X86-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-AVX1-LABEL: signbits_mask_ashr_umax:
@@ -629,7 +629,7 @@ define <4 x i32> @signbits_mask_ashr_umin(<4 x i32> %a0, <4 x i32> %a1) {
; X86-NEXT: vpsrad $25, %xmm1, %xmm1
; X86-NEXT: vpminud %xmm1, %xmm0, %xmm0
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
-; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-AVX1-LABEL: signbits_mask_ashr_umin:
@@ -674,7 +674,7 @@ define void @cross_bb_signbits_insert_subvec(<32 x i8>* %ptr, <32 x i8> %x, <32
; X86-NEXT: vpcmpeqb %xmm3, %xmm0, %xmm0
; X86-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; X86-NEXT: vandnps %ymm1, %ymm0, %ymm1
-; X86-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-NEXT: vorps %ymm1, %ymm0, %ymm0
; X86-NEXT: vmovaps %ymm0, (%eax)
; X86-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/limited-prec.ll b/llvm/test/CodeGen/X86/limited-prec.ll
index 97882f113b69..68ba8e21bb53 100644
--- a/llvm/test/CodeGen/X86/limited-prec.ll
+++ b/llvm/test/CodeGen/X86/limited-prec.ll
@@ -8,7 +8,7 @@ define float @f1(float %x) nounwind noinline {
; precision6: # %bb.0: # %entry
; precision6-NEXT: subl $20, %esp
; precision6-NEXT: flds {{[0-9]+}}(%esp)
-; precision6-NEXT: fmuls {{\.LCPI.*}}
+; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fnstcw (%esp)
; precision6-NEXT: movzwl (%esp), %eax
; precision6-NEXT: orl $3072, %eax # imm = 0xC00
@@ -20,10 +20,10 @@ define float @f1(float %x) nounwind noinline {
; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision6-NEXT: fisubl {{[0-9]+}}(%esp)
; precision6-NEXT: fld %st(0)
-; precision6-NEXT: fmuls {{\.LCPI.*}}
-; precision6-NEXT: fadds {{\.LCPI.*}}
+; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fmulp %st, %st(1)
-; precision6-NEXT: fadds {{\.LCPI.*}}
+; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fstps {{[0-9]+}}(%esp)
; precision6-NEXT: shll $23, %eax
; precision6-NEXT: addl {{[0-9]+}}(%esp), %eax
@@ -36,7 +36,7 @@ define float @f1(float %x) nounwind noinline {
; precision12: # %bb.0: # %entry
; precision12-NEXT: subl $20, %esp
; precision12-NEXT: flds {{[0-9]+}}(%esp)
-; precision12-NEXT: fmuls {{\.LCPI.*}}
+; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fnstcw (%esp)
; precision12-NEXT: movzwl (%esp), %eax
; precision12-NEXT: orl $3072, %eax # imm = 0xC00
@@ -48,12 +48,12 @@ define float @f1(float %x) nounwind noinline {
; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision12-NEXT: fisubl {{[0-9]+}}(%esp)
; precision12-NEXT: fld %st(0)
-; precision12-NEXT: fmuls {{\.LCPI.*}}
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmulp %st, %st(1)
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fstps {{[0-9]+}}(%esp)
; precision12-NEXT: shll $23, %eax
; precision12-NEXT: addl {{[0-9]+}}(%esp), %eax
@@ -66,7 +66,7 @@ define float @f1(float %x) nounwind noinline {
; precision18: # %bb.0: # %entry
; precision18-NEXT: subl $20, %esp
; precision18-NEXT: flds {{[0-9]+}}(%esp)
-; precision18-NEXT: fmuls {{\.LCPI.*}}
+; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fnstcw (%esp)
; precision18-NEXT: movzwl (%esp), %eax
; precision18-NEXT: orl $3072, %eax # imm = 0xC00
@@ -78,16 +78,16 @@ define float @f1(float %x) nounwind noinline {
; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision18-NEXT: fisubl {{[0-9]+}}(%esp)
; precision18-NEXT: fld %st(0)
-; precision18-NEXT: fmuls {{\.LCPI.*}}
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmulp %st, %st(1)
; precision18-NEXT: fld1
; precision18-NEXT: faddp %st, %st(1)
@@ -122,10 +122,10 @@ define float @f2(float %x) nounwind noinline {
; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision6-NEXT: fisubl {{[0-9]+}}(%esp)
; precision6-NEXT: fld %st(0)
-; precision6-NEXT: fmuls {{\.LCPI.*}}
-; precision6-NEXT: fadds {{\.LCPI.*}}
+; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fmulp %st, %st(1)
-; precision6-NEXT: fadds {{\.LCPI.*}}
+; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fstps {{[0-9]+}}(%esp)
; precision6-NEXT: shll $23, %eax
; precision6-NEXT: addl {{[0-9]+}}(%esp), %eax
@@ -149,12 +149,12 @@ define float @f2(float %x) nounwind noinline {
; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision12-NEXT: fisubl {{[0-9]+}}(%esp)
; precision12-NEXT: fld %st(0)
-; precision12-NEXT: fmuls {{\.LCPI.*}}
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmulp %st, %st(1)
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fstps {{[0-9]+}}(%esp)
; precision12-NEXT: shll $23, %eax
; precision12-NEXT: addl {{[0-9]+}}(%esp), %eax
@@ -178,16 +178,16 @@ define float @f2(float %x) nounwind noinline {
; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision18-NEXT: fisubl {{[0-9]+}}(%esp)
; precision18-NEXT: fld %st(0)
-; precision18-NEXT: fmuls {{\.LCPI.*}}
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmulp %st, %st(1)
; precision18-NEXT: fld1
; precision18-NEXT: faddp %st, %st(1)
@@ -211,7 +211,7 @@ define float @f3(float %x) nounwind noinline {
; precision6: # %bb.0: # %entry
; precision6-NEXT: subl $20, %esp
; precision6-NEXT: flds {{[0-9]+}}(%esp)
-; precision6-NEXT: fmuls {{\.LCPI.*}}
+; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fnstcw (%esp)
; precision6-NEXT: movzwl (%esp), %eax
; precision6-NEXT: orl $3072, %eax # imm = 0xC00
@@ -223,10 +223,10 @@ define float @f3(float %x) nounwind noinline {
; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision6-NEXT: fisubl {{[0-9]+}}(%esp)
; precision6-NEXT: fld %st(0)
-; precision6-NEXT: fmuls {{\.LCPI.*}}
-; precision6-NEXT: fadds {{\.LCPI.*}}
+; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fmulp %st, %st(1)
-; precision6-NEXT: fadds {{\.LCPI.*}}
+; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fstps {{[0-9]+}}(%esp)
; precision6-NEXT: shll $23, %eax
; precision6-NEXT: addl {{[0-9]+}}(%esp), %eax
@@ -239,7 +239,7 @@ define float @f3(float %x) nounwind noinline {
; precision12: # %bb.0: # %entry
; precision12-NEXT: subl $20, %esp
; precision12-NEXT: flds {{[0-9]+}}(%esp)
-; precision12-NEXT: fmuls {{\.LCPI.*}}
+; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fnstcw (%esp)
; precision12-NEXT: movzwl (%esp), %eax
; precision12-NEXT: orl $3072, %eax # imm = 0xC00
@@ -251,12 +251,12 @@ define float @f3(float %x) nounwind noinline {
; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision12-NEXT: fisubl {{[0-9]+}}(%esp)
; precision12-NEXT: fld %st(0)
-; precision12-NEXT: fmuls {{\.LCPI.*}}
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmulp %st, %st(1)
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fstps {{[0-9]+}}(%esp)
; precision12-NEXT: shll $23, %eax
; precision12-NEXT: addl {{[0-9]+}}(%esp), %eax
@@ -269,7 +269,7 @@ define float @f3(float %x) nounwind noinline {
; precision18: # %bb.0: # %entry
; precision18-NEXT: subl $20, %esp
; precision18-NEXT: flds {{[0-9]+}}(%esp)
-; precision18-NEXT: fmuls {{\.LCPI.*}}
+; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fnstcw (%esp)
; precision18-NEXT: movzwl (%esp), %eax
; precision18-NEXT: orl $3072, %eax # imm = 0xC00
@@ -281,16 +281,16 @@ define float @f3(float %x) nounwind noinline {
; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision18-NEXT: fisubl {{[0-9]+}}(%esp)
; precision18-NEXT: fld %st(0)
-; precision18-NEXT: fmuls {{\.LCPI.*}}
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmulp %st, %st(1)
; precision18-NEXT: fld1
; precision18-NEXT: faddp %st, %st(1)
@@ -324,12 +324,12 @@ define float @f4(float %x) nounwind noinline {
; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision6-NEXT: flds (%esp)
; precision6-NEXT: fld %st(0)
-; precision6-NEXT: fmuls {{\.LCPI.*}}
-; precision6-NEXT: fadds {{\.LCPI.*}}
+; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fmulp %st, %st(1)
-; precision6-NEXT: fadds {{\.LCPI.*}}
+; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fildl {{[0-9]+}}(%esp)
-; precision6-NEXT: fmuls {{\.LCPI.*}}
+; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: faddp %st, %st(1)
; precision6-NEXT: addl $8, %esp
; precision6-NEXT: retl
@@ -348,16 +348,16 @@ define float @f4(float %x) nounwind noinline {
; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision12-NEXT: flds (%esp)
; precision12-NEXT: fld %st(0)
-; precision12-NEXT: fmuls {{\.LCPI.*}}
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmulp %st, %st(1)
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fildl {{[0-9]+}}(%esp)
-; precision12-NEXT: fmuls {{\.LCPI.*}}
+; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: faddp %st, %st(1)
; precision12-NEXT: addl $8, %esp
; precision12-NEXT: retl
@@ -376,20 +376,20 @@ define float @f4(float %x) nounwind noinline {
; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision18-NEXT: flds (%esp)
; precision18-NEXT: fld %st(0)
-; precision18-NEXT: fmuls {{\.LCPI.*}}
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmulp %st, %st(1)
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fildl {{[0-9]+}}(%esp)
-; precision18-NEXT: fmuls {{\.LCPI.*}}
+; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: faddp %st, %st(1)
; precision18-NEXT: addl $8, %esp
; precision18-NEXT: retl
@@ -416,10 +416,10 @@ define float @f5(float %x) nounwind noinline {
; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision6-NEXT: flds (%esp)
; precision6-NEXT: fld %st(0)
-; precision6-NEXT: fmuls {{\.LCPI.*}}
-; precision6-NEXT: fadds {{\.LCPI.*}}
+; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fmulp %st, %st(1)
-; precision6-NEXT: fadds {{\.LCPI.*}}
+; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fiaddl {{[0-9]+}}(%esp)
; precision6-NEXT: addl $8, %esp
; precision6-NEXT: retl
@@ -438,14 +438,14 @@ define float @f5(float %x) nounwind noinline {
; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision12-NEXT: flds (%esp)
; precision12-NEXT: fld %st(0)
-; precision12-NEXT: fmuls {{\.LCPI.*}}
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmulp %st, %st(1)
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fiaddl {{[0-9]+}}(%esp)
; precision12-NEXT: addl $8, %esp
; precision12-NEXT: retl
@@ -464,18 +464,18 @@ define float @f5(float %x) nounwind noinline {
; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision18-NEXT: flds (%esp)
; precision18-NEXT: fld %st(0)
-; precision18-NEXT: fmuls {{\.LCPI.*}}
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmulp %st, %st(1)
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fiaddl {{[0-9]+}}(%esp)
; precision18-NEXT: addl $8, %esp
; precision18-NEXT: retl
@@ -502,12 +502,12 @@ define float @f6(float %x) nounwind noinline {
; precision6-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision6-NEXT: flds (%esp)
; precision6-NEXT: fld %st(0)
-; precision6-NEXT: fmuls {{\.LCPI.*}}
-; precision6-NEXT: fadds {{\.LCPI.*}}
+; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fmulp %st, %st(1)
-; precision6-NEXT: fadds {{\.LCPI.*}}
+; precision6-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: fildl {{[0-9]+}}(%esp)
-; precision6-NEXT: fmuls {{\.LCPI.*}}
+; precision6-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision6-NEXT: faddp %st, %st(1)
; precision6-NEXT: addl $8, %esp
; precision6-NEXT: retl
@@ -526,14 +526,14 @@ define float @f6(float %x) nounwind noinline {
; precision12-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision12-NEXT: flds (%esp)
; precision12-NEXT: fld %st(0)
-; precision12-NEXT: fmuls {{\.LCPI.*}}
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmul %st(1), %st
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fmulp %st, %st(1)
-; precision12-NEXT: fadds {{\.LCPI.*}}
+; precision12-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: fildl {{[0-9]+}}(%esp)
-; precision12-NEXT: fmuls {{\.LCPI.*}}
+; precision12-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision12-NEXT: faddp %st, %st(1)
; precision12-NEXT: addl $8, %esp
; precision12-NEXT: retl
@@ -552,18 +552,18 @@ define float @f6(float %x) nounwind noinline {
; precision18-NEXT: movl %eax, {{[0-9]+}}(%esp)
; precision18-NEXT: flds (%esp)
; precision18-NEXT: fld %st(0)
-; precision18-NEXT: fmuls {{\.LCPI.*}}
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmul %st(1), %st
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fmulp %st, %st(1)
-; precision18-NEXT: fadds {{\.LCPI.*}}
+; precision18-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: fildl {{[0-9]+}}(%esp)
-; precision18-NEXT: fmuls {{\.LCPI.*}}
+; precision18-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; precision18-NEXT: faddp %st, %st(1)
; precision18-NEXT: addl $8, %esp
; precision18-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index 0b6e3c9d830d..ae736031a9cb 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -517,13 +517,13 @@ define <8 x i32> @test9(%struct.ST* %base, <8 x i64> %ind1, <8 x i32>%ind5) {
; SKX_LARGE: # %bb.0: # %entry
; SKX_LARGE-NEXT: vpbroadcastq %rdi, %zmm2
; SKX_LARGE-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
-; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vpmuldq (%rax){1to8}, %zmm1, %zmm1
-; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vpmullq (%rax){1to8}, %zmm0, %zmm0
; SKX_LARGE-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; SKX_LARGE-NEXT: vpaddq %zmm0, %zmm2, %zmm0
-; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vpaddq (%rax){1to8}, %zmm0, %zmm1
; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1
; SKX_LARGE-NEXT: vpgatherqd (,%zmm1), %ymm0 {%k1}
@@ -531,12 +531,12 @@ define <8 x i32> @test9(%struct.ST* %base, <8 x i64> %ind1, <8 x i32>%ind5) {
;
; SKX_32-LABEL: test9:
; SKX_32: # %bb.0: # %entry
-; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm1, %ymm1
+; SKX_32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm1, %ymm1
; SKX_32-NEXT: vpmovqd %zmm0, %ymm0
-; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm0, %ymm0
+; SKX_32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
; SKX_32-NEXT: vpaddd {{[0-9]+}}(%esp){1to8}, %ymm0, %ymm0
; SKX_32-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; SKX_32-NEXT: vpaddd {{\.LCPI.*}}{1to8}, %ymm0, %ymm1
+; SKX_32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm1
; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vpgatherdd (,%ymm1), %ymm0 {%k1}
; SKX_32-NEXT: retl
@@ -603,13 +603,13 @@ define <8 x i32> @test10(%struct.ST* %base, <8 x i64> %i1, <8 x i32>%ind5) {
; SKX_LARGE: # %bb.0: # %entry
; SKX_LARGE-NEXT: vpbroadcastq %rdi, %zmm2
; SKX_LARGE-NEXT: vpmovzxdq {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero
-; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vpmuldq (%rax){1to8}, %zmm1, %zmm1
-; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vpmullq (%rax){1to8}, %zmm0, %zmm0
; SKX_LARGE-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; SKX_LARGE-NEXT: vpaddq %zmm0, %zmm2, %zmm0
-; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vpaddq (%rax){1to8}, %zmm0, %zmm1
; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1
; SKX_LARGE-NEXT: vpgatherqd (,%zmm1), %ymm0 {%k1}
@@ -617,12 +617,12 @@ define <8 x i32> @test10(%struct.ST* %base, <8 x i64> %i1, <8 x i32>%ind5) {
;
; SKX_32-LABEL: test10:
; SKX_32: # %bb.0: # %entry
-; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm1, %ymm1
+; SKX_32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm1, %ymm1
; SKX_32-NEXT: vpmovqd %zmm0, %ymm0
-; SKX_32-NEXT: vpmulld {{\.LCPI.*}}{1to8}, %ymm0, %ymm0
+; SKX_32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
; SKX_32-NEXT: vpaddd {{[0-9]+}}(%esp){1to8}, %ymm0, %ymm0
; SKX_32-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; SKX_32-NEXT: vpaddd {{\.LCPI.*}}{1to8}, %ymm0, %ymm1
+; SKX_32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm1
; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vpgatherdd (,%ymm1), %ymm0 {%k1}
; SKX_32-NEXT: retl
@@ -2893,7 +2893,7 @@ define <16 x float> @zext_index(float* %base, <16 x i32> %ind) {
; KNL_32-LABEL: zext_index:
; KNL_32: # %bb.0:
; KNL_32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; KNL_32-NEXT: vpandd {{\.LCPI.*}}{1to16}, %zmm0, %zmm1
+; KNL_32-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm1
; KNL_32-NEXT: kxnorw %k0, %k0, %k1
; KNL_32-NEXT: vgatherdps (%eax,%zmm1,4), %zmm0 {%k1}
; KNL_32-NEXT: retl
@@ -2907,7 +2907,7 @@ define <16 x float> @zext_index(float* %base, <16 x i32> %ind) {
;
; SKX_LARGE-LABEL: zext_index:
; SKX_LARGE: # %bb.0:
-; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vandps (%rax){1to16}, %zmm0, %zmm1
; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1
; SKX_LARGE-NEXT: vgatherdps (%rdi,%zmm1,4), %zmm0 {%k1}
@@ -2916,7 +2916,7 @@ define <16 x float> @zext_index(float* %base, <16 x i32> %ind) {
; SKX_32-LABEL: zext_index:
; SKX_32: # %bb.0:
; SKX_32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; SKX_32-NEXT: vandps {{\.LCPI.*}}{1to16}, %zmm0, %zmm1
+; SKX_32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm1
; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vgatherdps (%eax,%zmm1,4), %zmm0 {%k1}
; SKX_32-NEXT: retl
@@ -3184,7 +3184,7 @@ define <2 x i64> @gather_2i64_constant_indices(i64* %ptr, <2 x i1> %mask) {
; SKX_LARGE: # %bb.0:
; SKX_LARGE-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX_LARGE-NEXT: vpmovq2m %xmm0, %k1
-; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vmovdqa (%rax), %xmm1
; SKX_LARGE-NEXT: vpxor %xmm0, %xmm0, %xmm0
; SKX_LARGE-NEXT: vpgatherdq (%rdi,%xmm1,8), %xmm0 {%k1}
@@ -3241,7 +3241,7 @@ define <16 x i32> @gather_16i64_constant_indices(i32* %ptr, <16 x i1> %mask) {
; SKX_LARGE-NEXT: vpmovsxbd %xmm0, %zmm0
; SKX_LARGE-NEXT: vpslld $31, %zmm0, %zmm0
; SKX_LARGE-NEXT: vpmovd2m %zmm0, %k1
-; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vmovdqa64 (%rax), %zmm1
; SKX_LARGE-NEXT: vpxor %xmm0, %xmm0, %xmm0
; SKX_LARGE-NEXT: vpgatherdd (%rdi,%zmm1,4), %zmm0 {%k1}
@@ -3300,7 +3300,7 @@ define void @scatter_2i64_constant_indices(i32* %ptr, <2 x i1> %mask, <2 x i32>
; SKX_LARGE: # %bb.0:
; SKX_LARGE-NEXT: vpsllq $63, %xmm0, %xmm0
; SKX_LARGE-NEXT: vpmovq2m %xmm0, %k1
-; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vmovdqa (%rax), %xmm0
; SKX_LARGE-NEXT: vpscatterdd %xmm1, (%rdi,%xmm0,4) {%k1}
; SKX_LARGE-NEXT: retq
@@ -3355,7 +3355,7 @@ define void @scatter_16i64_constant_indices(i32* %ptr, <16 x i1> %mask, <16 x i3
; SKX_LARGE-NEXT: vpmovsxbd %xmm0, %zmm0
; SKX_LARGE-NEXT: vpslld $31, %zmm0, %zmm0
; SKX_LARGE-NEXT: vpmovd2m %zmm0, %k1
-; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vmovdqa64 (%rax), %zmm0
; SKX_LARGE-NEXT: vpscatterdd %zmm1, (%rdi,%zmm0,4) {%k1}
; SKX_LARGE-NEXT: vzeroupper
@@ -3506,7 +3506,7 @@ define <8 x i64> @pr45906(<8 x %struct.foo*> %ptr) {
;
; SKX_LARGE-LABEL: pr45906:
; SKX_LARGE: # %bb.0: # %bb
-; SKX_LARGE-NEXT: movabsq ${{\.LCPI.*}}, %rax
+; SKX_LARGE-NEXT: movabsq ${{\.LCPI[0-9]+_[0-9]+}}, %rax
; SKX_LARGE-NEXT: vpaddq (%rax){1to8}, %zmm0, %zmm1
; SKX_LARGE-NEXT: kxnorw %k0, %k0, %k1
; SKX_LARGE-NEXT: vpgatherqq (,%zmm1), %zmm0 {%k1}
@@ -3514,7 +3514,7 @@ define <8 x i64> @pr45906(<8 x %struct.foo*> %ptr) {
;
; SKX_32-LABEL: pr45906:
; SKX_32: # %bb.0: # %bb
-; SKX_32-NEXT: vpaddd {{\.LCPI.*}}{1to8}, %ymm0, %ymm1
+; SKX_32-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm1
; SKX_32-NEXT: kxnorw %k0, %k0, %k1
; SKX_32-NEXT: vpgatherdq (,%ymm1), %zmm0 {%k1}
; SKX_32-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/memcmp-minsize.ll b/llvm/test/CodeGen/X86/memcmp-minsize.ll
index f6b74ec77378..455c07d093da 100644
--- a/llvm/test/CodeGen/X86/memcmp-minsize.ll
+++ b/llvm/test/CodeGen/X86/memcmp-minsize.ll
@@ -456,7 +456,7 @@ define i1 @length16_eq_const(i8* %X) nounwind minsize {
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X86-SSE2-NEXT: sete %al
diff --git a/llvm/test/CodeGen/X86/memcmp-more-load-pairs.ll b/llvm/test/CodeGen/X86/memcmp-more-load-pairs.ll
index 0ba4f7b6d884..878e0cd256c1 100644
--- a/llvm/test/CodeGen/X86/memcmp-more-load-pairs.ll
+++ b/llvm/test/CodeGen/X86/memcmp-more-load-pairs.ll
@@ -1480,7 +1480,7 @@ define i1 @length16_eq_const(i8* %X) nounwind {
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X86-SSE2-NEXT: sete %al
@@ -1490,7 +1490,7 @@ define i1 @length16_eq_const(i8* %X) nounwind {
; X86-SSE41: # %bb.0:
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: sete %al
; X86-SSE41-NEXT: retl
@@ -1823,8 +1823,8 @@ define i1 @length24_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@@ -1836,8 +1836,8 @@ define i1 @length24_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: movdqu 8(%eax), %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: setne %al
@@ -2312,8 +2312,8 @@ define i1 @length31_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 15(%eax), %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@@ -2325,8 +2325,8 @@ define i1 @length31_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: movdqu 15(%eax), %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: setne %al
@@ -2816,8 +2816,8 @@ define i1 @length32_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@@ -2829,8 +2829,8 @@ define i1 @length32_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: setne %al
@@ -3293,9 +3293,9 @@ define i1 @length48_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: pand %xmm1, %xmm2
; X86-SSE2-NEXT: pand %xmm0, %xmm2
; X86-SSE2-NEXT: pmovmskb %xmm2, %eax
@@ -3309,9 +3309,9 @@ define i1 @length48_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm2
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE41-NEXT: por %xmm1, %xmm2
; X86-SSE41-NEXT: por %xmm0, %xmm2
; X86-SSE41-NEXT: ptest %xmm2, %xmm2
@@ -3673,12 +3673,12 @@ define i1 @length63_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2
; X86-SSE2-NEXT: movdqu 47(%eax), %xmm3
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm3
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: pand %xmm3, %xmm2
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pand %xmm2, %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@@ -3692,12 +3692,12 @@ define i1 @length63_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2
; X86-SSE41-NEXT: movdqu 47(%eax), %xmm3
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm3
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm2
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE41-NEXT: por %xmm3, %xmm2
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE41-NEXT: por %xmm2, %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: sete %al
@@ -4079,12 +4079,12 @@ define i1 @length64_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE2-NEXT: movdqu 32(%eax), %xmm2
; X86-SSE2-NEXT: movdqu 48(%eax), %xmm3
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm3
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: pand %xmm3, %xmm2
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pand %xmm2, %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@@ -4098,12 +4098,12 @@ define i1 @length64_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1
; X86-SSE41-NEXT: movdqu 32(%eax), %xmm2
; X86-SSE41-NEXT: movdqu 48(%eax), %xmm3
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm3
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm2
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE41-NEXT: por %xmm3, %xmm2
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE41-NEXT: por %xmm2, %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: sete %al
diff --git a/llvm/test/CodeGen/X86/memcmp-optsize.ll b/llvm/test/CodeGen/X86/memcmp-optsize.ll
index 1e56b40bc90c..1d67355d6ff4 100644
--- a/llvm/test/CodeGen/X86/memcmp-optsize.ll
+++ b/llvm/test/CodeGen/X86/memcmp-optsize.ll
@@ -590,7 +590,7 @@ define i1 @length16_eq_const(i8* %X) nounwind optsize {
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X86-SSE2-NEXT: sete %al
@@ -715,8 +715,8 @@ define i1 @length24_eq_const(i8* %X) nounwind optsize {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@@ -854,8 +854,8 @@ define i1 @length32_eq_const(i8* %X) nounwind optsize {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
diff --git a/llvm/test/CodeGen/X86/memcmp-pgso.ll b/llvm/test/CodeGen/X86/memcmp-pgso.ll
index 469d2be91b45..7ea412e893ca 100644
--- a/llvm/test/CodeGen/X86/memcmp-pgso.ll
+++ b/llvm/test/CodeGen/X86/memcmp-pgso.ll
@@ -590,7 +590,7 @@ define i1 @length16_eq_const(i8* %X) nounwind !prof !14 {
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X86-SSE2-NEXT: sete %al
@@ -715,8 +715,8 @@ define i1 @length24_eq_const(i8* %X) nounwind !prof !14 {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@@ -854,8 +854,8 @@ define i1 @length32_eq_const(i8* %X) nounwind !prof !14 {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
diff --git a/llvm/test/CodeGen/X86/memcmp.ll b/llvm/test/CodeGen/X86/memcmp.ll
index 18ee773d14ea..e05573544335 100644
--- a/llvm/test/CodeGen/X86/memcmp.ll
+++ b/llvm/test/CodeGen/X86/memcmp.ll
@@ -1485,7 +1485,7 @@ define i1 @length16_eq_const(i8* %X) nounwind {
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
; X86-SSE2-NEXT: sete %al
@@ -1495,7 +1495,7 @@ define i1 @length16_eq_const(i8* %X) nounwind {
; X86-SSE41: # %bb.0:
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: sete %al
; X86-SSE41-NEXT: retl
@@ -1756,8 +1756,8 @@ define i1 @length24_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@@ -1769,8 +1769,8 @@ define i1 @length24_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: movdqu 8(%eax), %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: setne %al
@@ -2152,8 +2152,8 @@ define i1 @length31_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 15(%eax), %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@@ -2165,8 +2165,8 @@ define i1 @length31_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: movdqu 15(%eax), %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: setne %al
@@ -2563,8 +2563,8 @@ define i1 @length32_eq_const(i8* %X) nounwind {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: movdqu (%eax), %xmm0
; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
-; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0
; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
@@ -2576,8 +2576,8 @@ define i1 @length32_eq_const(i8* %X) nounwind {
; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE41-NEXT: movdqu (%eax), %xmm0
; X86-SSE41-NEXT: movdqu 16(%eax), %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm1
-; X86-SSE41-NEXT: pxor {{\.LCPI.*}}, %xmm0
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE41-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE41-NEXT: por %xmm1, %xmm0
; X86-SSE41-NEXT: ptest %xmm0, %xmm0
; X86-SSE41-NEXT: setne %al
diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll
index 210ad51b0a18..7e0f3b7cbfb0 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-256.ll
@@ -501,7 +501,7 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF(i16* %ptr) nounwind uwtable
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovups (%eax), %ymm0
-; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0
%ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3
diff --git a/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll b/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll
index 3eb837d16e0c..d24a710746d3 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-loads-512.ll
@@ -138,7 +138,7 @@ define <8 x double> @merge_8f64_f64_1u3u5zu8(double* %ptr) nounwind uwtable noin
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0
-; X86-AVX512F-NEXT: vpandq {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-AVX512F-NEXT: vpandq {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 1
%ptr2 = getelementptr inbounds double, double* %ptr, i64 3
@@ -217,7 +217,7 @@ define <8 x i64> @merge_8i64_i64_1u3u5zu8(i64* %ptr) nounwind uwtable noinline s
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0
-; X86-AVX512F-NEXT: vpandd {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-AVX512F-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
%ptr2 = getelementptr inbounds i64, i64* %ptr, i64 3
@@ -436,7 +436,7 @@ define <16 x i32> @merge_16i32_i32_0uu3zzuuuuuzCuEF(i32* %ptr) nounwind uwtable
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovdqu64 (%eax), %zmm0
-; X86-AVX512F-NEXT: vpandd {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-AVX512F-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 0
%ptr3 = getelementptr inbounds i32, i32* %ptr, i64 3
diff --git a/llvm/test/CodeGen/X86/mmx-arith.ll b/llvm/test/CodeGen/X86/mmx-arith.ll
index c5c78938c444..c81520b98cdb 100644
--- a/llvm/test/CodeGen/X86/mmx-arith.ll
+++ b/llvm/test/CodeGen/X86/mmx-arith.ll
@@ -33,7 +33,7 @@ define void @test0(x86_mmx* %A, x86_mmx* %B) {
; X32-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X32-NEXT: pmullw %xmm0, %xmm1
-; X32-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: packuswb %xmm1, %xmm1
; X32-NEXT: movq %xmm1, (%eax)
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
@@ -658,7 +658,7 @@ define i64 @pr43922() {
; X32-NEXT: .cfi_def_cfa_register %ebp
; X32-NEXT: andl $-8, %esp
; X32-NEXT: subl $8, %esp
-; X32-NEXT: movq {{\.LCPI.*}}, %mm0 # mm0 = 0x7AAAAAAA7AAAAAAA
+; X32-NEXT: movq {{\.LCPI[0-9]+_[0-9]+}}, %mm0 # mm0 = 0x7AAAAAAA7AAAAAAA
; X32-NEXT: psrad $255, %mm0
; X32-NEXT: movq %mm0, (%esp)
; X32-NEXT: movl (%esp), %eax
diff --git a/llvm/test/CodeGen/X86/mmx-fold-zero.ll b/llvm/test/CodeGen/X86/mmx-fold-zero.ll
index 73dc8a81a807..7f7d71612eea 100644
--- a/llvm/test/CodeGen/X86/mmx-fold-zero.ll
+++ b/llvm/test/CodeGen/X86/mmx-fold-zero.ll
@@ -32,7 +32,7 @@ define double @mmx_zero(double, double, double, double) nounwind {
; X86-NEXT: paddw %mm2, %mm0
; X86-NEXT: paddw %mm6, %mm0
; X86-NEXT: pmuludq %mm3, %mm0
-; X86-NEXT: paddw {{\.LCPI.*}}, %mm0
+; X86-NEXT: paddw {{\.LCPI[0-9]+_[0-9]+}}, %mm0
; X86-NEXT: paddw %mm1, %mm0
; X86-NEXT: pmuludq %mm7, %mm0
; X86-NEXT: pmuludq (%esp), %mm0 # 8-byte Folded Reload
@@ -70,7 +70,7 @@ define double @mmx_zero(double, double, double, double) nounwind {
; X64-NEXT: paddw %mm2, %mm0
; X64-NEXT: paddw %mm6, %mm0
; X64-NEXT: pmuludq %mm3, %mm0
-; X64-NEXT: paddw {{\.LCPI.*}}, %mm0
+; X64-NEXT: paddw {{\.LCPI[0-9]+_[0-9]+}}, %mm0
; X64-NEXT: paddw %mm1, %mm0
; X64-NEXT: pmuludq %mm7, %mm0
; X64-NEXT: pmuludq {{[-0-9]+}}(%r{{[sb]}}p), %mm0 # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/X86/neg_fp.ll b/llvm/test/CodeGen/X86/neg_fp.ll
index e07157c952f7..a82d51a236f5 100644
--- a/llvm/test/CodeGen/X86/neg_fp.ll
+++ b/llvm/test/CodeGen/X86/neg_fp.ll
@@ -10,7 +10,7 @@ define float @negfp(float %a, float %b) nounwind {
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: subss {{[0-9]+}}(%esp), %xmm0
-; CHECK-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; CHECK-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: movss %xmm0, (%esp)
; CHECK-NEXT: flds (%esp)
; CHECK-NEXT: popl %eax
diff --git a/llvm/test/CodeGen/X86/nontemporal.ll b/llvm/test/CodeGen/X86/nontemporal.ll
index 104a90a6283f..ae80496bcdc2 100644
--- a/llvm/test/CodeGen/X86/nontemporal.ll
+++ b/llvm/test/CodeGen/X86/nontemporal.ll
@@ -20,21 +20,21 @@ define i32 @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D, <2 x i64> %E, <4
; X86-SSE-NEXT: movl 8(%ebp), %esi
; X86-SSE-NEXT: movl 80(%ebp), %edx
; X86-SSE-NEXT: movl (%edx), %eax
-; X86-SSE-NEXT: addps {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: addps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movntps %xmm0, (%esi)
-; X86-SSE-NEXT: paddq {{\.LCPI.*}}, %xmm2
+; X86-SSE-NEXT: paddq {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE-NEXT: addl (%edx), %eax
; X86-SSE-NEXT: movntdq %xmm2, (%esi)
-; X86-SSE-NEXT: addpd {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: addpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: addl (%edx), %eax
; X86-SSE-NEXT: movntpd %xmm1, (%esi)
-; X86-SSE-NEXT: paddd {{\.LCPI.*}}, %xmm6
+; X86-SSE-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm6
; X86-SSE-NEXT: addl (%edx), %eax
; X86-SSE-NEXT: movntdq %xmm6, (%esi)
-; X86-SSE-NEXT: paddw {{\.LCPI.*}}, %xmm5
+; X86-SSE-NEXT: paddw {{\.LCPI[0-9]+_[0-9]+}}, %xmm5
; X86-SSE-NEXT: addl (%edx), %eax
; X86-SSE-NEXT: movntdq %xmm5, (%esi)
-; X86-SSE-NEXT: paddb {{\.LCPI.*}}, %xmm4
+; X86-SSE-NEXT: paddb {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
; X86-SSE-NEXT: addl (%edx), %eax
; X86-SSE-NEXT: movntdq %xmm4, (%esi)
; X86-SSE-NEXT: addl (%edx), %eax
@@ -62,21 +62,21 @@ define i32 @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D, <2 x i64> %E, <4
; X86-AVX-NEXT: movl 8(%ebp), %edx
; X86-AVX-NEXT: movl 80(%ebp), %esi
; X86-AVX-NEXT: movl (%esi), %eax
-; X86-AVX-NEXT: vaddps {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vaddps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovntps %xmm0, (%edx)
-; X86-AVX-NEXT: vpaddq {{\.LCPI.*}}, %xmm2, %xmm0
+; X86-AVX-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %xmm2, %xmm0
; X86-AVX-NEXT: addl (%esi), %eax
; X86-AVX-NEXT: vmovntdq %xmm0, (%edx)
-; X86-AVX-NEXT: vaddpd {{\.LCPI.*}}, %xmm1, %xmm0
+; X86-AVX-NEXT: vaddpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm0
; X86-AVX-NEXT: addl (%esi), %eax
; X86-AVX-NEXT: vmovntpd %xmm0, (%edx)
-; X86-AVX-NEXT: vpaddd {{\.LCPI.*}}, %xmm6, %xmm0
+; X86-AVX-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm6, %xmm0
; X86-AVX-NEXT: addl (%esi), %eax
; X86-AVX-NEXT: vmovntdq %xmm0, (%edx)
-; X86-AVX-NEXT: vpaddw {{\.LCPI.*}}, %xmm5, %xmm0
+; X86-AVX-NEXT: vpaddw {{\.LCPI[0-9]+_[0-9]+}}, %xmm5, %xmm0
; X86-AVX-NEXT: addl (%esi), %eax
; X86-AVX-NEXT: vmovntdq %xmm0, (%edx)
-; X86-AVX-NEXT: vpaddb {{\.LCPI.*}}, %xmm4, %xmm0
+; X86-AVX-NEXT: vpaddb {{\.LCPI[0-9]+_[0-9]+}}, %xmm4, %xmm0
; X86-AVX-NEXT: addl (%esi), %eax
; X86-AVX-NEXT: vmovntdq %xmm0, (%edx)
; X86-AVX-NEXT: addl (%esi), %eax
diff --git a/llvm/test/CodeGen/X86/packss.ll b/llvm/test/CodeGen/X86/packss.ll
index 16349ae2c7f9..c395cf84ce34 100644
--- a/llvm/test/CodeGen/X86/packss.ll
+++ b/llvm/test/CodeGen/X86/packss.ll
@@ -121,14 +121,14 @@ define <8 x i16> @trunc_ashr_v4i32_icmp_v4i32(<4 x i32> %a, <4 x i32> %b) nounwi
; X86-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psrad $31, %xmm0
-; X86-SSE-NEXT: pcmpgtd {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: pcmpgtd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: packssdw %xmm1, %xmm0
; X86-SSE-NEXT: retl
;
; X86-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrad $31, %xmm0, %xmm0
-; X86-AVX-NEXT: vpcmpgtd {{\.LCPI.*}}, %xmm1, %xmm1
+; X86-AVX-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X86-AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/peep-test-1.ll b/llvm/test/CodeGen/X86/peep-test-1.ll
index 88762cac19b5..7a356c1b361f 100644
--- a/llvm/test/CodeGen/X86/peep-test-1.ll
+++ b/llvm/test/CodeGen/X86/peep-test-1.ll
@@ -10,7 +10,7 @@ define void @foo(i32 %n, double* nocapture %p) nounwind {
; CHECK-NEXT: .LBB0_1: # %bb
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: fldl (%eax,%ecx,8)
-; CHECK-NEXT: fmull {{\.LCPI.*}}
+; CHECK-NEXT: fmull {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: fstpl (%eax,%ecx,8)
; CHECK-NEXT: decl %ecx
; CHECK-NEXT: js .LBB0_1
diff --git a/llvm/test/CodeGen/X86/pointer-vector.ll b/llvm/test/CodeGen/X86/pointer-vector.ll
index 67b389d2e5b6..04e2a2a350c4 100644
--- a/llvm/test/CodeGen/X86/pointer-vector.ll
+++ b/llvm/test/CodeGen/X86/pointer-vector.ll
@@ -133,7 +133,7 @@ define <4 x i32> @ICMP0(<4 x i8*>* %p0, <4 x i8*>* %p1) nounwind {
; CHECK-NEXT: movdqa (%ecx), %xmm0
; CHECK-NEXT: pcmpgtd (%eax), %xmm0
; CHECK-NEXT: movaps {{.*#+}} xmm1 = [9,8,7,6]
-; CHECK-NEXT: blendvps %xmm0, {{\.LCPI.*}}, %xmm1
+; CHECK-NEXT: blendvps %xmm0, {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retl
entry:
@@ -152,7 +152,7 @@ define <4 x i32> @ICMP1(<4 x i8*>* %p0, <4 x i8*>* %p1) nounwind {
; CHECK-NEXT: movdqa (%ecx), %xmm0
; CHECK-NEXT: pcmpeqd (%eax), %xmm0
; CHECK-NEXT: movaps {{.*#+}} xmm1 = [9,8,7,6]
-; CHECK-NEXT: blendvps %xmm0, {{\.LCPI.*}}, %xmm1
+; CHECK-NEXT: blendvps %xmm0, {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retl
entry:
diff --git a/llvm/test/CodeGen/X86/popcnt.ll b/llvm/test/CodeGen/X86/popcnt.ll
index 3fe9871bae65..fc35ac478071 100644
--- a/llvm/test/CodeGen/X86/popcnt.ll
+++ b/llvm/test/CodeGen/X86/popcnt.ll
@@ -253,7 +253,7 @@ define i64 @cnt64(i64 %x) nounwind readnone {
; X86-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $1, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psubb %xmm1, %xmm0
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -264,7 +264,7 @@ define i64 @cnt64(i64 %x) nounwind readnone {
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $4, %xmm1
; X86-SSE2-NEXT: paddb %xmm0, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pxor %xmm0, %xmm0
; X86-SSE2-NEXT: psadbw %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
@@ -749,7 +749,7 @@ define i64 @cnt64_optsize(i64 %x) nounwind readnone optsize {
; X86-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $1, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psubb %xmm1, %xmm0
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -760,7 +760,7 @@ define i64 @cnt64_optsize(i64 %x) nounwind readnone optsize {
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $4, %xmm1
; X86-SSE2-NEXT: paddb %xmm0, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pxor %xmm0, %xmm0
; X86-SSE2-NEXT: psadbw %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
@@ -1178,7 +1178,7 @@ define i64 @cnt64_pgso(i64 %x) nounwind readnone !prof !14 {
; X86-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $1, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psubb %xmm1, %xmm0
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
@@ -1189,7 +1189,7 @@ define i64 @cnt64_pgso(i64 %x) nounwind readnone !prof !14 {
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $4, %xmm1
; X86-SSE2-NEXT: paddb %xmm0, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pxor %xmm0, %xmm0
; X86-SSE2-NEXT: psadbw %xmm1, %xmm0
; X86-SSE2-NEXT: movd %xmm0, %eax
diff --git a/llvm/test/CodeGen/X86/pr15309.ll b/llvm/test/CodeGen/X86/pr15309.ll
index e154af43b568..91dfdf5bdf0b 100644
--- a/llvm/test/CodeGen/X86/pr15309.ll
+++ b/llvm/test/CodeGen/X86/pr15309.ll
@@ -19,10 +19,10 @@ define void @test_convert_float2_ulong2(<2 x i64>* nocapture %src, <2 x float>*
; CHECK-NEXT: movl %edx, {{[0-9]+}}(%esp)
; CHECK-NEXT: shrl $31, %ecx
; CHECK-NEXT: fildll (%esp)
-; CHECK-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; CHECK-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; CHECK-NEXT: shrl $31, %esi
; CHECK-NEXT: fildll {{[0-9]+}}(%esp)
-; CHECK-NEXT: fadds {{\.LCPI.*}}(,%esi,4)
+; CHECK-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%esi,4)
; CHECK-NEXT: fstps 84(%eax)
; CHECK-NEXT: fstps 80(%eax)
; CHECK-NEXT: addl $20, %esp
diff --git a/llvm/test/CodeGen/X86/pr34080-2.ll b/llvm/test/CodeGen/X86/pr34080-2.ll
index b09f2a274bb0..dee288599247 100644
--- a/llvm/test/CodeGen/X86/pr34080-2.ll
+++ b/llvm/test/CodeGen/X86/pr34080-2.ll
@@ -48,8 +48,8 @@ define void @computeJD(%struct.DateTime*) nounwind {
; CHECK-NEXT: leal 257(%ecx,%edx), %eax
; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
; CHECK-NEXT: fildl {{[0-9]+}}(%esp)
-; CHECK-NEXT: fadds {{\.LCPI.*}}
-; CHECK-NEXT: fmuls {{\.LCPI.*}}
+; CHECK-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}
+; CHECK-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: fnstcw {{[0-9]+}}(%esp)
; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: orl $3072, %eax # imm = 0xC00
@@ -62,7 +62,7 @@ define void @computeJD(%struct.DateTime*) nounwind {
; CHECK-NEXT: imull $60000, 24(%ebx), %ecx # imm = 0xEA60
; CHECK-NEXT: addl %eax, %ecx
; CHECK-NEXT: fldl 28(%ebx)
-; CHECK-NEXT: fmuls {{\.LCPI.*}}
+; CHECK-NEXT: fmuls {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: fnstcw {{[0-9]+}}(%esp)
; CHECK-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: orl $3072, %eax # imm = 0xC00
diff --git a/llvm/test/CodeGen/X86/pr34605.ll b/llvm/test/CodeGen/X86/pr34605.ll
index 4c1a3d7781e2..2bd9c03be46c 100644
--- a/llvm/test/CodeGen/X86/pr34605.ll
+++ b/llvm/test/CodeGen/X86/pr34605.ll
@@ -6,18 +6,18 @@ define void @pr34605(i8* nocapture %s, i32 %p) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: vpbroadcastd {{[0-9]+}}(%esp), %zmm0
-; CHECK-NEXT: vpcmpeqd {{\.LCPI.*}}, %zmm0, %k0
-; CHECK-NEXT: vpcmpeqd {{\.LCPI.*}}, %zmm0, %k1
+; CHECK-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %k0
+; CHECK-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %k1
; CHECK-NEXT: kunpckwd %k0, %k1, %k0
-; CHECK-NEXT: vpcmpeqd {{\.LCPI.*}}, %zmm0, %k1
-; CHECK-NEXT: vpcmpeqd {{\.LCPI.*}}, %zmm0, %k2
+; CHECK-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %k1
+; CHECK-NEXT: vpcmpeqd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %k2
; CHECK-NEXT: kunpckwd %k1, %k2, %k1
; CHECK-NEXT: kunpckdq %k0, %k1, %k0
; CHECK-NEXT: movl $1, %ecx
; CHECK-NEXT: kmovd %ecx, %k1
; CHECK-NEXT: kmovd %k1, %k1
; CHECK-NEXT: kandq %k1, %k0, %k1
-; CHECK-NEXT: vmovdqu8 {{\.LCPI.*}}, %zmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqu8 {{\.LCPI[0-9]+_[0-9]+}}, %zmm0 {%k1} {z}
; CHECK-NEXT: vmovdqu64 %zmm0, (%eax)
; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovdqu64 %zmm0, 64(%eax)
diff --git a/llvm/test/CodeGen/X86/pr40539.ll b/llvm/test/CodeGen/X86/pr40539.ll
index a727e83cdca7..8f98a7a35fa2 100644
--- a/llvm/test/CodeGen/X86/pr40539.ll
+++ b/llvm/test/CodeGen/X86/pr40539.ll
@@ -41,7 +41,7 @@ define zeroext i1 @_Z8test_cosv() {
; CHECK-NEXT: .cfi_def_cfa_offset 12
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; CHECK-NEXT: divss {{\.LCPI.*}}, %xmm0
+; CHECK-NEXT: divss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
; CHECK-NEXT: flds {{[0-9]+}}(%esp)
; CHECK-NEXT: #APP
@@ -51,7 +51,7 @@ define zeroext i1 @_Z8test_cosv() {
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: ucomiss %xmm0, %xmm1
; CHECK-NEXT: setae %cl
-; CHECK-NEXT: ucomiss {{\.LCPI.*}}, %xmm0
+; CHECK-NEXT: ucomiss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: setae %al
; CHECK-NEXT: andb %cl, %al
; CHECK-NEXT: addl $8, %esp
diff --git a/llvm/test/CodeGen/X86/pr40891.ll b/llvm/test/CodeGen/X86/pr40891.ll
index d67739767b21..1455f72e810a 100644
--- a/llvm/test/CodeGen/X86/pr40891.ll
+++ b/llvm/test/CodeGen/X86/pr40891.ll
@@ -7,7 +7,7 @@ define <8 x i32> @foo(<8 x i64> %x, <4 x i64> %y) {
; CHECK-LABEL: foo:
; CHECK: # %bb.0:
; CHECK-NEXT: vandps %ymm2, %ymm0, %ymm0
-; CHECK-NEXT: vandps {{\.LCPI.*}}, %ymm1, %ymm1
+; CHECK-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
diff --git a/llvm/test/CodeGen/X86/pr46527.ll b/llvm/test/CodeGen/X86/pr46527.ll
index 48b1095a6329..076d8137120f 100644
--- a/llvm/test/CodeGen/X86/pr46527.ll
+++ b/llvm/test/CodeGen/X86/pr46527.ll
@@ -22,7 +22,7 @@ define void @f(<16 x i8>* %out, <16 x i8> %in, i1 %flag) {
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; CHECK-NEXT: paddb %xmm1, %xmm1
; CHECK-NEXT: pxor %xmm0, %xmm1
-; CHECK-NEXT: pxor {{\.LCPI.*}}@GOTOFF(%eax), %xmm1
+; CHECK-NEXT: pxor {{\.LCPI[0-9]+_[0-9]+}}@GOTOFF(%eax), %xmm1
; CHECK-NEXT: movdqa %xmm1, (%ecx)
; CHECK-NEXT: retl
entry:
diff --git a/llvm/test/CodeGen/X86/pr47299.ll b/llvm/test/CodeGen/X86/pr47299.ll
index 2f5d07802c7c..93710b87751e 100644
--- a/llvm/test/CodeGen/X86/pr47299.ll
+++ b/llvm/test/CodeGen/X86/pr47299.ll
@@ -13,7 +13,7 @@ define <7 x i1> @create_mask7(i64 %0) {
; CHECK: # %bb.0:
; CHECK-NEXT: mov rax, rdi
; CHECK-NEXT: vpbroadcastq zmm0, rsi
-; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kshiftrb k1, k0, 6
; CHECK-NEXT: kmovd r8d, k1
; CHECK-NEXT: kshiftrb k1, k0, 5
@@ -57,8 +57,8 @@ define <16 x i1> @create_mask16(i64 %0) {
; CHECK-LABEL: create_mask16:
; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq zmm0, rdi
-; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
-; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
+; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckbw k0, k1, k0
; CHECK-NEXT: vpmovm2b xmm0, k0
; CHECK-NEXT: vzeroupper
@@ -71,11 +71,11 @@ define <32 x i1> @create_mask32(i64 %0) {
; CHECK-LABEL: create_mask32:
; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq zmm0, rdi
-; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
-; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
-; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
+; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
+; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckbw k0, k1, k0
-; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckbw k1, k1, k2
; CHECK-NEXT: kunpckwd k0, k1, k0
; CHECK-NEXT: vpmovm2b ymm0, k0
@@ -88,18 +88,18 @@ define <64 x i1> @create_mask64(i64 %0) {
; CHECK-LABEL: create_mask64:
; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastq zmm0, rdi
-; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
-; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
-; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT: vpcmpnleuq k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
+; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
+; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckbw k0, k1, k0
-; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckbw k1, k1, k2
-; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckwd k0, k1, k0
-; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT: vpcmpnleuq k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckbw k1, k1, k2
-; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
-; CHECK-NEXT: vpcmpnleuq k3, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT: vpcmpnleuq k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
+; CHECK-NEXT: vpcmpnleuq k3, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckbw k2, k3, k2
; CHECK-NEXT: kunpckwd k1, k2, k1
; CHECK-NEXT: kunpckdq k0, k1, k0
@@ -113,7 +113,7 @@ define <16 x i1> @create_mask16_i32(i32 %0) {
; CHECK-LABEL: create_mask16_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastd zmm0, edi
-; CHECK-NEXT: vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT: vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: vpmovm2b xmm0, k0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret
@@ -125,11 +125,11 @@ define <64 x i1> @create_mask64_i32(i32 %0) {
; CHECK-LABEL: create_mask64_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vpbroadcastd zmm0, edi
-; CHECK-NEXT: vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
-; CHECK-NEXT: vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
-; CHECK-NEXT: vpcmpnleud k2, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT: vpcmpnleud k0, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
+; CHECK-NEXT: vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
+; CHECK-NEXT: vpcmpnleud k2, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckwd k0, k1, k0
-; CHECK-NEXT: vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI.*}}]
+; CHECK-NEXT: vpcmpnleud k1, zmm0, zmmword ptr [rip + {{\.LCPI[0-9]+_[0-9]+}}]
; CHECK-NEXT: kunpckwd k1, k1, k2
; CHECK-NEXT: kunpckdq k0, k1, k0
; CHECK-NEXT: vpmovm2b zmm0, k0
diff --git a/llvm/test/CodeGen/X86/rotate-extract-vector.ll b/llvm/test/CodeGen/X86/rotate-extract-vector.ll
index 7b7feb3372f2..cc3664176e35 100644
--- a/llvm/test/CodeGen/X86/rotate-extract-vector.ll
+++ b/llvm/test/CodeGen/X86/rotate-extract-vector.ll
@@ -109,7 +109,7 @@ define <4 x i32> @vrolw_extract_mul_with_mask(<4 x i32> %i) nounwind {
; X86-NEXT: vpbroadcastd {{.*#+}} xmm1 = [9,9,9,9]
; X86-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X86-NEXT: vprold $7, %zmm0, %zmm0
-; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: vzeroupper
; X86-NEXT: retl
;
@@ -132,8 +132,8 @@ define <4 x i32> @vrolw_extract_mul_with_mask(<4 x i32> %i) nounwind {
define <32 x i16> @illegal_no_extract_mul(<32 x i16> %i) nounwind {
; X86-LABEL: illegal_no_extract_mul:
; X86: # %bb.0:
-; X86-NEXT: vpmullw {{\.LCPI.*}}, %zmm0, %zmm1
-; X86-NEXT: vpmullw {{\.LCPI.*}}, %zmm0, %zmm0
+; X86-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm1
+; X86-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; X86-NEXT: vpsrlw $10, %zmm0, %zmm0
; X86-NEXT: vporq %zmm0, %zmm1, %zmm0
; X86-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll b/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
index 0ee1cac1fa55..f4d08960be0a 100644
--- a/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
+++ b/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
@@ -267,7 +267,7 @@ define i64 @f_to_u64(float %a) nounwind {
; X87-LIN: # %bb.0:
; X87-LIN-NEXT: subl $20, %esp
; X87-LIN-NEXT: flds {{[0-9]+}}(%esp)
-; X87-LIN-NEXT: flds {{\.LCPI.*}}
+; X87-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-LIN-NEXT: fucom %st(1)
; X87-LIN-NEXT: fnstsw %ax
; X87-LIN-NEXT: xorl %edx, %edx
@@ -691,7 +691,7 @@ define i64 @d_to_u64(double %a) nounwind {
; X87-LIN: # %bb.0:
; X87-LIN-NEXT: subl $20, %esp
; X87-LIN-NEXT: fldl {{[0-9]+}}(%esp)
-; X87-LIN-NEXT: flds {{\.LCPI.*}}
+; X87-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-LIN-NEXT: fucom %st(1)
; X87-LIN-NEXT: fnstsw %ax
; X87-LIN-NEXT: xorl %edx, %edx
@@ -914,7 +914,7 @@ define i64 @x_to_u64(x86_fp80 %a) nounwind {
; X86-AVX512-LIN: # %bb.0:
; X86-AVX512-LIN-NEXT: subl $12, %esp
; X86-AVX512-LIN-NEXT: fldt {{[0-9]+}}(%esp)
-; X86-AVX512-LIN-NEXT: flds {{\.LCPI.*}}
+; X86-AVX512-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-AVX512-LIN-NEXT: xorl %edx, %edx
; X86-AVX512-LIN-NEXT: fucomi %st(1), %st
; X86-AVX512-LIN-NEXT: fldz
@@ -990,7 +990,7 @@ define i64 @x_to_u64(x86_fp80 %a) nounwind {
; X86-SSE3-LIN: # %bb.0:
; X86-SSE3-LIN-NEXT: subl $12, %esp
; X86-SSE3-LIN-NEXT: fldt {{[0-9]+}}(%esp)
-; X86-SSE3-LIN-NEXT: flds {{\.LCPI.*}}
+; X86-SSE3-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE3-LIN-NEXT: xorl %edx, %edx
; X86-SSE3-LIN-NEXT: fucomi %st(1), %st
; X86-SSE3-LIN-NEXT: fldz
@@ -1072,7 +1072,7 @@ define i64 @x_to_u64(x86_fp80 %a) nounwind {
; X86-SSE2-LIN: # %bb.0:
; X86-SSE2-LIN-NEXT: subl $20, %esp
; X86-SSE2-LIN-NEXT: fldt {{[0-9]+}}(%esp)
-; X86-SSE2-LIN-NEXT: flds {{\.LCPI.*}}
+; X86-SSE2-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X86-SSE2-LIN-NEXT: xorl %edx, %edx
; X86-SSE2-LIN-NEXT: fucomi %st(1), %st
; X86-SSE2-LIN-NEXT: setbe %dl
@@ -1180,7 +1180,7 @@ define i64 @x_to_u64(x86_fp80 %a) nounwind {
; X87-LIN: # %bb.0:
; X87-LIN-NEXT: subl $20, %esp
; X87-LIN-NEXT: fldt {{[0-9]+}}(%esp)
-; X87-LIN-NEXT: flds {{\.LCPI.*}}
+; X87-LIN-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; X87-LIN-NEXT: fucom %st(1)
; X87-LIN-NEXT: fnstsw %ax
; X87-LIN-NEXT: xorl %edx, %edx
diff --git a/llvm/test/CodeGen/X86/scalar-int-to-fp.ll b/llvm/test/CodeGen/X86/scalar-int-to-fp.ll
index 9f74ad0599e4..eedf744f1592 100644
--- a/llvm/test/CodeGen/X86/scalar-int-to-fp.ll
+++ b/llvm/test/CodeGen/X86/scalar-int-to-fp.ll
@@ -33,8 +33,8 @@ define float @u32_to_f(i32 %a) nounwind {
; SSE2_32: # %bb.0:
; SSE2_32-NEXT: pushl %eax
; SSE2_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2_32-NEXT: orpd {{\.LCPI.*}}, %xmm0
-; SSE2_32-NEXT: subsd {{\.LCPI.*}}, %xmm0
+; SSE2_32-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE2_32-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2_32-NEXT: cvtsd2ss %xmm0, %xmm0
; SSE2_32-NEXT: movss %xmm0, (%esp)
; SSE2_32-NEXT: flds (%esp)
@@ -147,8 +147,8 @@ define double @u32_to_d(i32 %a) nounwind {
; SSE2_32-NEXT: andl $-8, %esp
; SSE2_32-NEXT: subl $8, %esp
; SSE2_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2_32-NEXT: orpd {{\.LCPI.*}}, %xmm0
-; SSE2_32-NEXT: subsd {{\.LCPI.*}}, %xmm0
+; SSE2_32-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE2_32-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2_32-NEXT: movsd %xmm0, (%esp)
; SSE2_32-NEXT: fldl (%esp)
; SSE2_32-NEXT: movl %ebp, %esp
@@ -333,7 +333,7 @@ define float @u64_to_f(i64 %a) nounwind {
; AVX512F_32-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX512F_32-NEXT: shrl $31, %eax
; AVX512F_32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX512F_32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX512F_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX512F_32-NEXT: fstps {{[0-9]+}}(%esp)
; AVX512F_32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F_32-NEXT: vmovss %xmm0, (%esp)
@@ -353,7 +353,7 @@ define float @u64_to_f(i64 %a) nounwind {
; SSE2_32-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; SSE2_32-NEXT: shrl $31, %eax
; SSE2_32-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE2_32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE2_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE2_32-NEXT: fstps {{[0-9]+}}(%esp)
; SSE2_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2_32-NEXT: movss %xmm0, (%esp)
@@ -392,7 +392,7 @@ define float @u64_to_f(i64 %a) nounwind {
; SSE1_32-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE1_32-NEXT: shrl $31, %ecx
; SSE1_32-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE1_32-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; SSE1_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; SSE1_32-NEXT: fstps {{[0-9]+}}(%esp)
; SSE1_32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE1_32-NEXT: movss %xmm0, (%esp)
@@ -413,7 +413,7 @@ define float @u64_to_f(i64 %a) nounwind {
; X87-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll {{[0-9]+}}(%esp)
-; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstps {{[0-9]+}}(%esp)
; X87-NEXT: flds {{[0-9]+}}(%esp)
; X87-NEXT: movl %ebp, %esp
@@ -652,7 +652,7 @@ define double @u64_to_d(i64 %a) nounwind {
; AVX512F_32-NEXT: subl $8, %esp
; AVX512F_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F_32-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; AVX512F_32-NEXT: vsubpd {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512F_32-NEXT: vsubpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512F_32-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512F_32-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512F_32-NEXT: vmovsd %xmm0, (%esp)
@@ -669,7 +669,7 @@ define double @u64_to_d(i64 %a) nounwind {
; SSE2_32-NEXT: subl $8, %esp
; SSE2_32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2_32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; SSE2_32-NEXT: subpd {{\.LCPI.*}}, %xmm0
+; SSE2_32-NEXT: subpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2_32-NEXT: movapd %xmm0, %xmm1
; SSE2_32-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2_32-NEXT: addsd %xmm0, %xmm1
@@ -701,7 +701,7 @@ define double @u64_to_d(i64 %a) nounwind {
; SSE1_32-NEXT: movl %eax, (%esp)
; SSE1_32-NEXT: shrl $31, %ecx
; SSE1_32-NEXT: fildll (%esp)
-; SSE1_32-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; SSE1_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; SSE1_32-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE1_32-NEXT: fldl {{[0-9]+}}(%esp)
; SSE1_32-NEXT: movl %ebp, %esp
@@ -720,7 +720,7 @@ define double @u64_to_d(i64 %a) nounwind {
; X87-NEXT: movl %eax, (%esp)
; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll (%esp)
-; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstpl {{[0-9]+}}(%esp)
; X87-NEXT: fldl {{[0-9]+}}(%esp)
; X87-NEXT: movl %ebp, %esp
@@ -774,7 +774,7 @@ define double @u64_to_d_optsize(i64 %a) nounwind optsize {
; AVX512F_32-NEXT: subl $8, %esp
; AVX512F_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F_32-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; AVX512F_32-NEXT: vsubpd {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512F_32-NEXT: vsubpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512F_32-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX512F_32-NEXT: vmovlpd %xmm0, (%esp)
; AVX512F_32-NEXT: fldl (%esp)
@@ -790,7 +790,7 @@ define double @u64_to_d_optsize(i64 %a) nounwind optsize {
; SSE2_32-NEXT: subl $8, %esp
; SSE2_32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2_32-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
-; SSE2_32-NEXT: subpd {{\.LCPI.*}}, %xmm0
+; SSE2_32-NEXT: subpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE2_32-NEXT: movapd %xmm0, %xmm1
; SSE2_32-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2_32-NEXT: addsd %xmm0, %xmm1
@@ -822,7 +822,7 @@ define double @u64_to_d_optsize(i64 %a) nounwind optsize {
; SSE1_32-NEXT: movl %eax, (%esp)
; SSE1_32-NEXT: shrl $31, %ecx
; SSE1_32-NEXT: fildll (%esp)
-; SSE1_32-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; SSE1_32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; SSE1_32-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE1_32-NEXT: fldl {{[0-9]+}}(%esp)
; SSE1_32-NEXT: movl %ebp, %esp
@@ -841,7 +841,7 @@ define double @u64_to_d_optsize(i64 %a) nounwind optsize {
; X87-NEXT: movl %eax, (%esp)
; X87-NEXT: shrl $31, %ecx
; X87-NEXT: fildll (%esp)
-; X87-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; X87-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; X87-NEXT: fstpl {{[0-9]+}}(%esp)
; X87-NEXT: fldl {{[0-9]+}}(%esp)
; X87-NEXT: movl %ebp, %esp
@@ -1076,7 +1076,7 @@ define x86_fp80 @u64_to_x(i64 %a) nounwind {
; CHECK32-NEXT: movl %eax, (%esp)
; CHECK32-NEXT: shrl $31, %ecx
; CHECK32-NEXT: fildll (%esp)
-; CHECK32-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; CHECK32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; CHECK32-NEXT: movl %ebp, %esp
; CHECK32-NEXT: popl %ebp
; CHECK32-NEXT: retl
@@ -1088,7 +1088,7 @@ define x86_fp80 @u64_to_x(i64 %a) nounwind {
; CHECK64-NEXT: testq %rdi, %rdi
; CHECK64-NEXT: sets %al
; CHECK64-NEXT: fildll -{{[0-9]+}}(%rsp)
-; CHECK64-NEXT: fadds {{\.LCPI.*}}(,%rax,4)
+; CHECK64-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%rax,4)
; CHECK64-NEXT: retq
%r = uitofp i64 %a to x86_fp80
ret x86_fp80 %r
diff --git a/llvm/test/CodeGen/X86/select-of-fp-constants.ll b/llvm/test/CodeGen/X86/select-of-fp-constants.ll
index 0a3e9f93c33e..934f0f8b9465 100644
--- a/llvm/test/CodeGen/X86/select-of-fp-constants.ll
+++ b/llvm/test/CodeGen/X86/select-of-fp-constants.ll
@@ -16,7 +16,7 @@ define float @icmp_select_fp_constants(i32 %x) nounwind readnone {
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: cmpl $0, {{[0-9]+}}(%esp)
; X86-NEXT: sete %al
-; X86-NEXT: flds {{\.LCPI.*}}(,%eax,4)
+; X86-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-NEXT: retl
;
; X64-SSE-LABEL: icmp_select_fp_constants:
@@ -46,7 +46,7 @@ define float @fcmp_select_fp_constants(float %x) nounwind readnone {
; X86-SSE-NEXT: cmpneqss {{[0-9]+}}(%esp), %xmm0
; X86-SSE-NEXT: movd %xmm0, %eax
; X86-SSE-NEXT: andl $1, %eax
-; X86-SSE-NEXT: flds {{\.LCPI.*}}(,%eax,4)
+; X86-SSE-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-SSE-NEXT: retl
;
; X86-AVX2-LABEL: fcmp_select_fp_constants:
@@ -55,15 +55,15 @@ define float @fcmp_select_fp_constants(float %x) nounwind readnone {
; X86-AVX2-NEXT: vcmpneqss {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
; X86-AVX2-NEXT: andl $1, %eax
-; X86-AVX2-NEXT: flds {{\.LCPI.*}}(,%eax,4)
+; X86-AVX2-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-AVX2-NEXT: retl
;
; X86-AVX512F-LABEL: fcmp_select_fp_constants:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX512F-NEXT: vcmpneqss {{\.LCPI.*}}, %xmm0, %k0
+; X86-AVX512F-NEXT: vcmpneqss {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %k0
; X86-AVX512F-NEXT: kmovw %k0, %eax
-; X86-AVX512F-NEXT: flds {{\.LCPI.*}}(,%eax,4)
+; X86-AVX512F-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-AVX512F-NEXT: retl
;
; X64-SSE-LABEL: fcmp_select_fp_constants:
diff --git a/llvm/test/CodeGen/X86/select.ll b/llvm/test/CodeGen/X86/select.ll
index 012f7f035fec..34c7911220c4 100644
--- a/llvm/test/CodeGen/X86/select.ll
+++ b/llvm/test/CodeGen/X86/select.ll
@@ -163,7 +163,7 @@ define float @test3(i32 %x) nounwind readnone {
; MCU-NEXT: xorl %ecx, %ecx
; MCU-NEXT: testl %eax, %eax
; MCU-NEXT: sete %cl
-; MCU-NEXT: flds {{\.LCPI.*}}(,%ecx,4)
+; MCU-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}(,%ecx,4)
; MCU-NEXT: retl
entry:
%0 = icmp eq i32 %x, 0
@@ -197,7 +197,7 @@ define signext i8 @test4(i8* nocapture %P, double %F) nounwind readonly {
; MCU: # %bb.0: # %entry
; MCU-NEXT: movl %eax, %ecx
; MCU-NEXT: fldl {{[0-9]+}}(%esp)
-; MCU-NEXT: flds {{\.LCPI.*}}
+; MCU-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; MCU-NEXT: fucompp
; MCU-NEXT: fnstsw %ax
; MCU-NEXT: xorl %edx, %edx
@@ -422,7 +422,7 @@ define x86_fp80 @test7(i32 %tmp8) nounwind {
; MCU-NEXT: notl %eax
; MCU-NEXT: shrl $27, %eax
; MCU-NEXT: andl $-16, %eax
-; MCU-NEXT: fldt {{\.LCPI.*}}(%eax)
+; MCU-NEXT: fldt {{\.LCPI[0-9]+_[0-9]+}}(%eax)
; MCU-NEXT: retl
%tmp9 = icmp sgt i32 %tmp8, -1
%retval = select i1 %tmp9, x86_fp80 0xK4005B400000000000000, x86_fp80 0xK40078700000000000000
diff --git a/llvm/test/CodeGen/X86/setcc-lowering.ll b/llvm/test/CodeGen/X86/setcc-lowering.ll
index 34f3ad697ad5..e6c5bffe2dfc 100644
--- a/llvm/test/CodeGen/X86/setcc-lowering.ll
+++ b/llvm/test/CodeGen/X86/setcc-lowering.ll
@@ -22,7 +22,7 @@ define <8 x i16> @pr25080(<8 x i32> %a) {
; KNL-32-LABEL: pr25080:
; KNL-32: # %bb.0: # %entry
; KNL-32-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
-; KNL-32-NEXT: vptestnmd {{\.LCPI.*}}{1to16}, %zmm0, %k0
+; KNL-32-NEXT: vptestnmd {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %k0
; KNL-32-NEXT: movb $15, %al
; KNL-32-NEXT: kmovw %eax, %k1
; KNL-32-NEXT: korw %k1, %k0, %k1
diff --git a/llvm/test/CodeGen/X86/shrink-fp-const2.ll b/llvm/test/CodeGen/X86/shrink-fp-const2.ll
index 670f268b6fa2..01d799b58982 100644
--- a/llvm/test/CodeGen/X86/shrink-fp-const2.ll
+++ b/llvm/test/CodeGen/X86/shrink-fp-const2.ll
@@ -4,7 +4,7 @@
define x86_fp80 @test2() nounwind {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: flds {{\.LCPI.*}}
+; CHECK-NEXT: flds {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: retl
entry:
ret x86_fp80 0xK3FFFC000000000000000
diff --git a/llvm/test/CodeGen/X86/shrink_vmul.ll b/llvm/test/CodeGen/X86/shrink_vmul.ll
index ce3a17e3e986..177d1206c960 100644
--- a/llvm/test/CodeGen/X86/shrink_vmul.ll
+++ b/llvm/test/CodeGen/X86/shrink_vmul.ll
@@ -1406,7 +1406,7 @@ define void @mul_2xi8_varconst1(i8* nocapture readonly %a, i64 %index) {
; X86-SSE-NEXT: pxor %xmm1, %xmm1
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X86-SSE-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
; X86-SSE-NEXT: retl
;
@@ -1418,7 +1418,7 @@ define void @mul_2xi8_varconst1(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
; X86-AVX-NEXT: vmovd %ecx, %xmm0
; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; X86-AVX-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl
;
@@ -1471,7 +1471,7 @@ define void @mul_2xi8_varconst2(i8* nocapture readonly %a, i64 %index) {
; X86-SSE-NEXT: movd %ecx, %xmm0
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-SSE-NEXT: psraw $8, %xmm0
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,2,1,4,5,6,7]
; X86-SSE-NEXT: psrad $16, %xmm0
; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
@@ -1485,7 +1485,7 @@ define void @mul_2xi8_varconst2(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
; X86-AVX-NEXT: vmovd %ecx, %xmm0
; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
-; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl
;
@@ -1540,7 +1540,7 @@ define void @mul_2xi8_varconst3(i8* nocapture readonly %a, i64 %index) {
; X86-SSE-NEXT: pxor %xmm1, %xmm1
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; X86-SSE-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
; X86-SSE-NEXT: retl
;
@@ -1552,7 +1552,7 @@ define void @mul_2xi8_varconst3(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
; X86-AVX-NEXT: vmovd %ecx, %xmm0
; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; X86-AVX-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl
;
@@ -1621,7 +1621,7 @@ define void @mul_2xi8_varconst4(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
; X86-AVX-NEXT: vmovd %ecx, %xmm0
; X86-AVX-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl
;
@@ -1693,7 +1693,7 @@ define void @mul_2xi8_varconst5(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
; X86-AVX-NEXT: vmovd %ecx, %xmm0
; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
-; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl
;
@@ -1765,7 +1765,7 @@ define void @mul_2xi8_varconst6(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movzwl (%ecx,%eax), %ecx
; X86-AVX-NEXT: vmovd %ecx, %xmm0
; X86-AVX-NEXT: vpmovsxbd %xmm0, %xmm0
-; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl
;
@@ -1833,7 +1833,7 @@ define void @mul_2xi16_varconst1(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movl c, %edx
; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl
;
@@ -1897,7 +1897,7 @@ define void @mul_2xi16_varconst2(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movl c, %edx
; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
-; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl
;
@@ -1947,7 +1947,7 @@ define void @mul_2xi16_varconst3(i8* nocapture readonly %a, i64 %index) {
; X86-SSE-NEXT: movl c, %edx
; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE-NEXT: psrld $16, %xmm0
-; X86-SSE-NEXT: pmuludq {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmuludq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: psllq $32, %xmm0
; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
; X86-SSE-NEXT: retl
@@ -1959,7 +1959,7 @@ define void @mul_2xi16_varconst3(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movl c, %edx
; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl
;
@@ -2009,7 +2009,7 @@ define void @mul_2xi16_varconst4(i8* nocapture readonly %a, i64 %index) {
; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-SSE-NEXT: psrad $16, %xmm0
; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
-; X86-SSE-NEXT: pmuludq {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmuludq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: psllq $32, %xmm0
; X86-SSE-NEXT: movq %xmm0, (%edx,%eax,4)
; X86-SSE-NEXT: retl
@@ -2021,7 +2021,7 @@ define void @mul_2xi16_varconst4(i8* nocapture readonly %a, i64 %index) {
; X86-AVX-NEXT: movl c, %edx
; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: vpmovsxwd %xmm0, %xmm0
-; X86-AVX-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovq %xmm0, (%edx,%eax,4)
; X86-AVX-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/sink-addsub-of-const.ll b/llvm/test/CodeGen/X86/sink-addsub-of-const.ll
index 5c7d4e0717a2..75241d9ea1b5 100644
--- a/llvm/test/CodeGen/X86/sink-addsub-of-const.ll
+++ b/llvm/test/CodeGen/X86/sink-addsub-of-const.ll
@@ -261,7 +261,7 @@ define <4 x i32> @vec_sink_add_of_const_to_add0(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_add_of_const_to_add0:
; X32: # %bb.0:
; X32-NEXT: paddd %xmm1, %xmm0
-; X32-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: vec_sink_add_of_const_to_add0:
@@ -277,7 +277,7 @@ define <4 x i32> @vec_sink_add_of_const_to_add1(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_add_of_const_to_add1:
; X32: # %bb.0:
; X32-NEXT: paddd %xmm1, %xmm0
-; X32-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: vec_sink_add_of_const_to_add1:
@@ -297,7 +297,7 @@ define <4 x i32> @vec_sink_sub_of_const_to_add0(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_sub_of_const_to_add0:
; X32: # %bb.0:
; X32-NEXT: paddd %xmm1, %xmm0
-; X32-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: vec_sink_sub_of_const_to_add0:
@@ -313,7 +313,7 @@ define <4 x i32> @vec_sink_sub_of_const_to_add1(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_sub_of_const_to_add1:
; X32: # %bb.0:
; X32-NEXT: paddd %xmm1, %xmm0
-; X32-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: vec_sink_sub_of_const_to_add1:
@@ -333,7 +333,7 @@ define <4 x i32> @vec_sink_sub_from_const_to_add0(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_sub_from_const_to_add0:
; X32: # %bb.0:
; X32-NEXT: psubd %xmm0, %xmm1
-; X32-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: retl
;
@@ -351,7 +351,7 @@ define <4 x i32> @vec_sink_sub_from_const_to_add1(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_sub_from_const_to_add1:
; X32: # %bb.0:
; X32-NEXT: psubd %xmm0, %xmm1
-; X32-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: retl
;
@@ -373,7 +373,7 @@ define <4 x i32> @vec_sink_add_of_const_to_sub(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_add_of_const_to_sub:
; X32: # %bb.0:
; X32-NEXT: psubd %xmm1, %xmm0
-; X32-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: vec_sink_add_of_const_to_sub:
@@ -389,7 +389,7 @@ define <4 x i32> @vec_sink_add_of_const_to_sub2(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_add_of_const_to_sub2:
; X32: # %bb.0:
; X32-NEXT: psubd %xmm0, %xmm1
-; X32-NEXT: psubd {{\.LCPI.*}}, %xmm1
+; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: retl
;
@@ -411,7 +411,7 @@ define <4 x i32> @vec_sink_sub_of_const_to_sub(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_sub_of_const_to_sub:
; X32: # %bb.0:
; X32-NEXT: psubd %xmm1, %xmm0
-; X32-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: vec_sink_sub_of_const_to_sub:
@@ -427,7 +427,7 @@ define <4 x i32> @vec_sink_sub_of_const_to_sub2(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_sub_of_const_to_sub2:
; X32: # %bb.0:
; X32-NEXT: psubd %xmm0, %xmm1
-; X32-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X32-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: movdqa %xmm1, %xmm0
; X32-NEXT: retl
;
@@ -461,7 +461,7 @@ define <4 x i32> @vec_sink_sub_from_const_to_sub2(<4 x i32> %a, <4 x i32> %b) {
; X32-LABEL: vec_sink_sub_from_const_to_sub2:
; X32: # %bb.0:
; X32-NEXT: paddd %xmm1, %xmm0
-; X32-NEXT: psubd {{\.LCPI.*}}, %xmm0
+; X32-NEXT: psubd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: vec_sink_sub_from_const_to_sub2:
diff --git a/llvm/test/CodeGen/X86/slow-pmulld.ll b/llvm/test/CodeGen/X86/slow-pmulld.ll
index 4eece0d1135e..55834ab90a67 100644
--- a/llvm/test/CodeGen/X86/slow-pmulld.ll
+++ b/llvm/test/CodeGen/X86/slow-pmulld.ll
@@ -21,7 +21,7 @@ define <4 x i32> @test_mul_v4i32_v4i8(<4 x i8> %A) {
; CHECK32-LABEL: test_mul_v4i32_v4i8:
; CHECK32: # %bb.0:
; CHECK32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; CHECK32-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0
+; CHECK32-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: test_mul_v4i32_v4i8:
@@ -33,7 +33,7 @@ define <4 x i32> @test_mul_v4i32_v4i8(<4 x i8> %A) {
; SSE4-32-LABEL: test_mul_v4i32_v4i8:
; SSE4-32: # %bb.0:
; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE4-32-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE4-32-NEXT: retl
;
; SSE4-64-LABEL: test_mul_v4i32_v4i8:
@@ -45,7 +45,7 @@ define <4 x i32> @test_mul_v4i32_v4i8(<4 x i8> %A) {
; AVX2-32-LABEL: test_mul_v4i32_v4i8:
; AVX2-32: # %bb.0:
; AVX2-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX2-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX2-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX2-32-NEXT: retl
;
; AVX2-64-LABEL: test_mul_v4i32_v4i8:
@@ -57,7 +57,7 @@ define <4 x i32> @test_mul_v4i32_v4i8(<4 x i8> %A) {
; AVX512DQ-32-LABEL: test_mul_v4i32_v4i8:
; AVX512DQ-32: # %bb.0:
; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512DQ-32-NEXT: retl
;
; AVX512DQ-64-LABEL: test_mul_v4i32_v4i8:
@@ -69,7 +69,7 @@ define <4 x i32> @test_mul_v4i32_v4i8(<4 x i8> %A) {
; AVX512BW-32-LABEL: test_mul_v4i32_v4i8:
; AVX512BW-32: # %bb.0:
; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512BW-32-NEXT: retl
;
; AVX512BW-64-LABEL: test_mul_v4i32_v4i8:
@@ -168,7 +168,7 @@ define <8 x i32> @test_mul_v8i32_v8i8(<8 x i8> %A) {
; AVX2-32-LABEL: test_mul_v8i32_v8i8:
; AVX2-32: # %bb.0:
; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX2-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX2-32-NEXT: retl
;
; AVX2-64-LABEL: test_mul_v8i32_v8i8:
@@ -180,7 +180,7 @@ define <8 x i32> @test_mul_v8i32_v8i8(<8 x i8> %A) {
; AVX512DQ-32-LABEL: test_mul_v8i32_v8i8:
; AVX512DQ-32: # %bb.0:
; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX512DQ-32-NEXT: retl
;
; AVX512DQ-64-LABEL: test_mul_v8i32_v8i8:
@@ -192,7 +192,7 @@ define <8 x i32> @test_mul_v8i32_v8i8(<8 x i8> %A) {
; AVX512BW-32-LABEL: test_mul_v8i32_v8i8:
; AVX512BW-32: # %bb.0:
; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX512BW-32-NEXT: retl
;
; AVX512BW-64-LABEL: test_mul_v8i32_v8i8:
@@ -359,7 +359,7 @@ define <16 x i32> @test_mul_v16i32_v16i8(<16 x i8> %A) {
; AVX512DQ-32-LABEL: test_mul_v16i32_v16i8:
; AVX512DQ-32: # %bb.0:
; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQ-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; AVX512DQ-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; AVX512DQ-32-NEXT: retl
;
; AVX512DQ-64-LABEL: test_mul_v16i32_v16i8:
@@ -371,7 +371,7 @@ define <16 x i32> @test_mul_v16i32_v16i8(<16 x i8> %A) {
; AVX512BW-32-LABEL: test_mul_v16i32_v16i8:
; AVX512BW-32: # %bb.0:
; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %zmm0, %zmm0
+; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; AVX512BW-32-NEXT: retl
;
; AVX512BW-64-LABEL: test_mul_v16i32_v16i8:
@@ -383,7 +383,7 @@ define <16 x i32> @test_mul_v16i32_v16i8(<16 x i8> %A) {
; KNL-32-LABEL: test_mul_v16i32_v16i8:
; KNL-32: # %bb.0:
; KNL-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; KNL-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; KNL-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; KNL-32-NEXT: retl
;
; KNL-64-LABEL: test_mul_v16i32_v16i8:
@@ -418,7 +418,7 @@ define <4 x i32> @test_mul_v4i32_v4i16(<4 x i16> %A) {
; SSE4-32-LABEL: test_mul_v4i32_v4i16:
; SSE4-32: # %bb.0:
; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE4-32-NEXT: retl
;
; SSE4-64-LABEL: test_mul_v4i32_v4i16:
@@ -666,7 +666,7 @@ define <16 x i32> @test_mul_v16i32_v16i16(<16 x i16> %A) {
; AVX512-32-LABEL: test_mul_v16i32_v16i16:
; AVX512-32: # %bb.0:
; AVX512-32-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; AVX512-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; AVX512-32-NEXT: retl
;
; AVX512-64-LABEL: test_mul_v16i32_v16i16:
@@ -687,7 +687,7 @@ define <4 x i32> @test_mul_v4i32_v4i8_minsize(<4 x i8> %A) minsize {
; CHECK32-LABEL: test_mul_v4i32_v4i8_minsize:
; CHECK32: # %bb.0:
; CHECK32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; CHECK32-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0
+; CHECK32-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: test_mul_v4i32_v4i8_minsize:
@@ -699,7 +699,7 @@ define <4 x i32> @test_mul_v4i32_v4i8_minsize(<4 x i8> %A) minsize {
; SSE4-32-LABEL: test_mul_v4i32_v4i8_minsize:
; SSE4-32: # %bb.0:
; SSE4-32-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE4-32-NEXT: pmaddwd {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT: pmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE4-32-NEXT: retl
;
; SSE4-64-LABEL: test_mul_v4i32_v4i8_minsize:
@@ -711,7 +711,7 @@ define <4 x i32> @test_mul_v4i32_v4i8_minsize(<4 x i8> %A) minsize {
; AVX2-32-LABEL: test_mul_v4i32_v4i8_minsize:
; AVX2-32: # %bb.0:
; AVX2-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX2-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX2-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX2-32-NEXT: retl
;
; AVX2-64-LABEL: test_mul_v4i32_v4i8_minsize:
@@ -723,7 +723,7 @@ define <4 x i32> @test_mul_v4i32_v4i8_minsize(<4 x i8> %A) minsize {
; AVX512DQ-32-LABEL: test_mul_v4i32_v4i8_minsize:
; AVX512DQ-32: # %bb.0:
; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512DQ-32-NEXT: retl
;
; AVX512DQ-64-LABEL: test_mul_v4i32_v4i8_minsize:
@@ -735,7 +735,7 @@ define <4 x i32> @test_mul_v4i32_v4i8_minsize(<4 x i8> %A) minsize {
; AVX512BW-32-LABEL: test_mul_v4i32_v4i8_minsize:
; AVX512BW-32: # %bb.0:
; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512BW-32-NEXT: retl
;
; AVX512BW-64-LABEL: test_mul_v4i32_v4i8_minsize:
@@ -826,7 +826,7 @@ define <8 x i32> @test_mul_v8i32_v8i8_minsize(<8 x i8> %A) minsize {
; AVX2-32-LABEL: test_mul_v8i32_v8i8_minsize:
; AVX2-32: # %bb.0:
; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX2-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX2-32-NEXT: retl
;
; AVX2-64-LABEL: test_mul_v8i32_v8i8_minsize:
@@ -838,7 +838,7 @@ define <8 x i32> @test_mul_v8i32_v8i8_minsize(<8 x i8> %A) minsize {
; AVX512DQ-32-LABEL: test_mul_v8i32_v8i8_minsize:
; AVX512DQ-32: # %bb.0:
; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX512DQ-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX512DQ-32-NEXT: retl
;
; AVX512DQ-64-LABEL: test_mul_v8i32_v8i8_minsize:
@@ -850,7 +850,7 @@ define <8 x i32> @test_mul_v8i32_v8i8_minsize(<8 x i8> %A) minsize {
; AVX512BW-32-LABEL: test_mul_v8i32_v8i8_minsize:
; AVX512BW-32: # %bb.0:
; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX512BW-32-NEXT: retl
;
; AVX512BW-64-LABEL: test_mul_v8i32_v8i8_minsize:
@@ -997,7 +997,7 @@ define <16 x i32> @test_mul_v16i32_v16i8_minsize(<16 x i8> %A) minsize {
; AVX512DQ-32-LABEL: test_mul_v16i32_v16i8_minsize:
; AVX512DQ-32: # %bb.0:
; AVX512DQ-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512DQ-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; AVX512DQ-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; AVX512DQ-32-NEXT: retl
;
; AVX512DQ-64-LABEL: test_mul_v16i32_v16i8_minsize:
@@ -1009,7 +1009,7 @@ define <16 x i32> @test_mul_v16i32_v16i8_minsize(<16 x i8> %A) minsize {
; AVX512BW-32-LABEL: test_mul_v16i32_v16i8_minsize:
; AVX512BW-32: # %bb.0:
; AVX512BW-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI.*}}, %zmm0, %zmm0
+; AVX512BW-32-NEXT: vpmaddwd {{\.LCPI[0-9]+_[0-9]+}}, %zmm0, %zmm0
; AVX512BW-32-NEXT: retl
;
; AVX512BW-64-LABEL: test_mul_v16i32_v16i8_minsize:
@@ -1021,7 +1021,7 @@ define <16 x i32> @test_mul_v16i32_v16i8_minsize(<16 x i8> %A) minsize {
; KNL-32-LABEL: test_mul_v16i32_v16i8_minsize:
; KNL-32: # %bb.0:
; KNL-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
-; KNL-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; KNL-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; KNL-32-NEXT: retl
;
; KNL-64-LABEL: test_mul_v16i32_v16i8_minsize:
@@ -1038,7 +1038,7 @@ define <4 x i32> @test_mul_v4i32_v4i16_minsize(<4 x i16> %A) minsize {
; CHECK32-LABEL: test_mul_v4i32_v4i16_minsize:
; CHECK32: # %bb.0:
; CHECK32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; CHECK32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; CHECK32-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK32-NEXT: retl
;
; CHECK64-LABEL: test_mul_v4i32_v4i16_minsize:
@@ -1050,7 +1050,7 @@ define <4 x i32> @test_mul_v4i32_v4i16_minsize(<4 x i16> %A) minsize {
; SSE4-32-LABEL: test_mul_v4i32_v4i16_minsize:
; SSE4-32: # %bb.0:
; SSE4-32-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE4-32-NEXT: retl
;
; SSE4-64-LABEL: test_mul_v4i32_v4i16_minsize:
@@ -1260,7 +1260,7 @@ define <16 x i32> @test_mul_v16i32_v16i16_minsize(<16 x i16> %A) minsize {
; AVX512-32-LABEL: test_mul_v16i32_v16i16_minsize:
; AVX512-32: # %bb.0:
; AVX512-32-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512-32-NEXT: vpmulld {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; AVX512-32-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; AVX512-32-NEXT: retl
;
; AVX512-64-LABEL: test_mul_v16i32_v16i16_minsize:
diff --git a/llvm/test/CodeGen/X86/sse-fcopysign.ll b/llvm/test/CodeGen/X86/sse-fcopysign.ll
index fc49538e80b7..896516c27254 100644
--- a/llvm/test/CodeGen/X86/sse-fcopysign.ll
+++ b/llvm/test/CodeGen/X86/sse-fcopysign.ll
@@ -65,9 +65,9 @@ define float @int1(float %a, float %b) nounwind {
; X32: # %bb.0:
; X32-NEXT: pushl %eax
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: andps {{\.LCPI.*}}, %xmm1
+; X32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: orps %xmm0, %xmm1
; X32-NEXT: movss %xmm1, (%esp)
; X32-NEXT: flds (%esp)
@@ -94,9 +94,9 @@ define double @int2(double %a, float %b, float %c) nounwind {
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: addss 20(%ebp), %xmm0
; X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT: andps {{\.LCPI.*}}, %xmm1
+; X32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-NEXT: cvtss2sd %xmm0, %xmm0
-; X32-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-NEXT: orps %xmm1, %xmm0
; X32-NEXT: movlps %xmm0, (%esp)
; X32-NEXT: fldl (%esp)
diff --git a/llvm/test/CodeGen/X86/sse-load-ret.ll b/llvm/test/CodeGen/X86/sse-load-ret.ll
index 841410bf8635..510c9e2bd8ba 100644
--- a/llvm/test/CodeGen/X86/sse-load-ret.ll
+++ b/llvm/test/CodeGen/X86/sse-load-ret.ll
@@ -14,7 +14,7 @@ define double @test1(double* %P) {
define double @test2() {
; CHECK-LABEL: test2:
; CHECK: # %bb.0:
-; CHECK-NEXT: fldl {{\.LCPI.*}}
+; CHECK-NEXT: fldl {{\.LCPI[0-9]+_[0-9]+}}
; CHECK-NEXT: retl
ret double 1.234560e+03
}
diff --git a/llvm/test/CodeGen/X86/sse1-fcopysign.ll b/llvm/test/CodeGen/X86/sse1-fcopysign.ll
index 529ac546ef15..28c7a3b80a03 100644
--- a/llvm/test/CodeGen/X86/sse1-fcopysign.ll
+++ b/llvm/test/CodeGen/X86/sse1-fcopysign.ll
@@ -7,7 +7,7 @@ define float @f32_pos(float %a, float %b) nounwind {
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: movss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
; X86-NEXT: popl %eax
@@ -26,7 +26,7 @@ define float @f32_neg(float %a, float %b) nounwind {
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: movss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
; X86-NEXT: popl %eax
@@ -43,7 +43,7 @@ define float @f32_neg(float %a, float %b) nounwind {
define <4 x float> @v4f32_pos(<4 x float> %a, <4 x float> %b) nounwind {
; X86-LABEL: v4f32_pos:
; X86: # %bb.0:
-; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: v4f32_pos:
@@ -57,7 +57,7 @@ define <4 x float> @v4f32_pos(<4 x float> %a, <4 x float> %b) nounwind {
define <4 x float> @v4f32_neg(<4 x float> %a, <4 x float> %b) nounwind {
; X86-LABEL: v4f32_neg:
; X86: # %bb.0:
-; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: v4f32_neg:
@@ -72,8 +72,8 @@ define <4 x float> @v4f32_const_mag(<4 x float> %a, <4 x float> %b) nounwind {
; X86-LABEL: v4f32_const_mag:
; X86: # %bb.0:
; X86-NEXT: movaps %xmm1, %xmm0
-; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
-; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: v4f32_const_mag:
diff --git a/llvm/test/CodeGen/X86/sse1.ll b/llvm/test/CodeGen/X86/sse1.ll
index e2b95eb7b93a..7a5654c6ab1d 100644
--- a/llvm/test/CodeGen/X86/sse1.ll
+++ b/llvm/test/CodeGen/X86/sse1.ll
@@ -180,7 +180,7 @@ define <4 x i32> @PR30512(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X86-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; X86-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
-; X86-NEXT: andps {{\.LCPI.*}}, %xmm2
+; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-NEXT: movaps %xmm2, (%eax)
; X86-NEXT: addl $16, %esp
; X86-NEXT: popl %esi
@@ -238,7 +238,7 @@ define <4 x i32> @PR30512(<4 x i32> %x, <4 x i32> %y) nounwind {
define <2 x float> @PR31672() #0 {
; X86-LABEL: PR31672:
; X86: # %bb.0:
-; X86-NEXT: sqrtps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: sqrtps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: PR31672:
diff --git a/llvm/test/CodeGen/X86/sse2.ll b/llvm/test/CodeGen/X86/sse2.ll
index eeb55d861205..49aefe0ec5e9 100644
--- a/llvm/test/CodeGen/X86/sse2.ll
+++ b/llvm/test/CodeGen/X86/sse2.ll
@@ -675,7 +675,7 @@ define <2 x i64> @test_insert_64_zext(<2 x i64> %i) {
define <4 x i32> @PR19721(<4 x i32> %i) {
; X86-SSE-LABEL: PR19721:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; AVX-LABEL: PR19721:
diff --git a/llvm/test/CodeGen/X86/sse3.ll b/llvm/test/CodeGen/X86/sse3.ll
index 750abbabb545..7a82e6812208 100644
--- a/llvm/test/CodeGen/X86/sse3.ll
+++ b/llvm/test/CodeGen/X86/sse3.ll
@@ -397,7 +397,7 @@ define <4 x i32> @t17() nounwind {
; X86-LABEL: t17:
; X86: # %bb.0: # %entry
; X86-NEXT: pshufd {{.*#+}} xmm0 = mem[0,1,0,1]
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: t17:
diff --git a/llvm/test/CodeGen/X86/uint64-to-float.ll b/llvm/test/CodeGen/X86/uint64-to-float.ll
index fdab443921c3..ab230c65e7dc 100644
--- a/llvm/test/CodeGen/X86/uint64-to-float.ll
+++ b/llvm/test/CodeGen/X86/uint64-to-float.ll
@@ -18,7 +18,7 @@ define float @test(i64 %a) nounwind {
; X86-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; X86-NEXT: shrl $31, %eax
; X86-NEXT: fildll {{[0-9]+}}(%esp)
-; X86-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; X86-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: movss %xmm0, (%esp)
diff --git a/llvm/test/CodeGen/X86/uint_to_fp-2.ll b/llvm/test/CodeGen/X86/uint_to_fp-2.ll
index c9211540329b..da27f04e2df1 100644
--- a/llvm/test/CodeGen/X86/uint_to_fp-2.ll
+++ b/llvm/test/CodeGen/X86/uint_to_fp-2.ll
@@ -7,8 +7,8 @@ define float @test1(i32 %x) nounwind readnone {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: orpd {{\.LCPI.*}}, %xmm0
-; CHECK-NEXT: subsd {{\.LCPI.*}}, %xmm0
+; CHECK-NEXT: orpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; CHECK-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; CHECK-NEXT: cvtsd2ss %xmm0, %xmm0
; CHECK-NEXT: movss %xmm0, (%esp)
; CHECK-NEXT: flds (%esp)
@@ -26,8 +26,8 @@ define float @test2(<4 x i32> %x) nounwind readnone ssp {
; CHECK-NEXT: pushl %eax
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; CHECK-NEXT: orps {{\.LCPI.*}}, %xmm1
-; CHECK-NEXT: subsd {{\.LCPI.*}}, %xmm1
+; CHECK-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; CHECK-NEXT: subsd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: cvtsd2ss %xmm1, %xmm0
; CHECK-NEXT: movss %xmm0, (%esp)
diff --git a/llvm/test/CodeGen/X86/uint_to_fp-3.ll b/llvm/test/CodeGen/X86/uint_to_fp-3.ll
index e3ac60cf77ef..b6846a68f23b 100644
--- a/llvm/test/CodeGen/X86/uint_to_fp-3.ll
+++ b/llvm/test/CodeGen/X86/uint_to_fp-3.ll
@@ -9,13 +9,13 @@
define <4 x float> @mask_ucvt_4i32_4f32(<4 x i32> %a) {
; X32-SSE-LABEL: mask_ucvt_4i32_4f32:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: mask_ucvt_4i32_4f32:
; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
; X32-AVX-NEXT: retl
;
@@ -38,7 +38,7 @@ define <4 x float> @mask_ucvt_4i32_4f32(<4 x i32> %a) {
define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) {
; X32-SSE-LABEL: mask_ucvt_4i32_4f64:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-SSE-NEXT: cvtdq2pd %xmm0, %xmm2
; X32-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; X32-SSE-NEXT: cvtdq2pd %xmm0, %xmm1
@@ -47,7 +47,7 @@ define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) {
;
; X32-AVX-LABEL: mask_ucvt_4i32_4f64:
; X32-AVX: # %bb.0:
-; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-AVX-NEXT: vcvtdq2pd %xmm0, %ymm0
; X32-AVX-NEXT: retl
;
@@ -80,7 +80,7 @@ define <4 x float> @lshr_truncate_mask_ucvt_4i64_4f32(<4 x i64> *%p0) {
; X32-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X32-SSE-NEXT: psrld $16, %xmm0
; X32-SSE-NEXT: cvtdq2ps %xmm0, %xmm0
-; X32-SSE-NEXT: mulps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: mulps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
@@ -90,7 +90,7 @@ define <4 x float> @lshr_truncate_mask_ucvt_4i64_4f32(<4 x i64> *%p0) {
; X32-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],mem[0,2]
; X32-AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; X32-AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
-; X32-AVX-NEXT: vmulps {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-AVX-NEXT: vmulps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
diff --git a/llvm/test/CodeGen/X86/urem-power-of-two.ll b/llvm/test/CodeGen/X86/urem-power-of-two.ll
index ba9b552df187..89555a61ea6b 100644
--- a/llvm/test/CodeGen/X86/urem-power-of-two.ll
+++ b/llvm/test/CodeGen/X86/urem-power-of-two.ll
@@ -106,7 +106,7 @@ define i8 @and_pow_2(i8 %x, i8 %y) {
define <4 x i32> @vec_const_uniform_pow_2(<4 x i32> %x) {
; X86-LABEL: vec_const_uniform_pow_2:
; X86: # %bb.0:
-; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_const_uniform_pow_2:
@@ -120,7 +120,7 @@ define <4 x i32> @vec_const_uniform_pow_2(<4 x i32> %x) {
define <4 x i32> @vec_const_nonuniform_pow_2(<4 x i32> %x) {
; X86-LABEL: vec_const_nonuniform_pow_2:
; X86: # %bb.0:
-; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vec_const_nonuniform_pow_2:
diff --git a/llvm/test/CodeGen/X86/var-permute-256.ll b/llvm/test/CodeGen/X86/var-permute-256.ll
index 98335d362f95..99e1382e6e91 100644
--- a/llvm/test/CodeGen/X86/var-permute-256.ll
+++ b/llvm/test/CodeGen/X86/var-permute-256.ll
@@ -34,7 +34,7 @@ define <4 x i64> @var_shuffle_v4i64(<4 x i64> %v, <4 x i64> %indices) nounwind {
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vpermilpd %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
@@ -94,7 +94,7 @@ define <8 x i32> @var_shuffle_v8i32(<8 x i32> %v, <8 x i32> %indices) nounwind {
; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpcmpgtd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
@@ -454,7 +454,7 @@ define <4 x double> @var_shuffle_v4f64(<4 x double> %v, <4 x i64> %indices) noun
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vpermilpd %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
@@ -514,7 +514,7 @@ define <8 x float> @var_shuffle_v8f32(<8 x float> %v, <8 x i32> %indices) nounwi
; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpcmpgtd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
@@ -576,7 +576,7 @@ define <4 x i64> @var_shuffle_v4i64_from_v2i64(<2 x i64> %v, <4 x i64> %indices)
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm3
; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm2
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
@@ -638,7 +638,7 @@ define <8 x i32> @var_shuffle_v8i32_from_v4i32(<4 x i32> %v, <8 x i32> %indices)
; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpcmpgtd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
@@ -997,7 +997,7 @@ define <4 x double> @var_shuffle_v4f64_from_v2f64(<2 x double> %v, <4 x i64> %in
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm3
; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtq {{.*}}(%rip), %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtq {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtq {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vpermilpd %ymm3, %ymm0, %ymm2
; AVX1-NEXT: vblendvpd %ymm1, %ymm2, %ymm0, %ymm0
@@ -1059,7 +1059,7 @@ define <8 x float> @var_shuffle_v8f32_from_v4f32(<4 x float> %v, <8 x i32> %indi
; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vpcmpgtd {{\.LCPI.*}}+{{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtd {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvps %ymm1, %ymm2, %ymm0, %ymm0
; AVX1-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
index 4569e69b7e50..72fcb4124819 100644
--- a/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-fptoint-128.ll
@@ -2031,7 +2031,7 @@ define <2 x i8> @strict_vector_fptosi_v2f64_to_v2i8(<2 x double> %a) #0 {
; SSE-32-LABEL: strict_vector_fptosi_v2f64_to_v2i8:
; SSE-32: # %bb.0:
; SSE-32-NEXT: cvttpd2dq %xmm0, %xmm0
-; SSE-32-NEXT: andpd {{\.LCPI.*}}, %xmm0
+; SSE-32-NEXT: andpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: retl
@@ -2082,7 +2082,7 @@ define <2 x i8> @strict_vector_fptoui_v2f64_to_v2i8(<2 x double> %a) #0 {
; SSE-32-LABEL: strict_vector_fptoui_v2f64_to_v2i8:
; SSE-32: # %bb.0:
; SSE-32-NEXT: cvttpd2dq %xmm0, %xmm0
-; SSE-32-NEXT: andpd {{\.LCPI.*}}, %xmm0
+; SSE-32-NEXT: andpd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: retl
@@ -2134,7 +2134,7 @@ define <2 x i8> @strict_vector_fptosi_v2f32_to_v2i8(<2 x float> %a) #0 {
; SSE-32: # %bb.0:
; SSE-32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0
-; SSE-32-NEXT: pand {{\.LCPI.*}}, %xmm0
+; SSE-32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: retl
@@ -2192,7 +2192,7 @@ define <2 x i8> @strict_vector_fptoui_v2f32_to_v2i8(<2 x float> %a) #0 {
; SSE-32: # %bb.0:
; SSE-32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0
-; SSE-32-NEXT: pand {{\.LCPI.*}}, %xmm0
+; SSE-32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: packuswb %xmm0, %xmm0
; SSE-32-NEXT: retl
@@ -3037,7 +3037,7 @@ define <4 x i32> @strict_vector_fptoui_v4f32_to_v4i32(<4 x float> %a) #0 {
; SSE-32-NEXT: movaps %xmm0, %xmm3
; SSE-32-NEXT: cmpltps %xmm2, %xmm3
; SSE-32-NEXT: movaps %xmm3, %xmm1
-; SSE-32-NEXT: andnps {{\.LCPI.*}}, %xmm1
+; SSE-32-NEXT: andnps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; SSE-32-NEXT: andnps %xmm2, %xmm3
; SSE-32-NEXT: subps %xmm3, %xmm0
; SSE-32-NEXT: cvttps2dq %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
index 52639987f547..5c92db796933 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
@@ -278,14 +278,14 @@ define <2 x float> @uitofp_v2i64_v2f32(<2 x i64> %x) #0 {
; SSE-32-NEXT: movd %xmm1, %eax
; SSE-32-NEXT: shrl $31, %eax
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-32-NEXT: fstps (%esp)
; SSE-32-NEXT: wait
; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-32-NEXT: movd %xmm0, %eax
; SSE-32-NEXT: shrl $31, %eax
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-32-NEXT: fstps {{[0-9]+}}(%esp)
; SSE-32-NEXT: wait
; SSE-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -347,14 +347,14 @@ define <2 x float> @uitofp_v2i64_v2f32(<2 x i64> %x) #0 {
; SSE41-32-NEXT: movd %xmm1, %eax
; SSE41-32-NEXT: shrl $31, %eax
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE41-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE41-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE41-32-NEXT: fstps (%esp)
; SSE41-32-NEXT: wait
; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE41-32-NEXT: movd %xmm0, %eax
; SSE41-32-NEXT: shrl $31, %eax
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE41-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE41-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE41-32-NEXT: fstps {{[0-9]+}}(%esp)
; SSE41-32-NEXT: wait
; SSE41-32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
@@ -415,13 +415,13 @@ define <2 x float> @uitofp_v2i64_v2f32(<2 x i64> %x) #0 {
; AVX-32-NEXT: vextractps $1, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstps {{[0-9]+}}(%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vextractps $3, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstps (%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -526,7 +526,7 @@ define <4 x float> @sitofp_v4i1_v4f32(<4 x i1> %x) #0 {
define <4 x float> @uitofp_v4i1_v4f32(<4 x i1> %x) #0 {
; SSE-32-LABEL: uitofp_v4i1_v4f32:
; SSE-32: # %bb.0:
-; SSE-32-NEXT: andps {{\.LCPI.*}}, %xmm0
+; SSE-32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE-32-NEXT: retl
;
@@ -538,7 +538,7 @@ define <4 x float> @uitofp_v4i1_v4f32(<4 x i1> %x) #0 {
;
; SSE41-32-LABEL: uitofp_v4i1_v4f32:
; SSE41-32: # %bb.0:
-; SSE41-32-NEXT: andps {{\.LCPI.*}}, %xmm0
+; SSE41-32-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE41-32-NEXT: cvtdq2ps %xmm0, %xmm0
; SSE41-32-NEXT: retl
;
@@ -550,7 +550,7 @@ define <4 x float> @uitofp_v4i1_v4f32(<4 x i1> %x) #0 {
;
; AVX1-32-LABEL: uitofp_v4i1_v4f32:
; AVX1-32: # %bb.0:
-; AVX1-32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-32-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX1-32-NEXT: retl
;
@@ -569,7 +569,7 @@ define <4 x float> @uitofp_v4i1_v4f32(<4 x i1> %x) #0 {
;
; AVX512VL-32-LABEL: uitofp_v4i1_v4f32:
; AVX512VL-32: # %bb.0:
-; AVX512VL-32-NEXT: vpandd {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
+; AVX512VL-32-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; AVX512VL-32-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX512VL-32-NEXT: retl
;
@@ -588,7 +588,7 @@ define <4 x float> @uitofp_v4i1_v4f32(<4 x i1> %x) #0 {
;
; AVX512DQVL-32-LABEL: uitofp_v4i1_v4f32:
; AVX512DQVL-32: # %bb.0:
-; AVX512DQVL-32-NEXT: vandps {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
+; AVX512DQVL-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; AVX512DQVL-32-NEXT: vcvtdq2ps %xmm0, %xmm0
; AVX512DQVL-32-NEXT: retl
;
@@ -737,10 +737,10 @@ define <4 x float> @uitofp_v4i32_v4f32(<4 x i32> %x) #0 {
; SSE-32: # %bb.0:
; SSE-32-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE-32-NEXT: pand %xmm0, %xmm1
-; SSE-32-NEXT: por {{\.LCPI.*}}, %xmm1
+; SSE-32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; SSE-32-NEXT: psrld $16, %xmm0
-; SSE-32-NEXT: por {{\.LCPI.*}}, %xmm0
-; SSE-32-NEXT: subps {{\.LCPI.*}}, %xmm0
+; SSE-32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE-32-NEXT: subps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: addps %xmm1, %xmm0
; SSE-32-NEXT: retl
;
@@ -759,10 +759,10 @@ define <4 x float> @uitofp_v4i32_v4f32(<4 x i32> %x) #0 {
; SSE41-32: # %bb.0:
; SSE41-32-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535]
; SSE41-32-NEXT: pand %xmm0, %xmm1
-; SSE41-32-NEXT: por {{\.LCPI.*}}, %xmm1
+; SSE41-32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; SSE41-32-NEXT: psrld $16, %xmm0
-; SSE41-32-NEXT: por {{\.LCPI.*}}, %xmm0
-; SSE41-32-NEXT: subps {{\.LCPI.*}}, %xmm0
+; SSE41-32-NEXT: por {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; SSE41-32-NEXT: subps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE41-32-NEXT: addps %xmm1, %xmm0
; SSE41-32-NEXT: retl
;
@@ -782,7 +782,7 @@ define <4 x float> @uitofp_v4i32_v4f32(<4 x i32> %x) #0 {
; AVX1-32-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; AVX1-32-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; AVX1-32-NEXT: vsubps {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-32-NEXT: vsubps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-32-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX1-32-NEXT: retl
;
@@ -860,7 +860,7 @@ define <2 x double> @uitofp_v2i1_v2f64(<2 x i1> %x) #0 {
; SSE-32-LABEL: uitofp_v2i1_v2f64:
; SSE-32: # %bb.0:
; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-32-NEXT: pand {{\.LCPI.*}}, %xmm0
+; SSE-32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-32-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE-32-NEXT: retl
;
@@ -874,7 +874,7 @@ define <2 x double> @uitofp_v2i1_v2f64(<2 x i1> %x) #0 {
; SSE41-32-LABEL: uitofp_v2i1_v2f64:
; SSE41-32: # %bb.0:
; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE41-32-NEXT: pand {{\.LCPI.*}}, %xmm0
+; SSE41-32-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE41-32-NEXT: cvtdq2pd %xmm0, %xmm0
; SSE41-32-NEXT: retl
;
@@ -888,7 +888,7 @@ define <2 x double> @uitofp_v2i1_v2f64(<2 x i1> %x) #0 {
; AVX1-32-LABEL: uitofp_v2i1_v2f64:
; AVX1-32: # %bb.0:
; AVX1-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-32-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX1-32-NEXT: retl
;
@@ -910,7 +910,7 @@ define <2 x double> @uitofp_v2i1_v2f64(<2 x i1> %x) #0 {
; AVX512VL-32-LABEL: uitofp_v2i1_v2f64:
; AVX512VL-32: # %bb.0:
; AVX512VL-32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512VL-32-NEXT: vpandd {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
+; AVX512VL-32-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; AVX512VL-32-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX512VL-32-NEXT: retl
;
@@ -932,7 +932,7 @@ define <2 x double> @uitofp_v2i1_v2f64(<2 x i1> %x) #0 {
; AVX512DQVL-32-LABEL: uitofp_v2i1_v2f64:
; AVX512DQVL-32: # %bb.0:
; AVX512DQVL-32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512DQVL-32-NEXT: vandps {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
+; AVX512DQVL-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; AVX512DQVL-32-NEXT: vcvtdq2pd %xmm0, %xmm0
; AVX512DQVL-32-NEXT: retl
;
@@ -1276,14 +1276,14 @@ define <2 x double> @uitofp_v2i64_v2f64(<2 x i64> %x) #0 {
; SSE-32-NEXT: movd %xmm1, %eax
; SSE-32-NEXT: shrl $31, %eax
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-32-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE-32-NEXT: wait
; SSE-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE-32-NEXT: movd %xmm0, %eax
; SSE-32-NEXT: shrl $31, %eax
; SSE-32-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE-32-NEXT: fstpl (%esp)
; SSE-32-NEXT: wait
; SSE-32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -1344,14 +1344,14 @@ define <2 x double> @uitofp_v2i64_v2f64(<2 x i64> %x) #0 {
; SSE41-32-NEXT: movd %xmm1, %eax
; SSE41-32-NEXT: shrl $31, %eax
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE41-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE41-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE41-32-NEXT: fstpl {{[0-9]+}}(%esp)
; SSE41-32-NEXT: wait
; SSE41-32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-32-NEXT: movd %xmm0, %eax
; SSE41-32-NEXT: shrl $31, %eax
; SSE41-32-NEXT: fildll {{[0-9]+}}(%esp)
-; SSE41-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; SSE41-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; SSE41-32-NEXT: fstpl (%esp)
; SSE41-32-NEXT: wait
; SSE41-32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
@@ -1411,13 +1411,13 @@ define <2 x double> @uitofp_v2i64_v2f64(<2 x i64> %x) #0 {
; AVX-32-NEXT: vextractps $1, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstpl {{[0-9]+}}(%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vextractps $3, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstpl (%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
index 456e317a2138..3d1f87f3fc06 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-256.ll
@@ -94,7 +94,7 @@ define <8 x float> @sitofp_v8i1_v8f32(<8 x i1> %x) #0 {
define <8 x float> @uitofp_v8i1_v8f32(<8 x i1> %x) #0 {
; AVX1-32-LABEL: uitofp_v8i1_v8f32:
; AVX1-32: # %bb.0:
-; AVX1-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-32-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-32-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-32-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
@@ -114,7 +114,7 @@ define <8 x float> @uitofp_v8i1_v8f32(<8 x i1> %x) #0 {
;
; AVX2-32-LABEL: uitofp_v8i1_v8f32:
; AVX2-32: # %bb.0:
-; AVX2-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX2-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX2-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-32-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX2-32-NEXT: retl
@@ -128,7 +128,7 @@ define <8 x float> @uitofp_v8i1_v8f32(<8 x i1> %x) #0 {
;
; AVX512F-32-LABEL: uitofp_v8i1_v8f32:
; AVX512F-32: # %bb.0:
-; AVX512F-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512F-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512F-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512F-32-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512F-32-NEXT: retl
@@ -142,7 +142,7 @@ define <8 x float> @uitofp_v8i1_v8f32(<8 x i1> %x) #0 {
;
; AVX512VL-32-LABEL: uitofp_v8i1_v8f32:
; AVX512VL-32: # %bb.0:
-; AVX512VL-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512VL-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512VL-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512VL-32-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512VL-32-NEXT: retl
@@ -156,7 +156,7 @@ define <8 x float> @uitofp_v8i1_v8f32(<8 x i1> %x) #0 {
;
; AVX512DQ-32-LABEL: uitofp_v8i1_v8f32:
; AVX512DQ-32: # %bb.0:
-; AVX512DQ-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512DQ-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512DQ-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQ-32-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512DQ-32-NEXT: retl
@@ -170,7 +170,7 @@ define <8 x float> @uitofp_v8i1_v8f32(<8 x i1> %x) #0 {
;
; AVX512DQVL-32-LABEL: uitofp_v8i1_v8f32:
; AVX512DQVL-32: # %bb.0:
-; AVX512DQVL-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX512DQVL-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX512DQVL-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512DQVL-32-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX512DQVL-32-NEXT: retl
@@ -386,8 +386,8 @@ define <8 x float> @uitofp_v8i32_v8f32(<8 x i32> %x) #0 {
; AVX1-32-NEXT: vpsrld $16, %xmm2, %xmm2
; AVX1-32-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-32-NEXT: vcvtdq2ps %ymm1, %ymm1
-; AVX1-32-NEXT: vmulps {{\.LCPI.*}}, %ymm1, %ymm1
-; AVX1-32-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; AVX1-32-NEXT: vmulps {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1
+; AVX1-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; AVX1-32-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX1-32-NEXT: vaddps %ymm0, %ymm1, %ymm0
; AVX1-32-NEXT: retl
@@ -462,7 +462,7 @@ define <4 x double> @sitofp_v4i1_v4f64(<4 x i1> %x) #0 {
define <4 x double> @uitofp_v4i1_v4f64(<4 x i1> %x) #0 {
; AVX1-32-LABEL: uitofp_v4i1_v4f64:
; AVX1-32: # %bb.0:
-; AVX1-32-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX1-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX1-32-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX1-32-NEXT: retl
;
@@ -488,7 +488,7 @@ define <4 x double> @uitofp_v4i1_v4f64(<4 x i1> %x) #0 {
;
; AVX512VL-32-LABEL: uitofp_v4i1_v4f64:
; AVX512VL-32: # %bb.0:
-; AVX512VL-32-NEXT: vpandd {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
+; AVX512VL-32-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; AVX512VL-32-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX512VL-32-NEXT: retl
;
@@ -507,7 +507,7 @@ define <4 x double> @uitofp_v4i1_v4f64(<4 x i1> %x) #0 {
;
; AVX512DQVL-32-LABEL: uitofp_v4i1_v4f64:
; AVX512DQVL-32: # %bb.0:
-; AVX512DQVL-32-NEXT: vandps {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
+; AVX512DQVL-32-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; AVX512DQVL-32-NEXT: vcvtdq2pd %xmm0, %ymm0
; AVX512DQVL-32-NEXT: retl
;
@@ -767,25 +767,25 @@ define <4 x double> @uitofp_v4i64_v4f64(<4 x i64> %x) #0 {
; AVX-32-NEXT: vextractps $1, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstpl (%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vextractps $3, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstpl {{[0-9]+}}(%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vextractps $1, %xmm1, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstpl {{[0-9]+}}(%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vextractps $3, %xmm1, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstpl {{[0-9]+}}(%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -1051,25 +1051,25 @@ define <4 x float> @uitofp_v4i64_v4f32(<4 x i64> %x) #0 {
; AVX-32-NEXT: vextractps $1, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstps (%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vextractps $3, %xmm0, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstps {{[0-9]+}}(%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vextractps $1, %xmm1, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstps {{[0-9]+}}(%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vextractps $3, %xmm1, %eax
; AVX-32-NEXT: shrl $31, %eax
; AVX-32-NEXT: fildll {{[0-9]+}}(%esp)
-; AVX-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; AVX-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; AVX-32-NEXT: fstps {{[0-9]+}}(%esp)
; AVX-32-NEXT: wait
; AVX-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-512.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-512.ll
index 00be17a4d162..4d54e96a2252 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-512.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-512.ll
@@ -42,7 +42,7 @@ define <16 x float> @sitofp_v16i1_v16f32(<16 x i1> %x) #0 {
define <16 x float> @uitofp_v16i1_v16f32(<16 x i1> %x) #0 {
; NODQ-32-LABEL: uitofp_v16i1_v16f32:
; NODQ-32: # %bb.0:
-; NODQ-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; NODQ-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; NODQ-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; NODQ-32-NEXT: vcvtdq2ps %zmm0, %zmm0
; NODQ-32-NEXT: retl
@@ -56,7 +56,7 @@ define <16 x float> @uitofp_v16i1_v16f32(<16 x i1> %x) #0 {
;
; DQ-32-LABEL: uitofp_v16i1_v16f32:
; DQ-32: # %bb.0:
-; DQ-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; DQ-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; DQ-32-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; DQ-32-NEXT: vcvtdq2ps %zmm0, %zmm0
; DQ-32-NEXT: retl
@@ -160,7 +160,7 @@ define <8 x double> @sitofp_v8i1_v8f64(<8 x i1> %x) #0 {
define <8 x double> @uitofp_v8i1_v8f64(<8 x i1> %x) #0 {
; NODQ-32-LABEL: uitofp_v8i1_v8f64:
; NODQ-32: # %bb.0:
-; NODQ-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; NODQ-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; NODQ-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; NODQ-32-NEXT: vcvtdq2pd %ymm0, %zmm0
; NODQ-32-NEXT: retl
@@ -174,7 +174,7 @@ define <8 x double> @uitofp_v8i1_v8f64(<8 x i1> %x) #0 {
;
; DQ-32-LABEL: uitofp_v8i1_v8f64:
; DQ-32: # %bb.0:
-; DQ-32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; DQ-32-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; DQ-32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; DQ-32-NEXT: vcvtdq2pd %ymm0, %zmm0
; DQ-32-NEXT: retl
@@ -387,49 +387,49 @@ define <8 x double> @uitofp_v8i64_v8f64(<8 x i64> %x) #0 {
; NODQ-32-NEXT: vextractps $1, %xmm2, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vextractps $3, %xmm2, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vextractps $1, %xmm3, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vextractps $3, %xmm3, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vextractps $1, %xmm0, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstpl (%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vextractps $3, %xmm0, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vextractps $1, %xmm1, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vextractps $3, %xmm1, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstpl {{[0-9]+}}(%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -612,49 +612,49 @@ define <8 x float> @uitofp_v8i64_v8f32(<8 x i64> %x) #0 {
; NODQ-32-NEXT: vextractps $1, %xmm0, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstps (%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vextractps $3, %xmm0, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vextractps $1, %xmm3, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vextractps $3, %xmm3, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vextractps $1, %xmm2, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vextractps $3, %xmm2, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vextractps $1, %xmm1, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vextractps $3, %xmm1, %eax
; NODQ-32-NEXT: shrl $31, %eax
; NODQ-32-NEXT: fildll {{[0-9]+}}(%esp)
-; NODQ-32-NEXT: fadds {{\.LCPI.*}}(,%eax,4)
+; NODQ-32-NEXT: fadds {{\.LCPI[0-9]+_[0-9]+}}(,%eax,4)
; NODQ-32-NEXT: fstps {{[0-9]+}}(%esp)
; NODQ-32-NEXT: wait
; NODQ-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
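These placeholders are emitted mechanically when the checks are regenerated; a hedged sketch of that scrubbing step follows (a hypothetical helper for illustration, not the actual code of update_llc_test_checks.py):

    import re

    # Replace each concrete constant-pool label in llc output with the bounded
    # FileCheck placeholder; "\\" in the replacement yields a literal backslash.
    def scrub_cp_labels(asm_line: str) -> str:
        return re.sub(r"\.LCPI[0-9]+_[0-9]+", r"{{\\.LCPI[0-9]+_[0-9]+}}", asm_line)

    print(scrub_cp_labels("pand .LCPI3_1, %xmm0"))
    # -> pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0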
diff --git a/llvm/test/CodeGen/X86/vec_fabs.ll b/llvm/test/CodeGen/X86/vec_fabs.ll
index e866ef23a5b9..8ce675054f41 100644
--- a/llvm/test/CodeGen/X86/vec_fabs.ll
+++ b/llvm/test/CodeGen/X86/vec_fabs.ll
@@ -9,7 +9,7 @@
define <2 x double> @fabs_v2f64(<2 x double> %p) {
; X86-LABEL: fabs_v2f64:
; X86: # %bb.0:
-; X86-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: fabs_v2f64:
@@ -24,17 +24,17 @@ declare <2 x double> @llvm.fabs.v2f64(<2 x double> %p)
define <4 x float> @fabs_v4f32(<4 x float> %p) {
; X86-AVX-LABEL: fabs_v4f32:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X86-AVX512VL-LABEL: fabs_v4f32:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vpandd {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
+; X86-AVX512VL-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; X86-AVX512VL-NEXT: retl
;
; X86-AVX512VLDQ-LABEL: fabs_v4f32:
; X86-AVX512VLDQ: # %bb.0:
-; X86-AVX512VLDQ-NEXT: vandps {{\.LCPI.*}}{1to4}, %xmm0, %xmm0
+; X86-AVX512VLDQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %xmm0, %xmm0
; X86-AVX512VLDQ-NEXT: retl
;
; X64-AVX-LABEL: fabs_v4f32:
@@ -59,17 +59,17 @@ declare <4 x float> @llvm.fabs.v4f32(<4 x float> %p)
define <4 x double> @fabs_v4f64(<4 x double> %p) {
; X86-AVX-LABEL: fabs_v4f64:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX-NEXT: retl
;
; X86-AVX512VL-LABEL: fabs_v4f64:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vpandq {{\.LCPI.*}}{1to4}, %ymm0, %ymm0
+; X86-AVX512VL-NEXT: vpandq {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %ymm0, %ymm0
; X86-AVX512VL-NEXT: retl
;
; X86-AVX512VLDQ-LABEL: fabs_v4f64:
; X86-AVX512VLDQ: # %bb.0:
-; X86-AVX512VLDQ-NEXT: vandpd {{\.LCPI.*}}{1to4}, %ymm0, %ymm0
+; X86-AVX512VLDQ-NEXT: vandpd {{\.LCPI[0-9]+_[0-9]+}}{1to4}, %ymm0, %ymm0
; X86-AVX512VLDQ-NEXT: retl
;
; X64-AVX-LABEL: fabs_v4f64:
@@ -94,17 +94,17 @@ declare <4 x double> @llvm.fabs.v4f64(<4 x double> %p)
define <8 x float> @fabs_v8f32(<8 x float> %p) {
; X86-AVX-LABEL: fabs_v8f32:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX-NEXT: retl
;
; X86-AVX512VL-LABEL: fabs_v8f32:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vpandd {{\.LCPI.*}}{1to8}, %ymm0, %ymm0
+; X86-AVX512VL-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
; X86-AVX512VL-NEXT: retl
;
; X86-AVX512VLDQ-LABEL: fabs_v8f32:
; X86-AVX512VLDQ: # %bb.0:
-; X86-AVX512VLDQ-NEXT: vandps {{\.LCPI.*}}{1to8}, %ymm0, %ymm0
+; X86-AVX512VLDQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %ymm0, %ymm0
; X86-AVX512VLDQ-NEXT: retl
;
; X64-AVX-LABEL: fabs_v8f32:
@@ -136,12 +136,12 @@ define <8 x double> @fabs_v8f64(<8 x double> %p) {
;
; X86-AVX512VL-LABEL: fabs_v8f64:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vpandq {{\.LCPI.*}}{1to8}, %zmm0, %zmm0
+; X86-AVX512VL-NEXT: vpandq {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm0, %zmm0
; X86-AVX512VL-NEXT: retl
;
; X86-AVX512VLDQ-LABEL: fabs_v8f64:
; X86-AVX512VLDQ: # %bb.0:
-; X86-AVX512VLDQ-NEXT: vandpd {{\.LCPI.*}}{1to8}, %zmm0, %zmm0
+; X86-AVX512VLDQ-NEXT: vandpd {{\.LCPI[0-9]+_[0-9]+}}{1to8}, %zmm0, %zmm0
; X86-AVX512VLDQ-NEXT: retl
;
; X64-AVX-LABEL: fabs_v8f64:
@@ -175,12 +175,12 @@ define <16 x float> @fabs_v16f32(<16 x float> %p) {
;
; X86-AVX512VL-LABEL: fabs_v16f32:
; X86-AVX512VL: # %bb.0:
-; X86-AVX512VL-NEXT: vpandd {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; X86-AVX512VL-NEXT: vpandd {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; X86-AVX512VL-NEXT: retl
;
; X86-AVX512VLDQ-LABEL: fabs_v16f32:
; X86-AVX512VLDQ: # %bb.0:
-; X86-AVX512VLDQ-NEXT: vandps {{\.LCPI.*}}{1to16}, %zmm0, %zmm0
+; X86-AVX512VLDQ-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
; X86-AVX512VLDQ-NEXT: retl
;
; X64-AVX-LABEL: fabs_v16f32:
diff --git a/llvm/test/CodeGen/X86/vec_fneg.ll b/llvm/test/CodeGen/X86/vec_fneg.ll
index 3794bd2ce94b..9c162d2123c6 100644
--- a/llvm/test/CodeGen/X86/vec_fneg.ll
+++ b/llvm/test/CodeGen/X86/vec_fneg.ll
@@ -10,7 +10,7 @@
define <4 x float> @t1(<4 x float> %Q) nounwind {
; X32-SSE-LABEL: t1:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: t1:
@@ -166,7 +166,7 @@ define <2 x float> @fneg_bitcast(i64 %i) nounwind {
define <4 x float> @fneg_undef_elts_v4f32(<4 x float> %x) {
; X32-SSE-LABEL: fneg_undef_elts_v4f32:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: fneg_undef_elts_v4f32:
@@ -194,7 +194,7 @@ define <4 x float> @fsub0_undef_elts_v4f32(<4 x float> %x) {
define <4 x float> @fneg(<4 x float> %Q) nounwind {
; X32-SSE-LABEL: fneg:
; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-SSE-NEXT: retl
;
; X64-SSE-LABEL: fneg:
diff --git a/llvm/test/CodeGen/X86/vec_fpext.ll b/llvm/test/CodeGen/X86/vec_fpext.ll
index 46ad1f16f3c0..84b6039dc1ad 100644
--- a/llvm/test/CodeGen/X86/vec_fpext.ll
+++ b/llvm/test/CodeGen/X86/vec_fpext.ll
@@ -255,42 +255,42 @@ define <2 x double> @fpext_fromconst() {
; X32-SSE: # %bb.0: # %entry
; X32-SSE-NEXT: movaps {{.*#+}} xmm0 = [1.0E+0,-2.0E+0]
; X32-SSE-NEXT: # encoding: [0x0f,0x28,0x05,A,A,A,A]
-; X32-SSE-NEXT: # fixup A - offset: 3, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X32-SSE-NEXT: # fixup A - offset: 3, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X32-SSE-NEXT: retl # encoding: [0xc3]
;
; X32-AVX-LABEL: fpext_fromconst:
; X32-AVX: # %bb.0: # %entry
; X32-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1.0E+0,-2.0E+0]
; X32-AVX-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; X32-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X32-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X32-AVX-NEXT: retl # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: fpext_fromconst:
; X32-AVX512VL: # %bb.0: # %entry
-; X32-AVX512VL-NEXT: vmovaps {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [1.0E+0,-2.0E+0]
+; X32-AVX512VL-NEXT: vmovaps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0 # EVEX TO VEX Compression xmm0 = [1.0E+0,-2.0E+0]
; X32-AVX512VL-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; X32-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
+; X32-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X32-AVX512VL-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_fromconst:
; X64-SSE: # %bb.0: # %entry
; X64-SSE-NEXT: movaps {{.*#+}} xmm0 = [1.0E+0,-2.0E+0]
; X64-SSE-NEXT: # encoding: [0x0f,0x28,0x05,A,A,A,A]
-; X64-SSE-NEXT: # fixup A - offset: 3, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-SSE-NEXT: # fixup A - offset: 3, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX-LABEL: fpext_fromconst:
; X64-AVX: # %bb.0: # %entry
; X64-AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1.0E+0,-2.0E+0]
; X64-AVX-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: fpext_fromconst:
; X64-AVX512VL: # %bb.0: # %entry
; X64-AVX512VL-NEXT: vmovaps {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [1.0E+0,-2.0E+0]
; X64-AVX512VL-NEXT: # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
-; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
+; X64-AVX512VL-NEXT: # fixup A - offset: 4, value: {{\.LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT: retq # encoding: [0xc3]
entry:
%0 = insertelement <2 x float> undef, float 1.0, i32 0
diff --git a/llvm/test/CodeGen/X86/vec_fptrunc.ll b/llvm/test/CodeGen/X86/vec_fptrunc.ll
index e7318d9d6972..56da56204799 100644
--- a/llvm/test/CodeGen/X86/vec_fptrunc.ll
+++ b/llvm/test/CodeGen/X86/vec_fptrunc.ll
@@ -186,14 +186,14 @@ define <4 x float> @fptrunc_fromreg2_zext(<2 x double> %arg) {
define <4 x float> @fptrunc_fromconst() {
; X32-SSE-LABEL: fptrunc_fromconst:
; X32-SSE: # %bb.0: # %entry
-; X32-SSE-NEXT: cvtpd2ps {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: cvtpd2ps {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: cvtpd2ps {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
+; X32-SSE-NEXT: cvtpd2ps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X32-SSE-NEXT: retl
;
; X32-AVX-LABEL: fptrunc_fromconst:
; X32-AVX: # %bb.0: # %entry
-; X32-AVX-NEXT: vcvtpd2psy {{\.LCPI.*}}, %xmm0
+; X32-AVX-NEXT: vcvtpd2psy {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-AVX-NEXT: retl
;
; X64-SSE-LABEL: fptrunc_fromconst:
diff --git a/llvm/test/CodeGen/X86/vec_logical.ll b/llvm/test/CodeGen/X86/vec_logical.ll
index ec29d4886a2b..12ee3a7336fa 100644
--- a/llvm/test/CodeGen/X86/vec_logical.ll
+++ b/llvm/test/CodeGen/X86/vec_logical.ll
@@ -5,13 +5,13 @@
define void @t(<4 x float> %A) {
; SSE-LABEL: t:
; SSE: # %bb.0:
-; SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; SSE-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; SSE-NEXT: movaps %xmm0, 0
; SSE-NEXT: retl
;
; AVX-LABEL: t:
; AVX: # %bb.0:
-; AVX-NEXT: vxorps {{\.LCPI.*}}, %xmm0, %xmm0
+; AVX-NEXT: vxorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, 0
; AVX-NEXT: retl
%tmp1277 = fsub <4 x float> < float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00 >, %A
diff --git a/llvm/test/CodeGen/X86/vec_partial.ll b/llvm/test/CodeGen/X86/vec_partial.ll
index a9044c6ffb50..f3c8369e7834 100644
--- a/llvm/test/CodeGen/X86/vec_partial.ll
+++ b/llvm/test/CodeGen/X86/vec_partial.ll
@@ -6,7 +6,7 @@
define <3 x float> @addf3(<3 x float> %x) {
; X86-LABEL: addf3:
; X86: # %bb.0: # %entry
-; X86-NEXT: addps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: addps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: addf3:
diff --git a/llvm/test/CodeGen/X86/vec_reassociate.ll b/llvm/test/CodeGen/X86/vec_reassociate.ll
index c8b61809d31a..277f877cacf0 100644
--- a/llvm/test/CodeGen/X86/vec_reassociate.ll
+++ b/llvm/test/CodeGen/X86/vec_reassociate.ll
@@ -38,7 +38,7 @@ define <4 x i32> @mul_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: mul_4i32:
; X86: # %bb.0:
; X86-NEXT: pmulld %xmm1, %xmm0
-; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_4i32:
@@ -56,7 +56,7 @@ define <4 x i32> @mul_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: mul_4i32_commute:
; X86: # %bb.0:
; X86-NEXT: pmulld %xmm1, %xmm0
-; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_4i32_commute:
@@ -74,7 +74,7 @@ define <4 x i32> @and_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: and_4i32:
; X86: # %bb.0:
; X86-NEXT: andps %xmm1, %xmm0
-; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: and_4i32:
@@ -92,7 +92,7 @@ define <4 x i32> @and_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: and_4i32_commute:
; X86: # %bb.0:
; X86-NEXT: andps %xmm1, %xmm0
-; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: and_4i32_commute:
@@ -110,7 +110,7 @@ define <4 x i32> @or_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: or_4i32:
; X86: # %bb.0:
; X86-NEXT: orps %xmm1, %xmm0
-; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: or_4i32:
@@ -128,7 +128,7 @@ define <4 x i32> @or_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: or_4i32_commute:
; X86: # %bb.0:
; X86-NEXT: orps %xmm1, %xmm0
-; X86-NEXT: orps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: orps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: or_4i32_commute:
@@ -146,7 +146,7 @@ define <4 x i32> @xor_4i32(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: xor_4i32:
; X86: # %bb.0:
; X86-NEXT: xorps %xmm1, %xmm0
-; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: xor_4i32:
@@ -164,7 +164,7 @@ define <4 x i32> @xor_4i32_commute(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: xor_4i32_commute:
; X86: # %bb.0:
; X86-NEXT: xorps %xmm1, %xmm0
-; X86-NEXT: xorps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: xorps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: xor_4i32_commute:
diff --git a/llvm/test/CodeGen/X86/vec_shift4.ll b/llvm/test/CodeGen/X86/vec_shift4.ll
index 789796ed4a41..e0428b185e5f 100644
--- a/llvm/test/CodeGen/X86/vec_shift4.ll
+++ b/llvm/test/CodeGen/X86/vec_shift4.ll
@@ -6,7 +6,7 @@ define <2 x i64> @shl1(<4 x i32> %r, <4 x i32> %a) nounwind readnone ssp {
; X86-LABEL: shl1:
; X86: # %bb.0: # %entry
; X86-NEXT: pslld $23, %xmm1
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-NEXT: cvttps2dq %xmm1, %xmm1
; X86-NEXT: pmulld %xmm1, %xmm0
; X86-NEXT: retl
@@ -31,12 +31,12 @@ define <2 x i64> @shl2(<16 x i8> %r, <16 x i8> %a) nounwind readnone ssp {
; X86-NEXT: psllw $5, %xmm1
; X86-NEXT: movdqa %xmm0, %xmm3
; X86-NEXT: psllw $4, %xmm3
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm3
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-NEXT: movdqa %xmm1, %xmm0
; X86-NEXT: pblendvb %xmm0, %xmm3, %xmm2
; X86-NEXT: movdqa %xmm2, %xmm3
; X86-NEXT: psllw $2, %xmm3
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm3
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-NEXT: paddb %xmm1, %xmm1
; X86-NEXT: movdqa %xmm1, %xmm0
; X86-NEXT: pblendvb %xmm0, %xmm3, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-fshl-128.ll b/llvm/test/CodeGen/X86/vector-fshl-128.ll
index d74db60f7865..c73edfbc525c 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-128.ll
@@ -406,7 +406,7 @@ define <4 x i32> @var_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %amt)
; X86-SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm1[0,3]
; X86-SSE2-NEXT: pand %xmm4, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm2
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: cvttps2dq %xmm2, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -703,7 +703,7 @@ define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt)
; X86-SSE2-NEXT: pandn %xmm3, %xmm5
; X86-SSE2-NEXT: psrlw $1, %xmm3
; X86-SSE2-NEXT: pand %xmm4, %xmm3
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: movdqa %xmm2, %xmm1
; X86-SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; X86-SSE2-NEXT: pslld $23, %xmm1
@@ -1017,7 +1017,7 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
; X86-SSE2-NEXT: pandn %xmm1, %xmm7
; X86-SSE2-NEXT: psrlw $4, %xmm1
; X86-SSE2-NEXT: pand %xmm3, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: por %xmm7, %xmm1
; X86-SSE2-NEXT: paddb %xmm6, %xmm6
; X86-SSE2-NEXT: pxor %xmm3, %xmm3
@@ -1026,7 +1026,7 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
; X86-SSE2-NEXT: pandn %xmm1, %xmm7
; X86-SSE2-NEXT: psrlw $2, %xmm1
; X86-SSE2-NEXT: pand %xmm3, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: por %xmm7, %xmm1
; X86-SSE2-NEXT: paddb %xmm6, %xmm6
; X86-SSE2-NEXT: pxor %xmm3, %xmm3
@@ -1045,7 +1045,7 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
; X86-SSE2-NEXT: pandn %xmm0, %xmm4
; X86-SSE2-NEXT: psllw $4, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: por %xmm4, %xmm0
; X86-SSE2-NEXT: paddb %xmm2, %xmm2
; X86-SSE2-NEXT: pxor %xmm1, %xmm1
@@ -1054,7 +1054,7 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
; X86-SSE2-NEXT: pandn %xmm0, %xmm4
; X86-SSE2-NEXT: psllw $2, %xmm0
; X86-SSE2-NEXT: pand %xmm1, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: por %xmm4, %xmm0
; X86-SSE2-NEXT: pxor %xmm1, %xmm1
; X86-SSE2-NEXT: paddb %xmm2, %xmm2
@@ -1766,7 +1766,7 @@ define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %
; X86-SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
; X86-SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X86-SSE2-NEXT: psrlw $1, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psrlw %xmm2, %xmm1
; X86-SSE2-NEXT: psrlw %xmm2, %xmm5
; X86-SSE2-NEXT: psrlw $8, %xmm5
@@ -2407,9 +2407,9 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
;
; X86-SSE2-LABEL: constant_funnnel_v8i16:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm1
-; X86-SSE2-NEXT: pmulhuw {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmulhuw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: por %xmm1, %xmm0
; X86-SSE2-NEXT: retl
%res = call <8 x i16> @llvm.fshl.v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>)
@@ -2603,22 +2603,22 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; X86-SSE2-NEXT: pand %xmm3, %xmm2
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm3, %xmm0
; X86-SSE2-NEXT: packuswb %xmm2, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm3
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE2-NEXT: psrlw $8, %xmm3
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psrlw $8, %xmm1
; X86-SSE2-NEXT: packuswb %xmm3, %xmm1
; X86-SSE2-NEXT: por %xmm1, %xmm0
@@ -2932,9 +2932,9 @@ define <16 x i8> @splatconstant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwi
; X86-SSE2-LABEL: splatconstant_funnnel_v16i8:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlw $4, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psllw $4, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: por %xmm1, %xmm0
; X86-SSE2-NEXT: retl
%res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
index 762672bc446c..2beaaea11de9 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
@@ -282,9 +282,9 @@ define <4 x i32> @var_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind {
;
; X86-SSE2-LABEL: var_funnnel_v4i32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -458,7 +458,7 @@ define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind {
;
; X86-SSE2-LABEL: var_funnnel_v8i16:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa %xmm1, %xmm2
; X86-SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
; X86-SSE2-NEXT: pslld $23, %xmm2
@@ -706,20 +706,20 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind {
; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm3
; X86-SSE2-NEXT: movdqa %xmm2, %xmm4
; X86-SSE2-NEXT: psrlw $4, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
; X86-SSE2-NEXT: movdqa %xmm2, %xmm5
; X86-SSE2-NEXT: psllw $4, %xmm5
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm5
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm5
; X86-SSE2-NEXT: por %xmm4, %xmm5
; X86-SSE2-NEXT: pand %xmm3, %xmm5
; X86-SSE2-NEXT: pandn %xmm2, %xmm3
; X86-SSE2-NEXT: por %xmm5, %xmm3
; X86-SSE2-NEXT: movdqa %xmm3, %xmm2
; X86-SSE2-NEXT: psrlw $6, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: movdqa %xmm3, %xmm4
; X86-SSE2-NEXT: psllw $2, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
; X86-SSE2-NEXT: por %xmm2, %xmm4
; X86-SSE2-NEXT: paddb %xmm1, %xmm1
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
@@ -731,7 +731,7 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind {
; X86-SSE2-NEXT: paddb %xmm2, %xmm3
; X86-SSE2-NEXT: movdqa %xmm2, %xmm4
; X86-SSE2-NEXT: psrlw $7, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
; X86-SSE2-NEXT: por %xmm3, %xmm4
; X86-SSE2-NEXT: paddb %xmm1, %xmm1
; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm0
@@ -1103,7 +1103,7 @@ define <8 x i16> @splatvar_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v8i16:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,0,0]
; X86-SSE2-NEXT: pand %xmm1, %xmm2
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -1333,7 +1333,7 @@ define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v16i8:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; X86-SSE2-NEXT: psubb %xmm1, %xmm2
; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
@@ -1848,20 +1848,20 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x) nounwind {
; X86-SSE2-NEXT: pxor %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: psrlw $8, %xmm2
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm3
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE2-NEXT: psrlw $8, %xmm3
; X86-SSE2-NEXT: packuswb %xmm2, %xmm3
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; X86-SSE2-NEXT: pand %xmm2, %xmm1
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm2, %xmm0
; X86-SSE2-NEXT: packuswb %xmm1, %xmm0
; X86-SSE2-NEXT: por %xmm3, %xmm0
@@ -2168,9 +2168,9 @@ define <16 x i8> @splatconstant_funnnel_v16i8(<16 x i8> %x) nounwind {
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $4, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psllw $4, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: por %xmm1, %xmm0
; X86-SSE2-NEXT: retl
%res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
index ced1eab5b343..5bb7cacf6a9e 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
@@ -135,9 +135,9 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
;
; X86-SSE2-LABEL: var_funnnel_v2i32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -290,9 +290,9 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
; X86-SSE2-LABEL: splatvar_funnnel_v2i32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll
index 6e7d036d9267..f36a871d5e41 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll
@@ -407,7 +407,7 @@ define <4 x i32> @var_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %amt)
; X86-SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm1[0,3]
; X86-SSE2-NEXT: pandn %xmm4, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm2
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: cvttps2dq %xmm2, %xmm1
; X86-SSE2-NEXT: pslld $1, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
@@ -702,7 +702,7 @@ define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %amt)
; X86-SSE2-NEXT: pandn %xmm1, %xmm4
; X86-SSE2-NEXT: psrlw $1, %xmm1
; X86-SSE2-NEXT: pand %xmm3, %xmm1
-; X86-SSE2-NEXT: pandn {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pandn {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: movdqa %xmm2, %xmm3
; X86-SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
; X86-SSE2-NEXT: pslld $23, %xmm3
@@ -1005,7 +1005,7 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
; X86-SSE2-NEXT: pandn %xmm0, %xmm7
; X86-SSE2-NEXT: psllw $4, %xmm0
; X86-SSE2-NEXT: pand %xmm6, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: por %xmm7, %xmm0
; X86-SSE2-NEXT: paddb %xmm4, %xmm4
; X86-SSE2-NEXT: pxor %xmm6, %xmm6
@@ -1014,7 +1014,7 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
; X86-SSE2-NEXT: pandn %xmm0, %xmm7
; X86-SSE2-NEXT: psllw $2, %xmm0
; X86-SSE2-NEXT: pand %xmm6, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: por %xmm7, %xmm0
; X86-SSE2-NEXT: paddb %xmm4, %xmm4
; X86-SSE2-NEXT: pxor %xmm6, %xmm6
@@ -1031,7 +1031,7 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
; X86-SSE2-NEXT: pandn %xmm1, %xmm6
; X86-SSE2-NEXT: psrlw $4, %xmm1
; X86-SSE2-NEXT: pand %xmm5, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: por %xmm6, %xmm1
; X86-SSE2-NEXT: paddb %xmm2, %xmm2
; X86-SSE2-NEXT: pxor %xmm5, %xmm5
@@ -1040,7 +1040,7 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
; X86-SSE2-NEXT: pandn %xmm1, %xmm6
; X86-SSE2-NEXT: psrlw $2, %xmm1
; X86-SSE2-NEXT: pand %xmm5, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: por %xmm6, %xmm1
; X86-SSE2-NEXT: paddb %xmm2, %xmm2
; X86-SSE2-NEXT: pcmpgtb %xmm2, %xmm3
@@ -1048,7 +1048,7 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt)
; X86-SSE2-NEXT: pandn %xmm1, %xmm2
; X86-SSE2-NEXT: psrlw $1, %xmm1
; X86-SSE2-NEXT: pand %xmm3, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: por %xmm2, %xmm1
; X86-SSE2-NEXT: por %xmm4, %xmm1
; X86-SSE2-NEXT: por %xmm1, %xmm0
@@ -2133,10 +2133,10 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,65535,65535,65535,65535]
; X86-SSE2-NEXT: movdqa %xmm2, %xmm3
; X86-SSE2-NEXT: pandn %xmm1, %xmm3
-; X86-SSE2-NEXT: pmulhuw {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmulhuw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pand %xmm1, %xmm2
; X86-SSE2-NEXT: psllw $1, %xmm0
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: por %xmm3, %xmm0
; X86-SSE2-NEXT: por %xmm2, %xmm0
; X86-SSE2-NEXT: retl
@@ -2321,20 +2321,20 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm3
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE2-NEXT: psrlw $8, %xmm3
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psrlw $8, %xmm1
; X86-SSE2-NEXT: packuswb %xmm3, %xmm1
; X86-SSE2-NEXT: paddb %xmm0, %xmm0
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; X86-SSE2-NEXT: pand %xmm3, %xmm2
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm3, %xmm0
; X86-SSE2-NEXT: packuswb %xmm2, %xmm0
; X86-SSE2-NEXT: por %xmm1, %xmm0
@@ -2648,9 +2648,9 @@ define <16 x i8> @splatconstant_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y) nounwi
; X86-SSE2-LABEL: splatconstant_funnnel_v16i8:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlw $4, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psllw $4, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: por %xmm1, %xmm0
; X86-SSE2-NEXT: retl
%res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
index 11753627a571..6adf34e0dcf5 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
@@ -296,9 +296,9 @@ define <4 x i32> @var_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind {
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: psubd %xmm1, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm2
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: cvttps2dq %xmm2, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -492,7 +492,7 @@ define <8 x i16> @var_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind {
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: psubw %xmm1, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: movdqa %xmm2, %xmm1
; X86-SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; X86-SSE2-NEXT: pslld $23, %xmm1
@@ -746,20 +746,20 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind {
; X86-SSE2-NEXT: pcmpgtb %xmm3, %xmm1
; X86-SSE2-NEXT: movdqa %xmm2, %xmm4
; X86-SSE2-NEXT: psrlw $4, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
; X86-SSE2-NEXT: movdqa %xmm2, %xmm5
; X86-SSE2-NEXT: psllw $4, %xmm5
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm5
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm5
; X86-SSE2-NEXT: por %xmm4, %xmm5
; X86-SSE2-NEXT: pand %xmm1, %xmm5
; X86-SSE2-NEXT: pandn %xmm2, %xmm1
; X86-SSE2-NEXT: por %xmm5, %xmm1
; X86-SSE2-NEXT: movdqa %xmm1, %xmm2
; X86-SSE2-NEXT: psrlw $6, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: movdqa %xmm1, %xmm4
; X86-SSE2-NEXT: psllw $2, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
; X86-SSE2-NEXT: por %xmm2, %xmm4
; X86-SSE2-NEXT: paddb %xmm3, %xmm3
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
@@ -771,7 +771,7 @@ define <16 x i8> @var_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind {
; X86-SSE2-NEXT: paddb %xmm2, %xmm1
; X86-SSE2-NEXT: movdqa %xmm2, %xmm4
; X86-SSE2-NEXT: psrlw $7, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
; X86-SSE2-NEXT: por %xmm1, %xmm4
; X86-SSE2-NEXT: paddb %xmm3, %xmm3
; X86-SSE2-NEXT: pcmpgtb %xmm3, %xmm0
@@ -1179,7 +1179,7 @@ define <8 x i16> @splatvar_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: psubw %xmm1, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,0,0]
; X86-SSE2-NEXT: pand %xmm2, %xmm1
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -1420,7 +1420,7 @@ define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: psubb %xmm1, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; X86-SSE2-NEXT: psubb %xmm2, %xmm3
; X86-SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
@@ -1934,20 +1934,20 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x) nounwind {
; X86-SSE2-NEXT: pxor %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: psrlw $8, %xmm2
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm3
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE2-NEXT: psrlw $8, %xmm3
; X86-SSE2-NEXT: packuswb %xmm2, %xmm3
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; X86-SSE2-NEXT: pand %xmm2, %xmm1
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm2, %xmm0
; X86-SSE2-NEXT: packuswb %xmm1, %xmm0
; X86-SSE2-NEXT: por %xmm3, %xmm0
@@ -2254,9 +2254,9 @@ define <16 x i8> @splatconstant_funnnel_v16i8(<16 x i8> %x) nounwind {
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $4, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psllw $4, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: por %xmm1, %xmm0
; X86-SSE2-NEXT: retl
%res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
index a89efc635070..c25f277dfbac 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
@@ -147,9 +147,9 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: psubd %xmm1, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm2
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: cvttps2dq %xmm2, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -316,9 +316,9 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: psubd %xmm1, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: pslld $23, %xmm2
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: cvttps2dq %xmm2, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-gep.ll b/llvm/test/CodeGen/X86/vector-gep.ll
index f8c5a4fc253f..f0673bdd1aed 100644
--- a/llvm/test/CodeGen/X86/vector-gep.ll
+++ b/llvm/test/CodeGen/X86/vector-gep.ll
@@ -6,7 +6,7 @@ define <4 x i32*> @AGEP0(i32* %ptr) nounwind {
; CHECK-LABEL: AGEP0:
; CHECK: # %bb.0:
; CHECK-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm0
-; CHECK-NEXT: vpaddd {{\.LCPI.*}}, %xmm0, %xmm0
+; CHECK-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; CHECK-NEXT: retl
%vecinit.i = insertelement <4 x i32*> undef, i32* %ptr, i32 0
%vecinit2.i = insertelement <4 x i32*> %vecinit.i, i32* %ptr, i32 1
diff --git a/llvm/test/CodeGen/X86/vector-idiv-v2i32.ll b/llvm/test/CodeGen/X86/vector-idiv-v2i32.ll
index 8ce651e9cd15..85157d11e4b4 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-v2i32.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-v2i32.ll
@@ -259,7 +259,7 @@ define void @test_urem_pow2_v2i32(<2 x i32>* %x, <2 x i32>* %y) nounwind {
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: andps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: movlps %xmm0, (%eax)
; X86-NEXT: retl
%a = load <2 x i32>, <2 x i32>* %x
diff --git a/llvm/test/CodeGen/X86/vector-lzcnt-128.ll b/llvm/test/CodeGen/X86/vector-lzcnt-128.ll
index cc8b39107064..9576a0d7a8dc 100644
--- a/llvm/test/CodeGen/X86/vector-lzcnt-128.ll
+++ b/llvm/test/CodeGen/X86/vector-lzcnt-128.ll
@@ -236,7 +236,7 @@ define <2 x i64> @testv2i64(<2 x i64> %in) nounwind {
; X32-SSE-NEXT: pshufb %xmm0, %xmm3
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-SSE-NEXT: pxor %xmm4, %xmm4
; X32-SSE-NEXT: pshufb %xmm1, %xmm2
; X32-SSE-NEXT: pcmpeqb %xmm4, %xmm1
@@ -489,7 +489,7 @@ define <2 x i64> @testv2i64u(<2 x i64> %in) nounwind {
; X32-SSE-NEXT: pshufb %xmm0, %xmm3
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-SSE-NEXT: pxor %xmm4, %xmm4
; X32-SSE-NEXT: pshufb %xmm1, %xmm2
; X32-SSE-NEXT: pcmpeqb %xmm4, %xmm1
@@ -724,7 +724,7 @@ define <4 x i32> @testv4i32(<4 x i32> %in) nounwind {
; X32-SSE-NEXT: pshufb %xmm0, %xmm3
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-SSE-NEXT: pxor %xmm4, %xmm4
; X32-SSE-NEXT: pshufb %xmm1, %xmm2
; X32-SSE-NEXT: pcmpeqb %xmm4, %xmm1
@@ -953,7 +953,7 @@ define <4 x i32> @testv4i32u(<4 x i32> %in) nounwind {
; X32-SSE-NEXT: pshufb %xmm0, %xmm3
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-SSE-NEXT: pxor %xmm4, %xmm4
; X32-SSE-NEXT: pshufb %xmm1, %xmm2
; X32-SSE-NEXT: pcmpeqb %xmm4, %xmm1
@@ -1153,7 +1153,7 @@ define <8 x i16> @testv8i16(<8 x i16> %in) nounwind {
; X32-SSE-NEXT: pshufb %xmm0, %xmm3
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-SSE-NEXT: pxor %xmm4, %xmm4
; X32-SSE-NEXT: pshufb %xmm1, %xmm2
; X32-SSE-NEXT: pcmpeqb %xmm4, %xmm1
@@ -1346,7 +1346,7 @@ define <8 x i16> @testv8i16u(<8 x i16> %in) nounwind {
; X32-SSE-NEXT: pshufb %xmm0, %xmm3
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
; X32-SSE-NEXT: psrlw $4, %xmm1
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X32-SSE-NEXT: pxor %xmm4, %xmm4
; X32-SSE-NEXT: pshufb %xmm1, %xmm2
; X32-SSE-NEXT: pcmpeqb %xmm4, %xmm1
@@ -1501,7 +1501,7 @@ define <16 x i8> @testv16i8(<16 x i8> %in) nounwind {
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: pshufb %xmm0, %xmm2
; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-SSE-NEXT: pxor %xmm3, %xmm3
; X32-SSE-NEXT: pcmpeqb %xmm0, %xmm3
; X32-SSE-NEXT: pand %xmm2, %xmm3
@@ -1651,7 +1651,7 @@ define <16 x i8> @testv16i8u(<16 x i8> %in) nounwind {
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
; X32-SSE-NEXT: pshufb %xmm0, %xmm2
; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X32-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X32-SSE-NEXT: pxor %xmm3, %xmm3
; X32-SSE-NEXT: pcmpeqb %xmm0, %xmm3
; X32-SSE-NEXT: pand %xmm2, %xmm3
diff --git a/llvm/test/CodeGen/X86/vector-lzcnt-256.ll b/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
index b7632aea6d2b..9624b3601a95 100644
--- a/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
+++ b/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
@@ -164,7 +164,7 @@ define <4 x i64> @testv4i64(<4 x i64> %in) nounwind {
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm3, %ymm3
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
@@ -346,7 +346,7 @@ define <4 x i64> @testv4i64u(<4 x i64> %in) nounwind {
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm3, %ymm3
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
@@ -503,7 +503,7 @@ define <8 x i32> @testv8i32(<8 x i32> %in) nounwind {
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm3, %ymm3
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
@@ -655,7 +655,7 @@ define <8 x i32> @testv8i32u(<8 x i32> %in) nounwind {
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm3, %ymm3
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
@@ -778,7 +778,7 @@ define <16 x i16> @testv16i16(<16 x i16> %in) nounwind {
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm3, %ymm3
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
@@ -895,7 +895,7 @@ define <16 x i16> @testv16i16u(<16 x i16> %in) nounwind {
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm3
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm3, %ymm3
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm3, %ymm3
; X32-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm3, %ymm5
; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
@@ -994,7 +994,7 @@ define <32 x i8> @testv32i8(<32 x i8> %in) nounwind {
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
; X32-AVX-NEXT: vpcmpeqb %ymm3, %ymm0, %ymm3
; X32-AVX-NEXT: vpand %ymm3, %ymm2, %ymm2
@@ -1088,7 +1088,7 @@ define <32 x i8> @testv32i8u(<32 x i8> %in) nounwind {
; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; X32-AVX-NEXT: vpshufb %ymm0, %ymm1, %ymm2
; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X32-AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
; X32-AVX-NEXT: vpcmpeqb %ymm3, %ymm0, %ymm3
; X32-AVX-NEXT: vpand %ymm3, %ymm2, %ymm2
diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll
index 20e6e05440d8..837bf8a27f3a 100644
--- a/llvm/test/CodeGen/X86/vector-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-mul.ll
@@ -55,7 +55,7 @@ define <16 x i8> @mul_v16i8_32(<16 x i8> %a0) nounwind {
; X86-SSE-LABEL: mul_v16i8_32:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psllw $5, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v16i8_32:
@@ -118,7 +118,7 @@ define <2 x i64> @mul_v2i64_32_8(<2 x i64> %a0) nounwind {
define <4 x i32> @mul_v4i32_1_2_4_8(<4 x i32> %a0) nounwind {
; X86-SSE-LABEL: mul_v4i32_1_2_4_8:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v4i32_1_2_4_8:
@@ -147,7 +147,7 @@ define <4 x i32> @mul_v4i32_1_2_4_8(<4 x i32> %a0) nounwind {
define <8 x i16> @mul_v8i16_1_2_4_8_16_32_64_128(<8 x i16> %a0) nounwind {
; X86-SSE-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
@@ -250,7 +250,7 @@ define <2 x i64> @mul_v2i64_17(<2 x i64> %a0) nounwind {
define <4 x i32> @mul_v4i32_17(<4 x i32> %a0) nounwind {
; X86-SSE-LABEL: mul_v4i32_17:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v4i32_17:
@@ -280,7 +280,7 @@ define <4 x i32> @mul_v4i32_17(<4 x i32> %a0) nounwind {
define <8 x i16> @mul_v8i16_17(<8 x i16> %a0) nounwind {
; X86-SSE-LABEL: mul_v8i16_17:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v8i16_17:
@@ -301,7 +301,7 @@ define <16 x i8> @mul_v16i8_17(<16 x i8> %a0) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movdqa %xmm0, %xmm1
; X86-SSE-NEXT: psllw $4, %xmm1
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: paddb %xmm0, %xmm1
; X86-SSE-NEXT: movdqa %xmm1, %xmm0
; X86-SSE-NEXT: retl
@@ -523,7 +523,7 @@ define <2 x i64> @mul_v2i64_neg1025(<2 x i64> %a0) nounwind {
define <4 x i32> @mul_v4i32_neg33(<4 x i32> %a0) nounwind {
; X86-SSE-LABEL: mul_v4i32_neg33:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v4i32_neg33:
@@ -553,7 +553,7 @@ define <4 x i32> @mul_v4i32_neg33(<4 x i32> %a0) nounwind {
define <8 x i16> @mul_v8i16_neg9(<8 x i16> %a0) nounwind {
; X86-SSE-LABEL: mul_v8i16_neg9:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v8i16_neg9:
@@ -574,7 +574,7 @@ define <16 x i8> @mul_v16i8_neg5(<16 x i8> %a0) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movdqa %xmm0, %xmm1
; X86-SSE-NEXT: psllw $2, %xmm1
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: paddb %xmm0, %xmm1
; X86-SSE-NEXT: pxor %xmm0, %xmm0
; X86-SSE-NEXT: psubb %xmm1, %xmm0
@@ -845,7 +845,7 @@ define <2 x i64> @mul_v2i64_17_65(<2 x i64> %a0) nounwind {
define <4 x i32> @mul_v4i32_5_17_33_65(<4 x i32> %a0) nounwind {
; X86-SSE-LABEL: mul_v4i32_5_17_33_65:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v4i32_5_17_33_65:
@@ -864,7 +864,7 @@ define <4 x i32> @mul_v4i32_5_17_33_65(<4 x i32> %a0) nounwind {
define <8 x i16> @mul_v8i16_2_3_9_17_33_65_129_257(<8 x i16> %a0) nounwind {
; X86-SSE-LABEL: mul_v8i16_2_3_9_17_33_65_129_257:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v8i16_2_3_9_17_33_65_129_257:
@@ -885,10 +885,10 @@ define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8>
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; X86-SSE-NEXT: pand %xmm2, %xmm0
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: pand %xmm2, %xmm1
; X86-SSE-NEXT: packuswb %xmm0, %xmm1
; X86-SSE-NEXT: movdqa %xmm1, %xmm0
@@ -974,7 +974,7 @@ define <2 x i64> @mul_v2i64_7(<2 x i64> %a0) nounwind {
define <4 x i32> @mul_v4i32_7(<4 x i32> %a0) nounwind {
; X86-SSE-LABEL: mul_v4i32_7:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v4i32_7:
@@ -1004,7 +1004,7 @@ define <4 x i32> @mul_v4i32_7(<4 x i32> %a0) nounwind {
define <8 x i16> @mul_v8i16_7(<8 x i16> %a0) nounwind {
; X86-SSE-LABEL: mul_v8i16_7:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v8i16_7:
@@ -1025,7 +1025,7 @@ define <16 x i8> @mul_v16i8_31(<16 x i8> %a0) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movdqa %xmm0, %xmm1
; X86-SSE-NEXT: psllw $5, %xmm1
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: psubb %xmm0, %xmm1
; X86-SSE-NEXT: movdqa %xmm1, %xmm0
; X86-SSE-NEXT: retl
@@ -1097,7 +1097,7 @@ define <2 x i64> @mul_v2i64_neg7(<2 x i64> %a0) nounwind {
define <4 x i32> @mul_v4i32_neg63(<4 x i32> %a0) nounwind {
; X86-SSE-LABEL: mul_v4i32_neg63:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v4i32_neg63:
@@ -1127,7 +1127,7 @@ define <4 x i32> @mul_v4i32_neg63(<4 x i32> %a0) nounwind {
define <8 x i16> @mul_v8i16_neg31(<8 x i16> %a0) nounwind {
; X86-SSE-LABEL: mul_v8i16_neg31:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v8i16_neg31:
@@ -1148,7 +1148,7 @@ define <16 x i8> @mul_v16i8_neg15(<16 x i8> %a0) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movdqa %xmm0, %xmm1
; X86-SSE-NEXT: psllw $4, %xmm1
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: psubb %xmm1, %xmm0
; X86-SSE-NEXT: retl
;
@@ -1503,7 +1503,7 @@ define <2 x i64> @mul_v2i64_15_neg_63(<2 x i64> %a0) nounwind {
define <4 x i32> @mul_v4i32_0_15_31_7(<4 x i32> %a0) nounwind {
; X86-SSE-LABEL: mul_v4i32_0_15_31_7:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v4i32_0_15_31_7:
@@ -1522,7 +1522,7 @@ define <4 x i32> @mul_v4i32_0_15_31_7(<4 x i32> %a0) nounwind {
define <8 x i16> @mul_v8i16_0_1_7_15_31_63_127_255(<8 x i16> %a0) nounwind {
; X86-SSE-LABEL: mul_v8i16_0_1_7_15_31_63_127_255:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
;
; X64-SSE-LABEL: mul_v8i16_0_1_7_15_31_63_127_255:
diff --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll
index a00df716657b..9db77246915c 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-128.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll
@@ -265,9 +265,9 @@ define <4 x i32> @var_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
;
; X86-SSE2-LABEL: var_rotate_v4i32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pslld $23, %xmm1
-; X86-SSE2-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
@@ -444,7 +444,7 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
;
; X86-SSE2-LABEL: var_rotate_v8i16:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa %xmm1, %xmm2
; X86-SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
; X86-SSE2-NEXT: pslld $23, %xmm2
@@ -677,20 +677,20 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm3
; X86-SSE2-NEXT: movdqa %xmm2, %xmm4
; X86-SSE2-NEXT: psrlw $4, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
; X86-SSE2-NEXT: movdqa %xmm2, %xmm5
; X86-SSE2-NEXT: psllw $4, %xmm5
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm5
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm5
; X86-SSE2-NEXT: por %xmm4, %xmm5
; X86-SSE2-NEXT: pand %xmm3, %xmm5
; X86-SSE2-NEXT: pandn %xmm2, %xmm3
; X86-SSE2-NEXT: por %xmm5, %xmm3
; X86-SSE2-NEXT: movdqa %xmm3, %xmm2
; X86-SSE2-NEXT: psrlw $6, %xmm2
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: movdqa %xmm3, %xmm4
; X86-SSE2-NEXT: psllw $2, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
; X86-SSE2-NEXT: por %xmm2, %xmm4
; X86-SSE2-NEXT: paddb %xmm1, %xmm1
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
@@ -702,7 +702,7 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; X86-SSE2-NEXT: paddb %xmm2, %xmm3
; X86-SSE2-NEXT: movdqa %xmm2, %xmm4
; X86-SSE2-NEXT: psrlw $7, %xmm4
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm4
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm4
; X86-SSE2-NEXT: por %xmm3, %xmm4
; X86-SSE2-NEXT: paddb %xmm1, %xmm1
; X86-SSE2-NEXT: pcmpgtb %xmm1, %xmm0
@@ -1070,7 +1070,7 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
;
; X86-SSE2-LABEL: splatvar_rotate_v8i16:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,0,0]
; X86-SSE2-NEXT: pand %xmm1, %xmm2
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
@@ -1285,7 +1285,7 @@ define <16 x i8> @splatvar_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
;
; X86-SSE2-LABEL: splatvar_rotate_v16i8:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; X86-SSE2-NEXT: psubb %xmm1, %xmm2
; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
@@ -1801,20 +1801,20 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; X86-SSE2-NEXT: pxor %xmm1, %xmm1
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm2
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: psrlw $8, %xmm2
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm3
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE2-NEXT: psrlw $8, %xmm3
; X86-SSE2-NEXT: packuswb %xmm2, %xmm3
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; X86-SSE2-NEXT: pand %xmm2, %xmm1
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X86-SSE2-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm2, %xmm0
; X86-SSE2-NEXT: packuswb %xmm1, %xmm0
; X86-SSE2-NEXT: por %xmm3, %xmm0
@@ -2127,9 +2127,9 @@ define <16 x i8> @splatconstant_rotate_v16i8(<16 x i8> %a) nounwind {
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $4, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psllw $4, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: por %xmm1, %xmm0
; X86-SSE2-NEXT: retl
%shl = shl <16 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
@@ -2206,7 +2206,7 @@ define <2 x i64> @splatconstant_rotate_mask_v2i64(<2 x i64> %a) nounwind {
; X86-SSE2-LABEL: splatconstant_rotate_mask_v2i64:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlq $49, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
%shl = shl <2 x i64> %a, <i64 15, i64 15>
%lshr = lshr <2 x i64> %a, <i64 49, i64 49>
@@ -2288,7 +2288,7 @@ define <4 x i32> @splatconstant_rotate_mask_v4i32(<4 x i32> %a) nounwind {
; X86-SSE2-NEXT: psrld $28, %xmm1
; X86-SSE2-NEXT: pslld $4, %xmm0
; X86-SSE2-NEXT: por %xmm1, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
%shl = shl <4 x i32> %a, <i32 4, i32 4, i32 4, i32 4>
%lshr = lshr <4 x i32> %a, <i32 28, i32 28, i32 28, i32 28>
@@ -2372,7 +2372,7 @@ define <8 x i16> @splatconstant_rotate_mask_v8i16(<8 x i16> %a) nounwind {
; X86-SSE2-NEXT: psrlw $11, %xmm1
; X86-SSE2-NEXT: psllw $5, %xmm0
; X86-SSE2-NEXT: por %xmm1, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
%shl = shl <8 x i16> %a, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
%lshr = lshr <8 x i16> %a, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
@@ -2465,11 +2465,11 @@ define <16 x i8> @splatconstant_rotate_mask_v16i8(<16 x i8> %a) nounwind {
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: psrlw $4, %xmm1
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psllw $4, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: por %xmm1, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
%shl = shl <16 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
%lshr = lshr <16 x i8> %a, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
@@ -2551,7 +2551,7 @@ define <4 x i32> @rot16_demandedbits(<4 x i32> %x, <4 x i32> %y) nounwind {
; X86-SSE2-NEXT: psrld $11, %xmm1
; X86-SSE2-NEXT: pslld $11, %xmm0
; X86-SSE2-NEXT: por %xmm1, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
%t0 = lshr <4 x i32> %x, <i32 11, i32 11, i32 11, i32 11>
%t1 = shl <4 x i32> %x, <i32 11, i32 11, i32 11, i32 11>
diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll
index 764701712749..c2769e7a9cc8 100644
--- a/llvm/test/CodeGen/X86/vector-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-sext.ll
@@ -3791,7 +3791,7 @@ define <8 x i64> @sext_8i6_to_8i64(i32 %x) nounwind uwtable readnone ssp {
; X86-SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
-; X86-SSE2-NEXT: paddw {{\.LCPI.*}}, %xmm3
+; X86-SSE2-NEXT: paddw {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,0,0,0]
; X86-SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
; X86-SSE2-NEXT: psllq $58, %xmm0
@@ -3835,7 +3835,7 @@ define <8 x i64> @sext_8i6_to_8i64(i32 %x) nounwind uwtable readnone ssp {
; X86-SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; X86-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,0,0,0]
-; X86-SSE41-NEXT: paddw {{\.LCPI.*}}, %xmm3
+; X86-SSE41-NEXT: paddw {{\.LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
; X86-SSE41-NEXT: psllq $58, %xmm0
; X86-SSE41-NEXT: movdqa %xmm0, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
index cb3382cf20d6..1ad32daeddc8 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
@@ -1255,11 +1255,11 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; X86-SSE-NEXT: movdqa %xmm0, %xmm1
; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; X86-SSE-NEXT: psraw $8, %xmm1
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: psrlw $8, %xmm1
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-SSE-NEXT: psraw $8, %xmm0
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: psrlw $8, %xmm0
; X86-SSE-NEXT: packuswb %xmm1, %xmm0
; X86-SSE-NEXT: retl
@@ -1447,7 +1447,7 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; X86-SSE-LABEL: splatconstant_shift_v16i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psrlw $3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; X86-SSE-NEXT: pxor %xmm1, %xmm0
; X86-SSE-NEXT: psubb %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
index a182335f06d1..81cadf70f446 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -1158,7 +1158,7 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
;
; X86-AVX2-LABEL: constant_shift_v8i32:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vpsravd {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpsravd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
%shift = ashr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <8 x i32> %shift
@@ -1230,18 +1230,18 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
;
; X86-AVX1-LABEL: constant_shift_v16i16:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpmulhw {{\.LCPI.*}}, %xmm0, %xmm1
+; X86-AVX1-NEXT: vpmulhw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; X86-AVX1-NEXT: vpsraw $1, %xmm0, %xmm2
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5,6,7]
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X86-AVX1-NEXT: vpmulhw {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpmulhw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: constant_shift_v16i16:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vpmulhw {{\.LCPI.*}}, %ymm0, %ymm1
+; X86-AVX2-NEXT: vpmulhw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm1
; X86-AVX2-NEXT: vpblendw {{.*#+}} xmm2 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; X86-AVX2-NEXT: vpsraw $1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2,3,4,5,6,7]
@@ -1379,11 +1379,11 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; X86-AVX2-NEXT: vpsraw $8, %ymm1, %ymm1
-; X86-AVX2-NEXT: vpmullw {{\.LCPI.*}}, %ymm1, %ymm1
+; X86-AVX2-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1
; X86-AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; X86-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; X86-AVX2-NEXT: vpsraw $8, %ymm0, %ymm0
-; X86-AVX2-NEXT: vpmullw {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; X86-AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
@@ -1651,7 +1651,7 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; X86-AVX2-LABEL: splatconstant_shift_v32i8:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
-; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; X86-AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll
index a55dc2e16185..d9d5802da26a 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-sub128.ll
@@ -2016,7 +2016,7 @@ define <8 x i8> @constant_shift_v8i8(<8 x i8> %a) nounwind {
; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-SSE-NEXT: psraw $8, %xmm0
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: psrlw $8, %xmm0
; X86-SSE-NEXT: packuswb %xmm2, %xmm0
; X86-SSE-NEXT: retl
@@ -2104,7 +2104,7 @@ define <4 x i8> @constant_shift_v4i8(<4 x i8> %a) nounwind {
; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-SSE-NEXT: psraw $8, %xmm0
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: psrlw $8, %xmm0
; X86-SSE-NEXT: packuswb %xmm2, %xmm0
; X86-SSE-NEXT: retl
@@ -2192,7 +2192,7 @@ define <2 x i8> @constant_shift_v2i8(<2 x i8> %a) nounwind {
; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-SSE-NEXT: psraw $8, %xmm0
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: psrlw $8, %xmm0
; X86-SSE-NEXT: packuswb %xmm2, %xmm0
; X86-SSE-NEXT: retl
@@ -2350,7 +2350,7 @@ define <8 x i8> @splatconstant_shift_v8i8(<8 x i8> %a) nounwind {
; X86-SSE-LABEL: splatconstant_shift_v8i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psrlw $3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; X86-SSE-NEXT: pxor %xmm1, %xmm0
; X86-SSE-NEXT: psubb %xmm1, %xmm0
@@ -2403,7 +2403,7 @@ define <4 x i8> @splatconstant_shift_v4i8(<4 x i8> %a) nounwind {
; X86-SSE-LABEL: splatconstant_shift_v4i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psrlw $3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; X86-SSE-NEXT: pxor %xmm1, %xmm0
; X86-SSE-NEXT: psubb %xmm1, %xmm0
@@ -2456,7 +2456,7 @@ define <2 x i8> @splatconstant_shift_v2i8(<2 x i8> %a) nounwind {
; X86-SSE-LABEL: splatconstant_shift_v2i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psrlw $3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; X86-SSE-NEXT: pxor %xmm1, %xmm0
; X86-SSE-NEXT: psubb %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
index 530ebbe60920..0466721999ff 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
@@ -488,7 +488,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psrlw $4, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pxor %xmm3, %xmm3
@@ -497,7 +497,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psrlw $2, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2
@@ -505,7 +505,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm1
; X86-SSE-NEXT: psrlw $1, %xmm0
; X86-SSE-NEXT: pand %xmm2, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm1, %xmm0
; X86-SSE-NEXT: retl
%shift = lshr <16 x i8> %a, %b
@@ -972,7 +972,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535]
; X86-SSE-NEXT: movdqa %xmm1, %xmm2
; X86-SSE-NEXT: pandn %xmm0, %xmm2
-; X86-SSE-NEXT: pmulhuw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulhuw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: pand %xmm1, %xmm0
; X86-SSE-NEXT: por %xmm2, %xmm0
; X86-SSE-NEXT: retl
@@ -1073,10 +1073,10 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; X86-SSE-NEXT: pxor %xmm1, %xmm1
; X86-SSE-NEXT: movdqa %xmm0, %xmm2
; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm2
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE-NEXT: psrlw $8, %xmm2
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: psrlw $8, %xmm0
; X86-SSE-NEXT: packuswb %xmm2, %xmm0
; X86-SSE-NEXT: retl
@@ -1223,7 +1223,7 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; X86-SSE-LABEL: splatconstant_shift_v16i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psrlw $3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
%shift = lshr <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <16 x i8> %shift
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
index c995af7c430d..36d2470fac9d 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
@@ -504,14 +504,14 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; X86-AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
-; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsrlw $2, %ymm0, %ymm2
-; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X86-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsrlw $1, %ymm0, %ymm2
-; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X86-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
@@ -875,7 +875,7 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
;
; X86-AVX2-LABEL: constant_shift_v4i64:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vpsrlvq {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpsrlvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
%shift = lshr <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
ret <4 x i64> %shift
@@ -948,7 +948,7 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
;
; X86-AVX2-LABEL: constant_shift_v8i32:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vpsrlvd {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpsrlvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
%shift = lshr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <8 x i32> %shift
@@ -1015,16 +1015,16 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
;
; X86-AVX1-LABEL: constant_shift_v16i16:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpmulhuw {{\.LCPI.*}}, %xmm0, %xmm1
+; X86-AVX1-NEXT: vpmulhuw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X86-AVX1-NEXT: vpmulhuw {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpmulhuw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: constant_shift_v16i16:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vpmulhuw {{\.LCPI.*}}, %ymm0, %ymm1
+; X86-AVX2-NEXT: vpmulhuw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm1
; X86-AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3,4,5,6,7]
; X86-AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; X86-AVX2-NEXT: retl
@@ -1151,10 +1151,10 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X86-AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; X86-AVX2-NEXT: vpmullw {{\.LCPI.*}}, %ymm2, %ymm2
+; X86-AVX2-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X86-AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
; X86-AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; X86-AVX2-NEXT: vpmullw {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; X86-AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
@@ -1384,7 +1384,7 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; X86-AVX2-LABEL: splatconstant_shift_v32i8:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
-; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
%shift = lshr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
@@ -1454,7 +1454,7 @@ define <4 x i32> @sh_trunc_sh_vec(<4 x i64> %x) {
; X86-AVX1-NEXT: vpsrlq $36, %xmm1, %xmm1
; X86-AVX1-NEXT: vpsrlq $36, %xmm0, %xmm0
; X86-AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; X86-AVX1-NEXT: vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vzeroupper
; X86-AVX1-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll
index 49cf4c0793d1..3e821a2d88e0 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-sub128.ll
@@ -595,7 +595,7 @@ define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psrlw $4, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pxor %xmm3, %xmm3
@@ -604,7 +604,7 @@ define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psrlw $2, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2
@@ -612,7 +612,7 @@ define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm1
; X86-SSE-NEXT: psrlw $1, %xmm0
; X86-SSE-NEXT: pand %xmm2, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm1, %xmm0
; X86-SSE-NEXT: retl
%shift = lshr <8 x i8> %a, %b
@@ -745,7 +745,7 @@ define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psrlw $4, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pxor %xmm3, %xmm3
@@ -754,7 +754,7 @@ define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psrlw $2, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2
@@ -762,7 +762,7 @@ define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm1
; X86-SSE-NEXT: psrlw $1, %xmm0
; X86-SSE-NEXT: pand %xmm2, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm1, %xmm0
; X86-SSE-NEXT: retl
%shift = lshr <4 x i8> %a, %b
@@ -895,7 +895,7 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psrlw $4, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pxor %xmm3, %xmm3
@@ -904,7 +904,7 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psrlw $2, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2
@@ -912,7 +912,7 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm1
; X86-SSE-NEXT: psrlw $1, %xmm0
; X86-SSE-NEXT: pand %xmm2, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm1, %xmm0
; X86-SSE-NEXT: retl
%shift = lshr <2 x i8> %a, %b
@@ -1543,7 +1543,7 @@ define <4 x i16> @constant_shift_v4i16(<4 x i16> %a) nounwind {
; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,65535,65535,65535,65535,65535,65535,65535]
; X86-SSE-NEXT: movdqa %xmm1, %xmm2
; X86-SSE-NEXT: pandn %xmm0, %xmm2
-; X86-SSE-NEXT: pmulhuw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmulhuw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: pand %xmm1, %xmm0
; X86-SSE-NEXT: por %xmm2, %xmm0
; X86-SSE-NEXT: retl
@@ -1713,7 +1713,7 @@ define <8 x i8> @constant_shift_v8i8(<8 x i8> %a) nounwind {
; X86-SSE-NEXT: movdqa %xmm0, %xmm2
; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: psrlw $8, %xmm0
; X86-SSE-NEXT: packuswb %xmm2, %xmm0
; X86-SSE-NEXT: retl
@@ -1809,7 +1809,7 @@ define <4 x i8> @constant_shift_v4i8(<4 x i8> %a) nounwind {
; X86-SSE-NEXT: movdqa %xmm0, %xmm2
; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: psrlw $8, %xmm0
; X86-SSE-NEXT: packuswb %xmm2, %xmm0
; X86-SSE-NEXT: retl
@@ -1905,7 +1905,7 @@ define <2 x i8> @constant_shift_v2i8(<2 x i8> %a) nounwind {
; X86-SSE-NEXT: movdqa %xmm0, %xmm2
; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: psrlw $8, %xmm0
; X86-SSE-NEXT: packuswb %xmm2, %xmm0
; X86-SSE-NEXT: retl
@@ -2052,7 +2052,7 @@ define <8 x i8> @splatconstant_shift_v8i8(<8 x i8> %a) nounwind {
; X86-SSE-LABEL: splatconstant_shift_v8i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psrlw $3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
%shift = lshr <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <8 x i8> %shift
@@ -2091,7 +2091,7 @@ define <4 x i8> @splatconstant_shift_v4i8(<4 x i8> %a) nounwind {
; X86-SSE-LABEL: splatconstant_shift_v4i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psrlw $3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
%shift = lshr <4 x i8> %a, <i8 3, i8 3, i8 3, i8 3>
ret <4 x i8> %shift
@@ -2130,7 +2130,7 @@ define <2 x i8> @splatconstant_shift_v2i8(<2 x i8> %a) nounwind {
; X86-SSE-LABEL: splatconstant_shift_v2i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psrlw $3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
%shift = lshr <2 x i8> %a, <i8 3, i8 3>
ret <2 x i8> %shift
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
index d2d958bf3286..0c7a6fa03cfe 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
@@ -140,7 +140,7 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; X86-SSE-LABEL: var_shift_v4i32:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pslld $23, %xmm1
-; X86-SSE-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE-NEXT: pmuludq %xmm1, %xmm0
@@ -402,7 +402,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psllw $4, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pxor %xmm3, %xmm3
@@ -411,7 +411,7 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psllw $2, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2
@@ -844,7 +844,7 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
;
; X86-SSE-LABEL: constant_shift_v8i16:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
%shift = shl <8 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>
ret <8 x i16> %shift
@@ -942,11 +942,11 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movdqa %xmm0, %xmm1
; X86-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; X86-SSE-NEXT: pand %xmm2, %xmm1
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: pand %xmm2, %xmm0
; X86-SSE-NEXT: packuswb %xmm1, %xmm0
; X86-SSE-NEXT: retl
@@ -1093,7 +1093,7 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; X86-SSE-LABEL: splatconstant_shift_v16i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psllw $3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
%shift = shl <16 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <16 x i8> %shift
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
index 758de4e64f73..da3cebc47586 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
@@ -435,10 +435,10 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
; X86-AVX2-NEXT: vpsllw $4, %ymm0, %ymm2
-; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsllw $2, %ymm0, %ymm2
-; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm2, %ymm2
+; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X86-AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; X86-AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; X86-AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm2
@@ -800,7 +800,7 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
;
; X86-AVX2-LABEL: constant_shift_v4i64:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vpsllvq {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpsllvq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
%shift = shl <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
ret <4 x i64> %shift
@@ -845,15 +845,15 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
;
; X86-AVX1-LABEL: constant_shift_v8i32:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm1
+; X86-AVX1-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X86-AVX1-NEXT: vpmulld {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: constant_shift_v8i32:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vpsllvd {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpsllvd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
%shift = shl <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <8 x i32> %shift
@@ -911,15 +911,15 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
;
; X86-AVX1-LABEL: constant_shift_v16i16:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpmullw {{\.LCPI.*}}, %xmm0, %xmm1
+; X86-AVX1-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm1
; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X86-AVX1-NEXT: vpmullw {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: constant_shift_v16i16:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vpmullw {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpmullw {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
%shift = shl <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
@@ -1055,12 +1055,12 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; X86-AVX2-LABEL: constant_shift_v32i8:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
-; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm1, %ymm1
+; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1
; X86-AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm2 = [8192,24640,41088,57536,49376,32928,16480,32,8192,24640,41088,57536,49376,32928,16480,32]
; X86-AVX2-NEXT: # ymm2 = mem[0,1,0,1]
; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpsllw $2, %ymm0, %ymm1
-; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm1, %ymm1
+; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm1
; X86-AVX2-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; X86-AVX2-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0
; X86-AVX2-NEXT: vpaddb %ymm0, %ymm0, %ymm1
@@ -1293,7 +1293,7 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; X86-AVX2-LABEL: splatconstant_shift_v32i8:
; X86-AVX2: # %bb.0:
; X86-AVX2-NEXT: vpsllw $3, %ymm0, %ymm0
-; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-AVX2-NEXT: retl
%shift = shl <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll
index 0861a5f4e402..60f21f3fb50c 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-sub128.ll
@@ -76,7 +76,7 @@ define <2 x i32> @var_shift_v2i32(<2 x i32> %a, <2 x i32> %b) nounwind {
; X86-SSE-LABEL: var_shift_v2i32:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: pslld $23, %xmm1
-; X86-SSE-NEXT: paddd {{\.LCPI.*}}, %xmm1
+; X86-SSE-NEXT: paddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE-NEXT: cvttps2dq %xmm1, %xmm1
; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-SSE-NEXT: pmuludq %xmm1, %xmm0
@@ -465,7 +465,7 @@ define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psllw $4, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pxor %xmm3, %xmm3
@@ -474,7 +474,7 @@ define <8 x i8> @var_shift_v8i8(<8 x i8> %a, <8 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psllw $2, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2
@@ -609,7 +609,7 @@ define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psllw $4, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pxor %xmm3, %xmm3
@@ -618,7 +618,7 @@ define <4 x i8> @var_shift_v4i8(<4 x i8> %a, <4 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psllw $2, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2
@@ -753,7 +753,7 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psllw $4, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pxor %xmm3, %xmm3
@@ -762,7 +762,7 @@ define <2 x i8> @var_shift_v2i8(<2 x i8> %a, <2 x i8> %b) nounwind {
; X86-SSE-NEXT: pandn %xmm0, %xmm4
; X86-SSE-NEXT: psllw $2, %xmm0
; X86-SSE-NEXT: pand %xmm3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: por %xmm4, %xmm0
; X86-SSE-NEXT: paddb %xmm1, %xmm1
; X86-SSE-NEXT: pcmpgtb %xmm1, %xmm2
@@ -1369,7 +1369,7 @@ define <4 x i16> @constant_shift_v4i16(<4 x i16> %a) nounwind {
;
; X86-SSE-LABEL: constant_shift_v4i16:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
%shift = shl <4 x i16> %a, <i16 0, i16 1, i16 2, i16 3>
ret <4 x i16> %shift
@@ -1431,7 +1431,7 @@ define <2 x i16> @constant_shift_v2i16(<2 x i16> %a) nounwind {
;
; X86-SSE-LABEL: constant_shift_v2i16:
; X86-SSE: # %bb.0:
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
%shift = shl <2 x i16> %a, <i16 2, i16 3>
ret <2 x i16> %shift
@@ -1517,8 +1517,8 @@ define <8 x i8> @constant_shift_v8i8(<8 x i8> %a) nounwind {
; X86-SSE-LABEL: constant_shift_v8i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: pxor %xmm1, %xmm1
; X86-SSE-NEXT: packuswb %xmm1, %xmm0
; X86-SSE-NEXT: retl
@@ -1606,8 +1606,8 @@ define <4 x i8> @constant_shift_v4i8(<4 x i8> %a) nounwind {
; X86-SSE-LABEL: constant_shift_v4i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: pxor %xmm1, %xmm1
; X86-SSE-NEXT: packuswb %xmm1, %xmm0
; X86-SSE-NEXT: retl
@@ -1695,8 +1695,8 @@ define <2 x i8> @constant_shift_v2i8(<2 x i8> %a) nounwind {
; X86-SSE-LABEL: constant_shift_v2i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X86-SSE-NEXT: pmullw {{\.LCPI.*}}, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: pxor %xmm1, %xmm1
; X86-SSE-NEXT: packuswb %xmm1, %xmm0
; X86-SSE-NEXT: retl
@@ -1843,7 +1843,7 @@ define <8 x i8> @splatconstant_shift_v8i8(<8 x i8> %a) nounwind {
; X86-SSE-LABEL: splatconstant_shift_v8i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psllw $3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
%shift = shl <8 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <8 x i8> %shift
@@ -1882,7 +1882,7 @@ define <4 x i8> @splatconstant_shift_v4i8(<4 x i8> %a) nounwind {
; X86-SSE-LABEL: splatconstant_shift_v4i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psllw $3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
%shift = shl <4 x i8> %a, <i8 3, i8 3, i8 3, i8 3>
ret <4 x i8> %shift
@@ -1921,7 +1921,7 @@ define <2 x i8> @splatconstant_shift_v2i8(<2 x i8> %a) nounwind {
; X86-SSE-LABEL: splatconstant_shift_v2i8:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: psllw $3, %xmm0
-; X86-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT: retl
%shift = shl <2 x i8> %a, <i8 3, i8 3>
ret <2 x i8> %shift
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
index 422f64d982bf..1e1224ee5eab 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll
@@ -345,7 +345,7 @@ define <32 x i16> @test_mm512_mask_blend_epi16(<32 x i16> %A, <32 x i16> %W){
;
; KNL32-LABEL: test_mm512_mask_blend_epi16:
; KNL32: # %bb.0: # %entry
-; KNL32-NEXT: vpternlogd $216, {{\.LCPI.*}}{1to16}, %zmm1, %zmm0
+; KNL32-NEXT: vpternlogd $216, {{\.LCPI[0-9]+_[0-9]+}}{1to16}, %zmm1, %zmm0
; KNL32-NEXT: retl
entry:
%0 = shufflevector <32 x i16> %A, <32 x i16> %W, <32 x i32> <i32 32, i32 1, i32 34, i32 3, i32 36, i32 5, i32 38, i32 7, i32 40, i32 9, i32 42, i32 11, i32 44, i32 13, i32 46, i32 15, i32 48, i32 17, i32 50, i32 19, i32 52, i32 21, i32 54, i32 23, i32 56, i32 25, i32 58, i32 27, i32 60, i32 29, i32 62, i32 31>
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index 7187425aee0f..32303f867e4b 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -77,7 +77,7 @@ define <4 x i64> @combine_permq_pshufb_as_vextracti128(<4 x i64> %a0) {
; X86-LABEL: combine_permq_pshufb_as_vextracti128:
; X86: # %bb.0:
; X86-NEXT: vextracti128 $1, %ymm0, %xmm0
-; X86-NEXT: vpaddq {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: combine_permq_pshufb_as_vextracti128:
@@ -97,7 +97,7 @@ define <4 x i64> @combine_permq_pshufb_as_vmovdqa(<4 x i64> %a0) {
; X86-LABEL: combine_permq_pshufb_as_vmovdqa:
; X86: # %bb.0:
; X86-NEXT: vmovdqa %xmm0, %xmm0
-; X86-NEXT: vpaddq {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: combine_permq_pshufb_as_vmovdqa:
@@ -210,7 +210,7 @@ define <16 x i8> @combine_pshufb_as_vpbroadcastd128(<16 x i8> %a) {
; X86-LABEL: combine_pshufb_as_vpbroadcastd128:
; X86: # %bb.0:
; X86-NEXT: vpbroadcastd %xmm0, %xmm0
-; X86-NEXT: vpaddb {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-NEXT: vpaddb {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: combine_pshufb_as_vpbroadcastd128:
@@ -227,7 +227,7 @@ define <8 x i32> @combine_permd_as_vpbroadcastd256(<4 x i32> %a) {
; X86-LABEL: combine_permd_as_vpbroadcastd256:
; X86: # %bb.0:
; X86-NEXT: vpbroadcastd %xmm0, %ymm0
-; X86-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: combine_permd_as_vpbroadcastd256:
@@ -254,7 +254,7 @@ define <8 x i32> @combine_permd_as_vpbroadcastq256(<4 x i32> %a) {
; X86-LABEL: combine_permd_as_vpbroadcastq256:
; X86: # %bb.0:
; X86-NEXT: vpbroadcastq %xmm0, %ymm0
-; X86-NEXT: vpaddd {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-NEXT: vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: combine_permd_as_vpbroadcastq256:
@@ -543,7 +543,7 @@ define <32 x i8> @combine_pshufb_as_unpackhi_zero(<32 x i8> %a0) {
define <32 x i8> @combine_psrlw_pshufb(<16 x i16> %a0) {
; X86-LABEL: combine_psrlw_pshufb:
; X86: # %bb.0:
-; X86-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: combine_psrlw_pshufb:
@@ -559,7 +559,7 @@ define <32 x i8> @combine_psrlw_pshufb(<16 x i16> %a0) {
define <32 x i8> @combine_pslld_pshufb(<8 x i32> %a0) {
; X86-LABEL: combine_pslld_pshufb:
; X86: # %bb.0:
-; X86-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X86-NEXT: vandps {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: combine_pslld_pshufb:
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll
index 56834cc23116..9d61fd2e8ecd 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512f.ll
@@ -901,8 +901,8 @@ define <8 x double> @combine_vpermi2var_8f64_as_permpd(<8 x double> %x0, <8 x do
; X86: # %bb.0:
; X86-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; X86-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],mem[0]
-; X86-NEXT: vinsertf128 $1, {{\.LCPI.*}}, %ymm2, %ymm2
-; X86-NEXT: vinsertf64x4 $1, {{\.LCPI.*}}, %zmm2, %zmm2
+; X86-NEXT: vinsertf128 $1, {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
+; X86-NEXT: vinsertf64x4 $1, {{\.LCPI[0-9]+_[0-9]+}}, %zmm2, %zmm2
; X86-NEXT: vpermi2pd %zmm1, %zmm0, %zmm2
; X86-NEXT: vpermpd {{.*#+}} zmm0 = zmm2[2,3,1,1,6,7,5,5]
; X86-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-xop.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-xop.ll
index f758a1d32c13..b453517c2885 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-xop.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-xop.ll
@@ -135,7 +135,7 @@ define <4 x double> @demandedelts_vpermil2pd256_as_shufpd(<4 x double> %a0, <4 x
; X86: # %bb.0:
; X86-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; X86-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],mem[0]
-; X86-NEXT: vinsertf128 $1, {{\.LCPI.*}}, %ymm2, %ymm2
+; X86-NEXT: vinsertf128 $1, {{\.LCPI[0-9]+_[0-9]+}}, %ymm2, %ymm2
; X86-NEXT: vpermil2pd $0, %ymm2, %ymm1, %ymm0, %ymm0
; X86-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,2,3]
; X86-NEXT: retl
@@ -174,7 +174,7 @@ define <16 x i8> @combine_vpperm_zero(<16 x i8> %a0, <16 x i8> %a1) {
define <16 x i8> @combine_vpperm_identity_bitcast(<16 x i8> %a0, <16 x i8> %a1) {
; X86-LABEL: combine_vpperm_identity_bitcast:
; X86: # %bb.0:
-; X86-NEXT: vpaddq {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-NEXT: vpaddq {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: combine_vpperm_identity_bitcast:
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index baa0da8fabfd..18cba47834db 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -3112,10 +3112,10 @@ define void @PR43024() {
; AVX: # %bb.0:
; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [NaN,NaN,0.0E+0,0.0E+0]
; AVX-NEXT: vmovaps %xmm0, (%rax)
-; AVX-NEXT: vaddss {{\.LCPI.*}}+{{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vaddss {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vaddss {{\.LCPI.*}}+{{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vaddss {{\.LCPI[0-9]+_[0-9]+}}+{{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovss %xmm0, (%rax)
; AVX-NEXT: retq
store <4 x float> <float 0x7FF8000000000000, float 0x7FF8000000000000, float 0x0, float 0x0>, <4 x float>* undef, align 16
diff --git a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll
index 137ce2d61883..cf42ed3c2612 100644
--- a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll
+++ b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll
@@ -17,16 +17,16 @@
define <16 x i8> @test_128_i8_x_16_7_mask_lshr_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_lshr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_7_mask_lshr_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_lshr_1:
@@ -50,13 +50,13 @@ define <16 x i8> @test_128_i8_x_16_7_mask_lshr_1(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_28_mask_lshr_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -78,13 +78,13 @@ define <16 x i8> @test_128_i8_x_16_28_mask_lshr_1(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_28_mask_lshr_2(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_2:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $2, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_2:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $2, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -106,16 +106,16 @@ define <16 x i8> @test_128_i8_x_16_28_mask_lshr_2(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_28_mask_lshr_3(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_3:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $3, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_3:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_3:
@@ -138,16 +138,16 @@ define <16 x i8> @test_128_i8_x_16_28_mask_lshr_3(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_28_mask_lshr_4(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_4:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $4, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_4:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_4:
@@ -171,13 +171,13 @@ define <16 x i8> @test_128_i8_x_16_28_mask_lshr_4(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_224_mask_lshr_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -199,13 +199,13 @@ define <16 x i8> @test_128_i8_x_16_224_mask_lshr_1(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_224_mask_lshr_4(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_4:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $4, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_4:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -228,13 +228,13 @@ define <16 x i8> @test_128_i8_x_16_224_mask_lshr_5(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_5:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlw $5, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_5:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_5:
@@ -256,13 +256,13 @@ define <16 x i8> @test_128_i8_x_16_224_mask_lshr_6(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_6:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlw $6, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_6:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_6:
@@ -286,16 +286,16 @@ define <16 x i8> @test_128_i8_x_16_224_mask_lshr_6(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_7_mask_ashr_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_ashr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_7_mask_ashr_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_ashr_1:
@@ -319,13 +319,13 @@ define <16 x i8> @test_128_i8_x_16_7_mask_ashr_1(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_28_mask_ashr_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -347,13 +347,13 @@ define <16 x i8> @test_128_i8_x_16_28_mask_ashr_1(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_28_mask_ashr_2(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_2:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $2, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_2:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $2, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -375,16 +375,16 @@ define <16 x i8> @test_128_i8_x_16_28_mask_ashr_2(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_28_mask_ashr_3(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_3:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $3, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_3:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_3:
@@ -407,16 +407,16 @@ define <16 x i8> @test_128_i8_x_16_28_mask_ashr_3(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_28_mask_ashr_4(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_4:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $4, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_4:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_4:
@@ -440,7 +440,7 @@ define <16 x i8> @test_128_i8_x_16_28_mask_ashr_4(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_224_mask_ashr_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
; X86-SSE2-NEXT: pxor %xmm1, %xmm0
@@ -449,7 +449,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_1(<16 x i8> %a0) {
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
@@ -480,7 +480,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_1(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_224_mask_ashr_4(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_4:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $4, %xmm0
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; X86-SSE2-NEXT: pxor %xmm1, %xmm0
@@ -489,7 +489,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_4(<16 x i8> %a0) {
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_4:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
@@ -521,7 +521,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_5(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_5:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlw $5, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
; X86-SSE2-NEXT: pxor %xmm1, %xmm0
; X86-SSE2-NEXT: psubb %xmm1, %xmm0
@@ -530,7 +530,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_5(<16 x i8> %a0) {
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_5:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
@@ -561,7 +561,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_6(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_6:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psrlw $6, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; X86-SSE2-NEXT: pxor %xmm1, %xmm0
; X86-SSE2-NEXT: psubb %xmm1, %xmm0
@@ -570,7 +570,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_6(<16 x i8> %a0) {
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_6:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; X86-AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X86-AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
@@ -603,13 +603,13 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_6(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_7_mask_shl_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddb %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -631,13 +631,13 @@ define <16 x i8> @test_128_i8_x_16_7_mask_shl_1(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_7_mask_shl_4(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_4:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psllw $4, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_4:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $4, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -660,13 +660,13 @@ define <16 x i8> @test_128_i8_x_16_7_mask_shl_5(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_5:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psllw $5, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_5:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsllw $5, %xmm0, %xmm0
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_5:
@@ -688,13 +688,13 @@ define <16 x i8> @test_128_i8_x_16_7_mask_shl_6(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_6:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: psllw $6, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_6:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vpsllw $6, %xmm0, %xmm0
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_6:
@@ -716,13 +716,13 @@ define <16 x i8> @test_128_i8_x_16_7_mask_shl_6(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_28_mask_shl_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddb %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -744,13 +744,13 @@ define <16 x i8> @test_128_i8_x_16_28_mask_shl_1(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_28_mask_shl_2(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_2:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psllw $2, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_2:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $2, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -772,13 +772,13 @@ define <16 x i8> @test_128_i8_x_16_28_mask_shl_2(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_28_mask_shl_3(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_3:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psllw $3, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_3:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $3, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -800,16 +800,16 @@ define <16 x i8> @test_128_i8_x_16_28_mask_shl_3(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_28_mask_shl_4(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_4:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psllw $4, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_4:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $4, %xmm0, %xmm0
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_4:
@@ -833,13 +833,13 @@ define <16 x i8> @test_128_i8_x_16_28_mask_shl_4(<16 x i8> %a0) {
define <16 x i8> @test_128_i8_x_16_224_mask_shl_1(<16 x i8> %a0) {
; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddb %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i8_x_16_224_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -868,13 +868,13 @@ define <16 x i8> @test_128_i8_x_16_224_mask_shl_1(<16 x i8> %a0) {
define <8 x i16> @test_128_i16_x_8_127_mask_lshr_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_lshr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_127_mask_lshr_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -897,13 +897,13 @@ define <8 x i16> @test_128_i16_x_8_127_mask_lshr_1(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_3(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_3:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $3, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_3:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -925,13 +925,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_3(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_4(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_4:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $4, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_4:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -953,13 +953,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_4(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_5(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_5:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $5, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_5:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -981,13 +981,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_5(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_6(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_6:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $6, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_6:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1010,13 +1010,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_6(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_65024_mask_lshr_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1038,13 +1038,13 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_lshr_1(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_65024_mask_lshr_8(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_8:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_8:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1117,13 +1117,13 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_lshr_10(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_127_mask_ashr_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_ashr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_127_mask_ashr_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1146,13 +1146,13 @@ define <8 x i16> @test_128_i16_x_8_127_mask_ashr_1(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_3(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_3:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $3, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_3:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1174,13 +1174,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_3(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_4(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_4:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $4, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_4:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $4, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1202,13 +1202,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_4(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_5(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_5:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $5, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_5:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $5, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1230,13 +1230,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_5(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_6(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_6:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlw $6, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_6:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlw $6, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1259,13 +1259,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_6(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psraw $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsraw $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1287,13 +1287,13 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_1(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_8(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_8:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psraw $8, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_8:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsraw $8, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1366,13 +1366,13 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_10(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_127_mask_shl_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddw %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1394,13 +1394,13 @@ define <8 x i16> @test_128_i16_x_8_127_mask_shl_1(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_127_mask_shl_8(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_8:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psllw $8, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_8:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $8, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1471,13 +1471,13 @@ define <8 x i16> @test_128_i16_x_8_127_mask_shl_10(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_shl_3(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_3:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psllw $3, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_3:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $3, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1499,13 +1499,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_3(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_shl_4(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_4:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psllw $4, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_4:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $4, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1527,13 +1527,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_4(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_shl_5(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_5:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psllw $5, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_5:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $5, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1555,13 +1555,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_5(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_2032_mask_shl_6(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_6:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psllw $6, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_6:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllw $6, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1584,13 +1584,13 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_6(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_65024_mask_shl_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddw %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -1619,13 +1619,13 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_shl_1(<8 x i16> %a0) {
define <4 x i32> @test_128_i32_x_4_32767_mask_lshr_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_lshr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrld $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_lshr_1:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -1662,13 +1662,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_lshr_1(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_7(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_7:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrld $7, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_7:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $7, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -1704,13 +1704,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_7(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_8(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_8:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrld $8, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_8:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $8, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -1746,13 +1746,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_8(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_9(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_9:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrld $9, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_9:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $9, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -1788,13 +1788,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_9(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_10(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_10:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrld $10, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_10:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $10, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -1831,13 +1831,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_10(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrld $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_lshr_1:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -1873,13 +1873,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_1(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_16(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_16:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrld $16, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_lshr_16:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -1966,13 +1966,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_18(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_32767_mask_ashr_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_ashr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrld $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_ashr_1:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2009,13 +2009,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_ashr_1(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_7(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_7:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrld $7, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_7:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $7, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2051,13 +2051,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_7(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_8(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_8:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrld $8, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_8:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $8, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2093,13 +2093,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_8(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_9(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_9:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrld $9, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_9:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $9, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2135,13 +2135,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_9(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_10(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_10:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrld $10, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_10:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrld $10, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2178,13 +2178,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_10(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrad $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_ashr_1:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2220,13 +2220,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_1(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_16(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_16:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrad $16, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_ashr_16:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2313,13 +2313,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_18(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_32767_mask_shl_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddd %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2355,13 +2355,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_shl_1(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_32767_mask_shl_16(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_16:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pslld $16, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_16:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpslld $16, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2446,13 +2446,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_shl_18(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_7(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_7:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pslld $7, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_7:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpslld $7, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2488,13 +2488,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_7(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_8(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_8:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pslld $8, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_8:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpslld $8, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2530,13 +2530,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_8(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_9(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_9:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pslld $9, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_9:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpslld $9, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2572,13 +2572,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_9(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_10(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_10:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pslld $10, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_10:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpslld $10, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2615,13 +2615,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_10(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_4294836224_mask_shl_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddd %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2664,13 +2664,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_shl_1(<4 x i32> %a0) {
define <2 x i64> @test_128_i64_x_2_2147483647_mask_lshr_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_lshr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlq $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_lshr_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -2693,13 +2693,13 @@ define <2 x i64> @test_128_i64_x_2_2147483647_mask_lshr_1(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_15(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_15:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlq $15, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_15:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $15, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -2721,13 +2721,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_15(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_16(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_16:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlq $16, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_16:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $16, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -2749,13 +2749,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_16(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_17(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_17:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlq $17, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_17:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $17, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -2777,13 +2777,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_17(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_18(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_18:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlq $18, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_18:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $18, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -2806,13 +2806,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_18(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlq $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -2834,13 +2834,13 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_1(<2 x i64> %a
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_32(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlq $32, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_32:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrlq $32, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -2920,13 +2920,13 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_34(<2 x i64> %
define <2 x i64> @test_128_i64_x_2_2147483647_mask_ashr_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_ashr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlq $1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_ashr_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $1, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -2949,13 +2949,13 @@ define <2 x i64> @test_128_i64_x_2_2147483647_mask_ashr_1(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_15(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_15:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlq $15, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_15:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $15, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -2977,13 +2977,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_15(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_16(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_16:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlq $16, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_16:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $16, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -3005,13 +3005,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_16(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_17(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_17:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlq $17, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_17:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $17, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -3033,13 +3033,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_17(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_18(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_18:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrlq $18, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_18:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsrlq $18, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -3062,14 +3062,14 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_18(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psrad $1, %xmm0
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
; X86-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
@@ -3113,7 +3113,7 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_1(<2 x i64> %a
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_32(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
; X86-SSE2-NEXT: psrad $31, %xmm0
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
@@ -3123,7 +3123,7 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_32(<2 x i64> %
;
; X86-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsrad $31, %xmm0, %xmm1
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
@@ -3131,7 +3131,7 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_32(<2 x i64> %
;
; X86-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX2-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX2-NEXT: vpsrad $31, %xmm0, %xmm1
; X86-AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X86-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
@@ -3284,13 +3284,13 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_34(<2 x i64> %
define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddq %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -3312,13 +3312,13 @@ define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_1(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_32(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_32:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psllq $32, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i64_x_2_2147483647_mask_shl_32:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpsllq $32, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
@@ -3396,13 +3396,13 @@ define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_34(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_15(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_15:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psllq $15, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_15:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllq $15, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -3424,13 +3424,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_15(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_16(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_16:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psllq $16, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_16:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllq $16, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -3452,13 +3452,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_16(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_17(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_17:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psllq $17, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_17:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllq $17, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -3480,13 +3480,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_17(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_18(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_18:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: psllq $18, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_18:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpsllq $18, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
@@ -3509,13 +3509,13 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_18(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_shl_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddq %xmm0, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
diff --git a/llvm/test/CodeGen/X86/vshift-6.ll b/llvm/test/CodeGen/X86/vshift-6.ll
index 471ea5ad5c93..3c0081d049f4 100644
--- a/llvm/test/CodeGen/X86/vshift-6.ll
+++ b/llvm/test/CodeGen/X86/vshift-6.ll
@@ -42,7 +42,7 @@ define <16 x i8> @do_not_crash(i8*, i32*, i64*, i32, i64, i8) {
; X86-NEXT: pxor %xmm0, %xmm0
; X86-NEXT: pcmpgtb %xmm1, %xmm0
; X86-NEXT: pxor %xmm0, %xmm2
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: por %xmm2, %xmm0
; X86-NEXT: paddb %xmm1, %xmm1
; X86-NEXT: pxor %xmm2, %xmm2
@@ -51,7 +51,7 @@ define <16 x i8> @do_not_crash(i8*, i32*, i64*, i32, i64, i8) {
; X86-NEXT: pandn %xmm0, %xmm4
; X86-NEXT: psllw $2, %xmm0
; X86-NEXT: pand %xmm2, %xmm0
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: por %xmm4, %xmm0
; X86-NEXT: paddb %xmm1, %xmm1
; X86-NEXT: pcmpgtb %xmm1, %xmm3
diff --git a/llvm/test/CodeGen/X86/widen_load-2.ll b/llvm/test/CodeGen/X86/widen_load-2.ll
index c9531f2c9ced..a14736bd6837 100644
--- a/llvm/test/CodeGen/X86/widen_load-2.ll
+++ b/llvm/test/CodeGen/X86/widen_load-2.ll
@@ -359,7 +359,7 @@ define void @rot(%i8vec3pack* nocapture sret(%i8vec3pack) %result, %i8vec3pack*
; X86-NEXT: movw $257, (%ecx) # imm = 0x101
; X86-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: psrlw $1, %xmm0
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: pextrb $2, %xmm0, 2(%eax)
; X86-NEXT: pextrw $0, %xmm0, (%eax)
; X86-NEXT: retl $4
diff --git a/llvm/test/CodeGen/X86/x86-shifts.ll b/llvm/test/CodeGen/X86/x86-shifts.ll
index 6a7089967cf6..3063666dabeb 100644
--- a/llvm/test/CodeGen/X86/x86-shifts.ll
+++ b/llvm/test/CodeGen/X86/x86-shifts.ll
@@ -131,7 +131,7 @@ define <8 x i16> @sll8_nosplat(<8 x i16> %A) nounwind {
; X86: # %bb.0: # %entry
; X86-NEXT: movdqa {{.*#+}} xmm1 = [2,4,8,64,4,4,4,4]
; X86-NEXT: pmullw %xmm0, %xmm1
-; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: pxor %xmm1, %xmm0
; X86-NEXT: retl
;
@@ -204,7 +204,7 @@ define <16 x i8> @shl9(<16 x i8> %A) nounwind {
; X86-LABEL: shl9:
; X86: # %bb.0:
; X86-NEXT: psllw $3, %xmm0
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: shl9:
@@ -220,7 +220,7 @@ define <16 x i8> @shr9(<16 x i8> %A) nounwind {
; X86-LABEL: shr9:
; X86: # %bb.0:
; X86-NEXT: psrlw $3, %xmm0
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: shr9:
@@ -247,7 +247,7 @@ define <16 x i8> @sra_v16i8(<16 x i8> %A) nounwind {
; X86-LABEL: sra_v16i8:
; X86: # %bb.0:
; X86-NEXT: psrlw $3, %xmm0
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; X86-NEXT: pxor %xmm1, %xmm0
; X86-NEXT: psubb %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/xop-mask-comments.ll b/llvm/test/CodeGen/X86/xop-mask-comments.ll
index 3e5bb351c5d1..418a6479810d 100644
--- a/llvm/test/CodeGen/X86/xop-mask-comments.ll
+++ b/llvm/test/CodeGen/X86/xop-mask-comments.ll
@@ -55,7 +55,7 @@ define <16 x i8> @vpperm_shuffle_binary_zero(<16 x i8> %a0, <16 x i8> %a1) {
define <16 x i8> @vpperm_shuffle_general(<16 x i8> %a0, <16 x i8> %a1) {
; X86-LABEL: vpperm_shuffle_general:
; X86: # %bb.0:
-; X86-NEXT: vpperm {{\.LCPI.*}}, %xmm0, %xmm0, %xmm0
+; X86-NEXT: vpperm {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: vpperm_shuffle_general:
diff --git a/llvm/test/CodeGen/X86/xor.ll b/llvm/test/CodeGen/X86/xor.ll
index 8867a4d8b54f..9b77c56c3a81 100644
--- a/llvm/test/CodeGen/X86/xor.ll
+++ b/llvm/test/CodeGen/X86/xor.ll
@@ -373,7 +373,7 @@ define i32 @test9(i32 %a) nounwind {
define <4 x i32> @test10(<4 x i32> %a) nounwind {
; X86-LABEL: test10:
; X86: # %bb.0:
-; X86-NEXT: andnps {{\.LCPI.*}}, %xmm0
+; X86-NEXT: andnps {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: retl
;
; X64-LIN-LABEL: test10:
diff --git a/llvm/utils/UpdateTestChecks/asm.py b/llvm/utils/UpdateTestChecks/asm.py
index 3cbedfeaccf2..b5546e7d2411 100644
--- a/llvm/utils/UpdateTestChecks/asm.py
+++ b/llvm/utils/UpdateTestChecks/asm.py
@@ -197,7 +197,7 @@ def scrub_asm_x86(asm, args):
# Generically match a RIP-relative memory operand.
asm = SCRUB_X86_RIP_RE.sub(r'{{.*}}(%rip)', asm)
# Generically match a LCP symbol.
- asm = SCRUB_X86_LCP_RE.sub(r'{{\.LCPI.*}}', asm)
+ asm = SCRUB_X86_LCP_RE.sub(r'{{\.LCPI[0-9]+_[0-9]+}}', asm)
if getattr(args, 'extra_scrub', False):
      # Avoid generating different checks for 32- and 64-bit because of 'retl' vs 'retq'.
asm = SCRUB_X86_RET_RE.sub(r'ret{{[l|q]}}', asm)
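To see why the tighter substitution matters, here is a minimal standalone
sketch (not part of the commit; check_matches is a hypothetical helper that
only emulates how FileCheck expands a {{...}} block into a regex, with the
surrounding text matched literally):

    import re

    def check_matches(check_pattern, asm_line):
        # FileCheck treats text inside {{...}} as a regular expression
        # and everything around it as a literal match.
        parts = re.split(r'\{\{(.*?)\}\}', check_pattern)
        # After the split, odd indices hold regex fragments and even
        # indices hold literal text.
        regex = ''.join(frag if i % 2 else re.escape(frag)
                        for i, frag in enumerate(parts))
        return re.search(regex, asm_line) is not None

    old_check = r'pand {{\.LCPI.*}}, %xmm0'
    new_check = r'pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0'

    # Suppose a codegen change makes a (%rip) suffix appear after the
    # constant pool label where there was none when the CHECK line was
    # generated.
    line = 'pand .LCPI0_0(%rip), %xmm0'

    print(check_matches(old_check, line))  # True:  greedy .* swallows (%rip)
    print(check_matches(new_check, line))  # False: the mismatch now surfaces

With the old pattern the test keeps passing even though the memory operand
changed; with the new pattern the stale CHECK line fails, so the test has to
be regenerated and the codegen difference becomes visible in review.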