[llvm] 629e2b3 - [X86][SSE] Regenerate some tests to expose the rip relative vector/broadcast loads

Simon Pilgrim via llvm-commits <llvm-commits@lists.llvm.org>
Wed May 26 06:51:10 PDT 2021


Author: Simon Pilgrim
Date: 2021-05-26T14:50:47+01:00
New Revision: 629e2b3442257937486bd7a5c8239c173492963e

URL: https://github.com/llvm/llvm-project/commit/629e2b3442257937486bd7a5c8239c173492963e
DIFF: https://github.com/llvm/llvm-project/commit/629e2b3442257937486bd7a5c8239c173492963e.diff

LOG: [X86][SSE] Regenerate some tests to expose the rip relative vector/broadcast loads
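The stale checks scrubbed constant-pool memory operands down to an opaque {{.*}}(%rip) (or hard-coded the leading dot as {{\.LCPI[0-9]+_[0-9]+}} on the 32-bit runs), which hid whether a vector/broadcast constant was actually being loaded RIP-relative. The regenerated patterns match the constant-pool label itself, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), with the optional dot covering both ELF-style (.LCPI0_0) and Mach-O-style (LCPI0_0) private-label spellings; references to named globals are now kept literal instead (e.g. c+144(%rip)). A minimal before/after sketch, with an illustrative label index:

    ; old: any RIP-relative operand matches, constant pool or not
    ; X64-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
    ; new: the operand must be a constant-pool load such as .LCPI0_0(%rip)
    ; X64-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0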

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/avx2-arith.ll
    llvm/test/CodeGen/X86/avx512-broadcast-unfold.ll
    llvm/test/CodeGen/X86/combine-mul.ll
    llvm/test/CodeGen/X86/combine-rotates.ll
    llvm/test/CodeGen/X86/oddsubvector.ll
    llvm/test/CodeGen/X86/sse-domains.ll
    llvm/test/CodeGen/X86/sse2-vector-shifts.ll
    llvm/test/CodeGen/X86/vec_shift6.ll
    llvm/test/CodeGen/X86/vector-gep.ll
    llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll
    llvm/test/CodeGen/X86/x86-shifts.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/avx2-arith.ll b/llvm/test/CodeGen/X86/avx2-arith.ll
index 960fbf069bad..52e738acad1f 100644
--- a/llvm/test/CodeGen/X86/avx2-arith.ll
+++ b/llvm/test/CodeGen/X86/avx2-arith.ll
@@ -148,7 +148,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
 ; X32-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
 ; X32-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; X32-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
-; X32-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X32-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
 ; X32-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X32-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
 ; X32-NEXT:    vzeroupper
@@ -159,7 +159,7 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
 ; X64-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
 ; X64-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; X64-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
-; X64-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; X64-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; X64-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
 ; X64-NEXT:    vzeroupper
@@ -303,12 +303,12 @@ define <8 x i32> @mul_const5(<8 x i32> %x) {
 define <8 x i32> @mul_const6(<8 x i32> %x) {
 ; X32-LABEL: mul_const6:
 ; X32:       # %bb.0:
-; X32-NEXT:    vpmulld {{\.LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X32-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: mul_const6:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpmulld {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; X64-NEXT:    retq
   %y = mul <8 x i32> %x, <i32 0, i32 0, i32 0, i32 2, i32 0, i32 2, i32 0, i32 0>
   ret <8 x i32> %y

diff --git a/llvm/test/CodeGen/X86/avx512-broadcast-unfold.ll b/llvm/test/CodeGen/X86/avx512-broadcast-unfold.ll
index c3d5f23d0fd2..c8764f725473 100644
--- a/llvm/test/CodeGen/X86/avx512-broadcast-unfold.ll
+++ b/llvm/test/CodeGen/X86/avx512-broadcast-unfold.ll
@@ -3345,7 +3345,7 @@ define void @bcast_unfold_pcmpgt_v4i32(i32* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %xmm1
 ; CHECK-NEXT:    vpcmpgtd %xmm0, %xmm1, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1 {%k1}
+; CHECK-NEXT:    vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %xmm1, 4096(%rdi,%rax)
 ; CHECK-NEXT:    addq $16, %rax
 ; CHECK-NEXT:    jne .LBB96_1
@@ -3381,7 +3381,7 @@ define void @bcast_unfold_pcmpgt_v8i32(i32* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %ymm1
 ; CHECK-NEXT:    vpcmpgtd %ymm0, %ymm1, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm1 {%k1}
+; CHECK-NEXT:    vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %ymm1, 4096(%rdi,%rax)
 ; CHECK-NEXT:    addq $32, %rax
 ; CHECK-NEXT:    jne .LBB97_1
@@ -3418,7 +3418,7 @@ define void @bcast_unfold_pcmpgt_v16i32(i32* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 4096(%rdi,%rax), %zmm1
 ; CHECK-NEXT:    vpcmpgtd %zmm0, %zmm1, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm1 {%k1}
+; CHECK-NEXT:    vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu64 %zmm1, 4096(%rdi,%rax)
 ; CHECK-NEXT:    addq $64, %rax
 ; CHECK-NEXT:    jne .LBB98_1
@@ -3455,7 +3455,7 @@ define void @bcast_unfold_pcmpgt_v2i64(i64* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %xmm1
 ; CHECK-NEXT:    vpcmpgtq %xmm0, %xmm1, %k1
-; CHECK-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm1 {%k1}
+; CHECK-NEXT:    vmovdqa64 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %xmm1, 8192(%rdi,%rax)
 ; CHECK-NEXT:    addq $16, %rax
 ; CHECK-NEXT:    jne .LBB99_1
@@ -3490,7 +3490,7 @@ define void @bcast_unfold_pcmpgt_v4i64(i64* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %ymm1
 ; CHECK-NEXT:    vpcmpgtq %ymm0, %ymm1, %k1
-; CHECK-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm1 {%k1}
+; CHECK-NEXT:    vpbroadcastq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %ymm1, 8192(%rdi,%rax)
 ; CHECK-NEXT:    addq $32, %rax
 ; CHECK-NEXT:    jne .LBB100_1
@@ -3527,7 +3527,7 @@ define void @bcast_unfold_pcmpgt_v8i64(i64* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 8192(%rdi,%rax), %zmm1
 ; CHECK-NEXT:    vpcmpgtq %zmm0, %zmm1, %k1
-; CHECK-NEXT:    vpbroadcastq {{.*}}(%rip), %zmm1 {%k1}
+; CHECK-NEXT:    vpbroadcastq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu64 %zmm1, 8192(%rdi,%rax)
 ; CHECK-NEXT:    addq $64, %rax
 ; CHECK-NEXT:    jne .LBB101_1
@@ -3564,7 +3564,7 @@ define void @bcast_unfold_pcmpeq_v4i32(i32* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %xmm1
 ; CHECK-NEXT:    vpcmpeqd %xmm0, %xmm1, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1 {%k1}
+; CHECK-NEXT:    vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %xmm1, 4096(%rdi,%rax)
 ; CHECK-NEXT:    addq $16, %rax
 ; CHECK-NEXT:    jne .LBB102_1
@@ -3600,7 +3600,7 @@ define void @bcast_unfold_pcmpeq_v8i32(i32* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %ymm1
 ; CHECK-NEXT:    vpcmpeqd %ymm0, %ymm1, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm1 {%k1}
+; CHECK-NEXT:    vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %ymm1, 4096(%rdi,%rax)
 ; CHECK-NEXT:    addq $32, %rax
 ; CHECK-NEXT:    jne .LBB103_1
@@ -3637,7 +3637,7 @@ define void @bcast_unfold_pcmpeq_v16i32(i32* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 4096(%rdi,%rax), %zmm1
 ; CHECK-NEXT:    vpcmpeqd %zmm0, %zmm1, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm1 {%k1}
+; CHECK-NEXT:    vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu64 %zmm1, 4096(%rdi,%rax)
 ; CHECK-NEXT:    addq $64, %rax
 ; CHECK-NEXT:    jne .LBB104_1
@@ -3674,7 +3674,7 @@ define void @bcast_unfold_pcmpeq_v2i64(i64* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %xmm1
 ; CHECK-NEXT:    vpcmpeqq %xmm0, %xmm1, %k1
-; CHECK-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm1 {%k1}
+; CHECK-NEXT:    vmovdqa64 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %xmm1, 8192(%rdi,%rax)
 ; CHECK-NEXT:    addq $16, %rax
 ; CHECK-NEXT:    jne .LBB105_1
@@ -3709,7 +3709,7 @@ define void @bcast_unfold_pcmpeq_v4i64(i64* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %ymm1
 ; CHECK-NEXT:    vpcmpeqq %ymm0, %ymm1, %k1
-; CHECK-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm1 {%k1}
+; CHECK-NEXT:    vpbroadcastq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %ymm1, 8192(%rdi,%rax)
 ; CHECK-NEXT:    addq $32, %rax
 ; CHECK-NEXT:    jne .LBB106_1
@@ -3746,7 +3746,7 @@ define void @bcast_unfold_pcmpeq_v8i64(i64* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 8192(%rdi,%rax), %zmm1
 ; CHECK-NEXT:    vpcmpeqq %zmm0, %zmm1, %k1
-; CHECK-NEXT:    vpbroadcastq {{.*}}(%rip), %zmm1 {%k1}
+; CHECK-NEXT:    vpbroadcastq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu64 %zmm1, 8192(%rdi,%rax)
 ; CHECK-NEXT:    addq $64, %rax
 ; CHECK-NEXT:    jne .LBB107_1
@@ -3783,7 +3783,7 @@ define void @bcast_unfold_pcmp_v4i32(i32* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,4), %xmm1
 ; CHECK-NEXT:    vpcmpltd %xmm0, %xmm1, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1 {%k1}
+; CHECK-NEXT:    vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %xmm1, (%rdi,%rax,4)
 ; CHECK-NEXT:    addq $4, %rax
 ; CHECK-NEXT:    cmpq $1023, %rax # imm = 0x3FF
@@ -3820,7 +3820,7 @@ define void @bcast_unfold_pcmp_v8i32(i32* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,4), %ymm1
 ; CHECK-NEXT:    vpcmpltd %ymm0, %ymm1, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm1 {%k1}
+; CHECK-NEXT:    vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %ymm1, (%rdi,%rax,4)
 ; CHECK-NEXT:    addq $8, %rax
 ; CHECK-NEXT:    cmpq $1023, %rax # imm = 0x3FF
@@ -3858,7 +3858,7 @@ define void @bcast_unfold_pcmp_v16i32(i32* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 (%rdi,%rax,4), %zmm1
 ; CHECK-NEXT:    vpcmpltd %zmm0, %zmm1, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm1 {%k1}
+; CHECK-NEXT:    vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu64 %zmm1, (%rdi,%rax,4)
 ; CHECK-NEXT:    addq $16, %rax
 ; CHECK-NEXT:    cmpq $1023, %rax # imm = 0x3FF
@@ -3896,7 +3896,7 @@ define void @bcast_unfold_pcmp_v2i64(i64* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,8), %xmm1
 ; CHECK-NEXT:    vpcmpltq %xmm0, %xmm1, %k1
-; CHECK-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm1 {%k1}
+; CHECK-NEXT:    vmovdqa64 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %xmm1, (%rdi,%rax,8)
 ; CHECK-NEXT:    addq $2, %rax
 ; CHECK-NEXT:    cmpq $1023, %rax # imm = 0x3FF
@@ -3932,7 +3932,7 @@ define void @bcast_unfold_pcmp_v4i64(i64* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,8), %ymm1
 ; CHECK-NEXT:    vpcmpltq %ymm0, %ymm1, %k1
-; CHECK-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm1 {%k1}
+; CHECK-NEXT:    vpbroadcastq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %ymm1, (%rdi,%rax,8)
 ; CHECK-NEXT:    addq $4, %rax
 ; CHECK-NEXT:    cmpq $1023, %rax # imm = 0x3FF
@@ -3970,7 +3970,7 @@ define void @bcast_unfold_pcmp_v8i64(i64* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 (%rdi,%rax,8), %zmm1
 ; CHECK-NEXT:    vpcmpltq %zmm0, %zmm1, %k1
-; CHECK-NEXT:    vpbroadcastq {{.*}}(%rip), %zmm1 {%k1}
+; CHECK-NEXT:    vpbroadcastq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu64 %zmm1, (%rdi,%rax,8)
 ; CHECK-NEXT:    addq $8, %rax
 ; CHECK-NEXT:    cmpq $1023, %rax # imm = 0x3FF
@@ -4008,7 +4008,7 @@ define void @bcast_unfold_pcmpu_v4i32(i32* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,4), %xmm1
 ; CHECK-NEXT:    vpcmpltud %xmm0, %xmm1, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1 {%k1}
+; CHECK-NEXT:    vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %xmm1, (%rdi,%rax,4)
 ; CHECK-NEXT:    addq $4, %rax
 ; CHECK-NEXT:    cmpq $1023, %rax # imm = 0x3FF
@@ -4045,7 +4045,7 @@ define void @bcast_unfold_pcmpu_v8i32(i32* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,4), %ymm1
 ; CHECK-NEXT:    vpcmpltud %ymm0, %ymm1, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %ymm1 {%k1}
+; CHECK-NEXT:    vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %ymm1, (%rdi,%rax,4)
 ; CHECK-NEXT:    addq $8, %rax
 ; CHECK-NEXT:    cmpq $1023, %rax # imm = 0x3FF
@@ -4083,7 +4083,7 @@ define void @bcast_unfold_pcmpu_v16i32(i32* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 (%rdi,%rax,4), %zmm1
 ; CHECK-NEXT:    vpcmpltud %zmm0, %zmm1, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %zmm1 {%k1}
+; CHECK-NEXT:    vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu64 %zmm1, (%rdi,%rax,4)
 ; CHECK-NEXT:    addq $16, %rax
 ; CHECK-NEXT:    cmpq $1023, %rax # imm = 0x3FF
@@ -4121,7 +4121,7 @@ define void @bcast_unfold_pcmpu_v2i64(i64* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,8), %xmm1
 ; CHECK-NEXT:    vpcmpltuq %xmm0, %xmm1, %k1
-; CHECK-NEXT:    vmovdqa64 {{.*}}(%rip), %xmm1 {%k1}
+; CHECK-NEXT:    vmovdqa64 {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %xmm1, (%rdi,%rax,8)
 ; CHECK-NEXT:    addq $2, %rax
 ; CHECK-NEXT:    cmpq $1023, %rax # imm = 0x3FF
@@ -4157,7 +4157,7 @@ define void @bcast_unfold_pcmpu_v4i64(i64* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu (%rdi,%rax,8), %ymm1
 ; CHECK-NEXT:    vpcmpltuq %ymm0, %ymm1, %k1
-; CHECK-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm1 {%k1}
+; CHECK-NEXT:    vpbroadcastq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %ymm1, (%rdi,%rax,8)
 ; CHECK-NEXT:    addq $4, %rax
 ; CHECK-NEXT:    cmpq $1023, %rax # imm = 0x3FF
@@ -4195,7 +4195,7 @@ define void @bcast_unfold_pcmpu_v8i64(i64* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu64 (%rdi,%rax,8), %zmm1
 ; CHECK-NEXT:    vpcmpltuq %zmm0, %zmm1, %k1
-; CHECK-NEXT:    vpbroadcastq {{.*}}(%rip), %zmm1 {%k1}
+; CHECK-NEXT:    vpbroadcastq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu64 %zmm1, (%rdi,%rax,8)
 ; CHECK-NEXT:    addq $8, %rax
 ; CHECK-NEXT:    cmpq $1023, %rax # imm = 0x3FF
@@ -4458,7 +4458,7 @@ define void @bcast_unfold_cmp_v8f32_refold(float* nocapture %0) {
 ; CHECK-NEXT:    .p2align 4, 0x90
 ; CHECK-NEXT:  .LBB126_1: # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vcmpgtps 4096(%rdi,%rax), %ymm0, %k1
-; CHECK-NEXT:    vblendmps {{.*}}(%rip){1to8}, %ymm1, %ymm2 {%k1}
+; CHECK-NEXT:    vblendmps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm2 {%k1}
 ; CHECK-NEXT:    vmovups %ymm2, 4096(%rdi,%rax)
 ; CHECK-NEXT:    addq $32, %rax
 ; CHECK-NEXT:    jne .LBB126_1
@@ -4494,7 +4494,7 @@ define void @bcast_unfold_ptestm_v4i32(i32* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %xmm1
 ; CHECK-NEXT:    vptestmd %xmm0, %xmm1, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1 {%k1}
+; CHECK-NEXT:    vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %xmm1, 4096(%rdi,%rax)
 ; CHECK-NEXT:    addq $16, %rax
 ; CHECK-NEXT:    jne .LBB127_1
@@ -4531,7 +4531,7 @@ define void @bcast_unfold_ptestnm_v4i32(i32* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %xmm1
 ; CHECK-NEXT:    vptestnmd %xmm0, %xmm1, %k1
-; CHECK-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1 {%k1}
+; CHECK-NEXT:    vpbroadcastd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %xmm1, 4096(%rdi,%rax)
 ; CHECK-NEXT:    addq $16, %rax
 ; CHECK-NEXT:    jne .LBB128_1
@@ -4568,7 +4568,7 @@ define void @bcast_unfold_ptestm_v4i64(i64* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %ymm1
 ; CHECK-NEXT:    vptestmq %ymm0, %ymm1, %k1
-; CHECK-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm1 {%k1}
+; CHECK-NEXT:    vpbroadcastq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %ymm1, 8192(%rdi,%rax)
 ; CHECK-NEXT:    addq $32, %rax
 ; CHECK-NEXT:    jne .LBB129_1
@@ -4606,7 +4606,7 @@ define void @bcast_unfold_ptestnm_v4i64(i64* %arg) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %ymm1
 ; CHECK-NEXT:    vptestnmq %ymm0, %ymm1, %k1
-; CHECK-NEXT:    vpbroadcastq {{.*}}(%rip), %ymm1 {%k1}
+; CHECK-NEXT:    vpbroadcastq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1 {%k1}
 ; CHECK-NEXT:    vmovdqu %ymm1, 8192(%rdi,%rax)
 ; CHECK-NEXT:    addq $32, %rax
 ; CHECK-NEXT:    jne .LBB130_1

diff --git a/llvm/test/CodeGen/X86/combine-mul.ll b/llvm/test/CodeGen/X86/combine-mul.ll
index df33ef6e09ce..762761b02c21 100644
--- a/llvm/test/CodeGen/X86/combine-mul.ll
+++ b/llvm/test/CodeGen/X86/combine-mul.ll
@@ -66,12 +66,12 @@ define <4 x i32> @combine_vec_mul_pow2a(<4 x i32> %x) {
 define <4 x i32> @combine_vec_mul_pow2b(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_pow2b:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_pow2b:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = mul <4 x i32> %x, <i32 1, i32 2, i32 4, i32 16>
   ret <4 x i32> %1
@@ -92,7 +92,7 @@ define <4 x i64> @combine_vec_mul_pow2c(<4 x i64> %x) {
 ;
 ; AVX-LABEL: combine_vec_mul_pow2c:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    vpsllvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %1 = mul <4 x i64> %x, <i64 1, i64 2, i64 4, i64 16>
   ret <4 x i64> %1
@@ -121,12 +121,12 @@ define <4 x i32> @combine_vec_mul_negpow2a(<4 x i32> %x) {
 define <4 x i32> @combine_vec_mul_negpow2b(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_negpow2b:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_negpow2b:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = mul <4 x i32> %x, <i32 -1, i32 -2, i32 -4, i32 -16>
   ret <4 x i32> %1
@@ -177,12 +177,12 @@ define <4 x i64> @combine_vec_mul_negpow2c(<4 x i64> %x) {
 define <4 x i32> @combine_vec_mul_shl_const(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_shl_const:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_shl_const:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
   %2 = mul <4 x i32> %1, <i32 1, i32 3, i32 5, i32 7>
@@ -194,13 +194,13 @@ define <4 x i32> @combine_vec_mul_shl_oneuse0(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_mul_shl_oneuse0:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld %xmm1, %xmm0
-; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_shl_oneuse0:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
   %2 = mul <4 x i32> %1, %y
@@ -211,13 +211,13 @@ define <4 x i32> @combine_vec_mul_shl_oneuse1(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_mul_shl_oneuse1:
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pmulld %xmm1, %xmm0
-; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_shl_oneuse1:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = shl <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
   %2 = mul <4 x i32> %y, %1
@@ -227,14 +227,14 @@ define <4 x i32> @combine_vec_mul_shl_oneuse1(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @combine_vec_mul_shl_multiuse0(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_mul_shl_multiuse0:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE-NEXT:    pmulld %xmm0, %xmm1
 ; SSE-NEXT:    paddd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_shl_multiuse0:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm1
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -247,14 +247,14 @@ define <4 x i32> @combine_vec_mul_shl_multiuse0(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @combine_vec_mul_shl_multiuse1(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: combine_vec_mul_shl_multiuse1:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE-NEXT:    pmulld %xmm0, %xmm1
 ; SSE-NEXT:    paddd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_shl_multiuse1:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    vpmulld %xmm0, %xmm1, %xmm1
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
@@ -269,14 +269,14 @@ define <4 x i32> @combine_vec_mul_shl_multiuse1(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @combine_vec_mul_add(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_mul_add:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
-; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
+; SSE-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_mul_add:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = add <4 x i32> %x, <i32 1, i32 2, i32 8, i32 16>
   %2 = mul <4 x i32> %1, <i32 4, i32 6, i32 2, i32 0>
@@ -290,7 +290,7 @@ define <16 x i8> @combine_mul_to_abs_v16i8(<16 x i8> %x) {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm2, %xmm2
 ; SSE-NEXT:    pcmpgtb %xmm0, %xmm2
-; SSE-NEXT:    por {{.*}}(%rip), %xmm2
+; SSE-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
 ; SSE-NEXT:    pmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSE-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
@@ -308,11 +308,11 @@ define <16 x i8> @combine_mul_to_abs_v16i8(<16 x i8> %x) {
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpcmpgtb %xmm0, %xmm1, %xmm1
-; AVX-NEXT:    vpor {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
 ; AVX-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
 ; AVX-NEXT:    vpmullw %ymm0, %ymm1, %ymm0
-; AVX-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
@@ -329,7 +329,7 @@ define <2 x i64> @combine_mul_to_abs_v2i64(<2 x i64> %x) {
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
 ; SSE-NEXT:    psrad $31, %xmm1
 ; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE-NEXT:    por {{.*}}(%rip), %xmm1
+; SSE-NEXT:    por {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
 ; SSE-NEXT:    psrlq $32, %xmm2
 ; SSE-NEXT:    pmuludq %xmm1, %xmm2
@@ -346,7 +346,7 @@ define <2 x i64> @combine_mul_to_abs_v2i64(<2 x i64> %x) {
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm1
-; AVX-NEXT:    vpor {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT:    vpor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT:    vpsrlq $32, %xmm0, %xmm2
 ; AVX-NEXT:    vpmuludq %xmm1, %xmm2, %xmm2
 ; AVX-NEXT:    vpsrlq $32, %xmm1, %xmm3
@@ -370,10 +370,10 @@ define <16 x i8> @PR35579(<16 x i8> %x) {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
 ; SSE-NEXT:    punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
 ; SSE-NEXT:    pand %xmm2, %xmm0
-; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm1
+; SSE-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; SSE-NEXT:    pand %xmm2, %xmm1
 ; SSE-NEXT:    packuswb %xmm0, %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
@@ -382,8 +382,8 @@ define <16 x i8> @PR35579(<16 x i8> %x) {
 ; AVX-LABEL: PR35579:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
-; AVX-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vzeroupper
@@ -407,7 +407,7 @@ define <4 x i64> @fuzz15429(<4 x i64> %InVec) {
 ;
 ; AVX-LABEL: fuzz15429:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    vpsllvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    movabsq $9223372036854775807, %rax # imm = 0x7FFFFFFFFFFFFFFF
 ; AVX-NEXT:    vpinsrq $0, %rax, %xmm0, %xmm1
 ; AVX-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]

diff --git a/llvm/test/CodeGen/X86/combine-rotates.ll b/llvm/test/CodeGen/X86/combine-rotates.ll
index d1fcd2fb0259..09cccc0c1658 100644
--- a/llvm/test/CodeGen/X86/combine-rotates.ll
+++ b/llvm/test/CodeGen/X86/combine-rotates.ll
@@ -24,19 +24,19 @@ define <4 x i32> @combine_vec_rot_rot(<4 x i32> %x) {
 ;
 ; XOP-LABEL: combine_vec_rot_rot:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vprotd {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    vprotd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
 ; AVX2-LABEL: combine_vec_rot_rot:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm1
-; AVX2-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVX2-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX2-NEXT:    vpor %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: combine_vec_rot_rot:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vprolvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512-NEXT:    vprolvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
   %2 = shl <4 x i32> %x, <i32 31, i32 30, i32 29, i32 28>
@@ -122,7 +122,7 @@ define <4 x i32> @combine_vec_rot_select_zero(<4 x i32>, <4 x i32>) {
 ; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [31,31,31,31]
 ; SSE2-NEXT:    pand %xmm1, %xmm3
 ; SSE2-NEXT:    pslld $23, %xmm3
-; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm3
+; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
 ; SSE2-NEXT:    cvttps2dq %xmm3, %xmm3
 ; SSE2-NEXT:    movdqa %xmm0, %xmm4
 ; SSE2-NEXT:    pmuludq %xmm3, %xmm4
@@ -185,9 +185,9 @@ define <4 x i32> @combine_vec_rot_select_zero(<4 x i32>, <4 x i32>) {
 define <4 x i32> @rotate_demanded_bits(<4 x i32>, <4 x i32>) {
 ; SSE2-LABEL: rotate_demanded_bits:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; SSE2-NEXT:    pslld $23, %xmm1
-; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; SSE2-NEXT:    pmuludq %xmm1, %xmm0
@@ -204,7 +204,7 @@ define <4 x i32> @rotate_demanded_bits(<4 x i32>, <4 x i32>) {
 ;
 ; XOP-LABEL: rotate_demanded_bits:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
 ; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
@@ -221,7 +221,7 @@ define <4 x i32> @rotate_demanded_bits(<4 x i32>, <4 x i32>) {
 ;
 ; AVX512-LABEL: rotate_demanded_bits:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpandd {{.*}}(%rip){1to4}, %xmm1, %xmm1
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
 ; AVX512-NEXT:    vprolvd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %3 = and <4 x i32> %1, <i32 30, i32 30, i32 30, i32 30>
@@ -236,9 +236,9 @@ define <4 x i32> @rotate_demanded_bits(<4 x i32>, <4 x i32>) {
 define <4 x i32> @rotate_demanded_bits_2(<4 x i32>, <4 x i32>) {
 ; SSE2-LABEL: rotate_demanded_bits_2:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; SSE2-NEXT:    pslld $23, %xmm1
-; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; SSE2-NEXT:    pmuludq %xmm1, %xmm0
@@ -255,7 +255,7 @@ define <4 x i32> @rotate_demanded_bits_2(<4 x i32>, <4 x i32>) {
 ;
 ; XOP-LABEL: rotate_demanded_bits_2:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
 ; XOP-NEXT:    vprotd %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
 ;
@@ -272,7 +272,7 @@ define <4 x i32> @rotate_demanded_bits_2(<4 x i32>, <4 x i32>) {
 ;
 ; AVX512-LABEL: rotate_demanded_bits_2:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpandd {{.*}}(%rip){1to4}, %xmm1, %xmm1
+; AVX512-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
 ; AVX512-NEXT:    vprolvd %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    retq
   %3 = and <4 x i32> %1, <i32 23, i32 23, i32 23, i32 23>
@@ -288,9 +288,9 @@ define <4 x i32> @rotate_demanded_bits_3(<4 x i32>, <4 x i32>) {
 ; SSE2-LABEL: rotate_demanded_bits_3:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    paddd %xmm1, %xmm1
-; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; SSE2-NEXT:    pslld $23, %xmm1
-; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
 ; SSE2-NEXT:    pmuludq %xmm1, %xmm0

diff --git a/llvm/test/CodeGen/X86/oddsubvector.ll b/llvm/test/CodeGen/X86/oddsubvector.ll
index 0456f1d1bb37..3a5e35336e0e 100644
--- a/llvm/test/CodeGen/X86/oddsubvector.ll
+++ b/llvm/test/CodeGen/X86/oddsubvector.ll
@@ -159,189 +159,189 @@ define <16 x i32> @PR42819(<8 x i32>* %a0) {
 define void @PR42833() {
 ; SSE2-LABEL: PR42833:
 ; SSE2:       # %bb.0:
-; SSE2-NEXT:    movdqa c+{{.*}}(%rip), %xmm1
-; SSE2-NEXT:    movdqa c+{{.*}}(%rip), %xmm0
+; SSE2-NEXT:    movdqa c+144(%rip), %xmm1
+; SSE2-NEXT:    movdqa c+128(%rip), %xmm0
 ; SSE2-NEXT:    movd %xmm0, %eax
-; SSE2-NEXT:    addl {{.*}}(%rip), %eax
+; SSE2-NEXT:    addl b(%rip), %eax
 ; SSE2-NEXT:    movd %eax, %xmm2
 ; SSE2-NEXT:    movd %eax, %xmm3
 ; SSE2-NEXT:    paddd %xmm0, %xmm3
-; SSE2-NEXT:    movdqa d+{{.*}}(%rip), %xmm4
+; SSE2-NEXT:    movdqa d+144(%rip), %xmm4
 ; SSE2-NEXT:    psubd %xmm1, %xmm4
 ; SSE2-NEXT:    paddd %xmm1, %xmm1
 ; SSE2-NEXT:    movdqa %xmm0, %xmm5
 ; SSE2-NEXT:    paddd %xmm0, %xmm5
 ; SSE2-NEXT:    movss {{.*#+}} xmm5 = xmm3[0],xmm5[1,2,3]
-; SSE2-NEXT:    movdqa %xmm1, c+{{.*}}(%rip)
-; SSE2-NEXT:    movaps %xmm5, c+{{.*}}(%rip)
-; SSE2-NEXT:    movdqa c+{{.*}}(%rip), %xmm1
-; SSE2-NEXT:    movdqa c+{{.*}}(%rip), %xmm3
-; SSE2-NEXT:    movdqa d+{{.*}}(%rip), %xmm5
-; SSE2-NEXT:    movdqa d+{{.*}}(%rip), %xmm6
-; SSE2-NEXT:    movdqa d+{{.*}}(%rip), %xmm7
+; SSE2-NEXT:    movdqa %xmm1, c+144(%rip)
+; SSE2-NEXT:    movaps %xmm5, c+128(%rip)
+; SSE2-NEXT:    movdqa c+160(%rip), %xmm1
+; SSE2-NEXT:    movdqa c+176(%rip), %xmm3
+; SSE2-NEXT:    movdqa d+160(%rip), %xmm5
+; SSE2-NEXT:    movdqa d+176(%rip), %xmm6
+; SSE2-NEXT:    movdqa d+128(%rip), %xmm7
 ; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
 ; SSE2-NEXT:    psubd %xmm0, %xmm7
 ; SSE2-NEXT:    psubd %xmm3, %xmm6
 ; SSE2-NEXT:    psubd %xmm1, %xmm5
-; SSE2-NEXT:    movdqa %xmm5, d+{{.*}}(%rip)
-; SSE2-NEXT:    movdqa %xmm6, d+{{.*}}(%rip)
-; SSE2-NEXT:    movdqa %xmm4, d+{{.*}}(%rip)
-; SSE2-NEXT:    movdqa %xmm7, d+{{.*}}(%rip)
+; SSE2-NEXT:    movdqa %xmm5, d+160(%rip)
+; SSE2-NEXT:    movdqa %xmm6, d+176(%rip)
+; SSE2-NEXT:    movdqa %xmm4, d+144(%rip)
+; SSE2-NEXT:    movdqa %xmm7, d+128(%rip)
 ; SSE2-NEXT:    paddd %xmm3, %xmm3
 ; SSE2-NEXT:    paddd %xmm1, %xmm1
-; SSE2-NEXT:    movdqa %xmm1, c+{{.*}}(%rip)
-; SSE2-NEXT:    movdqa %xmm3, c+{{.*}}(%rip)
+; SSE2-NEXT:    movdqa %xmm1, c+160(%rip)
+; SSE2-NEXT:    movdqa %xmm3, c+176(%rip)
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: PR42833:
 ; SSE42:       # %bb.0:
-; SSE42-NEXT:    movdqa c+{{.*}}(%rip), %xmm0
-; SSE42-NEXT:    movdqa c+{{.*}}(%rip), %xmm1
+; SSE42-NEXT:    movdqa c+144(%rip), %xmm0
+; SSE42-NEXT:    movdqa c+128(%rip), %xmm1
 ; SSE42-NEXT:    movd %xmm1, %eax
-; SSE42-NEXT:    addl {{.*}}(%rip), %eax
+; SSE42-NEXT:    addl b(%rip), %eax
 ; SSE42-NEXT:    movd %eax, %xmm2
 ; SSE42-NEXT:    paddd %xmm1, %xmm2
-; SSE42-NEXT:    movdqa d+{{.*}}(%rip), %xmm3
+; SSE42-NEXT:    movdqa d+144(%rip), %xmm3
 ; SSE42-NEXT:    psubd %xmm0, %xmm3
 ; SSE42-NEXT:    paddd %xmm0, %xmm0
 ; SSE42-NEXT:    movdqa %xmm1, %xmm4
 ; SSE42-NEXT:    paddd %xmm1, %xmm4
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm4 = xmm2[0,1],xmm4[2,3,4,5,6,7]
-; SSE42-NEXT:    movdqa %xmm0, c+{{.*}}(%rip)
-; SSE42-NEXT:    movdqa %xmm4, c+{{.*}}(%rip)
-; SSE42-NEXT:    movdqa c+{{.*}}(%rip), %xmm0
-; SSE42-NEXT:    movdqa c+{{.*}}(%rip), %xmm2
-; SSE42-NEXT:    movdqa d+{{.*}}(%rip), %xmm4
-; SSE42-NEXT:    movdqa d+{{.*}}(%rip), %xmm5
-; SSE42-NEXT:    movdqa d+{{.*}}(%rip), %xmm6
+; SSE42-NEXT:    movdqa %xmm0, c+144(%rip)
+; SSE42-NEXT:    movdqa %xmm4, c+128(%rip)
+; SSE42-NEXT:    movdqa c+160(%rip), %xmm0
+; SSE42-NEXT:    movdqa c+176(%rip), %xmm2
+; SSE42-NEXT:    movdqa d+160(%rip), %xmm4
+; SSE42-NEXT:    movdqa d+176(%rip), %xmm5
+; SSE42-NEXT:    movdqa d+128(%rip), %xmm6
 ; SSE42-NEXT:    pinsrd $0, %eax, %xmm1
 ; SSE42-NEXT:    psubd %xmm1, %xmm6
 ; SSE42-NEXT:    psubd %xmm2, %xmm5
 ; SSE42-NEXT:    psubd %xmm0, %xmm4
-; SSE42-NEXT:    movdqa %xmm4, d+{{.*}}(%rip)
-; SSE42-NEXT:    movdqa %xmm5, d+{{.*}}(%rip)
-; SSE42-NEXT:    movdqa %xmm3, d+{{.*}}(%rip)
-; SSE42-NEXT:    movdqa %xmm6, d+{{.*}}(%rip)
+; SSE42-NEXT:    movdqa %xmm4, d+160(%rip)
+; SSE42-NEXT:    movdqa %xmm5, d+176(%rip)
+; SSE42-NEXT:    movdqa %xmm3, d+144(%rip)
+; SSE42-NEXT:    movdqa %xmm6, d+128(%rip)
 ; SSE42-NEXT:    paddd %xmm2, %xmm2
 ; SSE42-NEXT:    paddd %xmm0, %xmm0
-; SSE42-NEXT:    movdqa %xmm0, c+{{.*}}(%rip)
-; SSE42-NEXT:    movdqa %xmm2, c+{{.*}}(%rip)
+; SSE42-NEXT:    movdqa %xmm0, c+160(%rip)
+; SSE42-NEXT:    movdqa %xmm2, c+176(%rip)
 ; SSE42-NEXT:    retq
 ;
 ; AVX1-LABEL: PR42833:
 ; AVX1:       # %bb.0:
-; AVX1-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm0
+; AVX1-NEXT:    vmovdqa c+128(%rip), %xmm0
 ; AVX1-NEXT:    vmovd %xmm0, %eax
-; AVX1-NEXT:    addl {{.*}}(%rip), %eax
+; AVX1-NEXT:    addl b(%rip), %eax
 ; AVX1-NEXT:    vmovd %eax, %xmm1
 ; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
 ; AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm2
-; AVX1-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm3
+; AVX1-NEXT:    vmovdqa c+144(%rip), %xmm3
 ; AVX1-NEXT:    vpaddd %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7]
-; AVX1-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm2
-; AVX1-NEXT:    vpsubd c+{{.*}}(%rip), %xmm2, %xmm2
-; AVX1-NEXT:    vmovups %ymm1, c+{{.*}}(%rip)
+; AVX1-NEXT:    vmovdqa d+144(%rip), %xmm2
+; AVX1-NEXT:    vpsubd c+144(%rip), %xmm2, %xmm2
+; AVX1-NEXT:    vmovups %ymm1, c+128(%rip)
 ; AVX1-NEXT:    vpinsrd $0, %eax, %xmm0, %xmm0
-; AVX1-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm1
+; AVX1-NEXT:    vmovdqa d+128(%rip), %xmm1
 ; AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
-; AVX1-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm1
-; AVX1-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm3
+; AVX1-NEXT:    vmovdqa d+176(%rip), %xmm1
+; AVX1-NEXT:    vmovdqa c+176(%rip), %xmm3
 ; AVX1-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm4
-; AVX1-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm5
+; AVX1-NEXT:    vmovdqa d+160(%rip), %xmm4
+; AVX1-NEXT:    vmovdqa c+160(%rip), %xmm5
 ; AVX1-NEXT:    vpsubd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT:    vmovdqa %xmm2, d+{{.*}}(%rip)
-; AVX1-NEXT:    vmovdqa %xmm4, d+{{.*}}(%rip)
-; AVX1-NEXT:    vmovdqa %xmm1, d+{{.*}}(%rip)
-; AVX1-NEXT:    vmovdqa %xmm0, d+{{.*}}(%rip)
+; AVX1-NEXT:    vmovdqa %xmm2, d+144(%rip)
+; AVX1-NEXT:    vmovdqa %xmm4, d+160(%rip)
+; AVX1-NEXT:    vmovdqa %xmm1, d+176(%rip)
+; AVX1-NEXT:    vmovdqa %xmm0, d+128(%rip)
 ; AVX1-NEXT:    vpaddd %xmm3, %xmm3, %xmm0
 ; AVX1-NEXT:    vpaddd %xmm5, %xmm5, %xmm1
-; AVX1-NEXT:    vmovdqa %xmm1, c+{{.*}}(%rip)
-; AVX1-NEXT:    vmovdqa %xmm0, c+{{.*}}(%rip)
+; AVX1-NEXT:    vmovdqa %xmm1, c+160(%rip)
+; AVX1-NEXT:    vmovdqa %xmm0, c+176(%rip)
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: PR42833:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    movl {{.*}}(%rip), %eax
-; AVX2-NEXT:    vmovdqu c+{{.*}}(%rip), %ymm0
-; AVX2-NEXT:    addl c+{{.*}}(%rip), %eax
+; AVX2-NEXT:    movl b(%rip), %eax
+; AVX2-NEXT:    vmovdqu c+128(%rip), %ymm0
+; AVX2-NEXT:    addl c+128(%rip), %eax
 ; AVX2-NEXT:    vmovd %eax, %xmm1
 ; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm2
 ; AVX2-NEXT:    vpaddd %ymm0, %ymm0, %ymm3
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1,2,3,4,5,6,7]
-; AVX2-NEXT:    vmovdqu %ymm2, c+{{.*}}(%rip)
-; AVX2-NEXT:    vmovdqu c+{{.*}}(%rip), %ymm2
-; AVX2-NEXT:    vmovdqu d+{{.*}}(%rip), %ymm3
-; AVX2-NEXT:    vmovdqu d+{{.*}}(%rip), %ymm4
+; AVX2-NEXT:    vmovdqu %ymm2, c+128(%rip)
+; AVX2-NEXT:    vmovdqu c+160(%rip), %ymm2
+; AVX2-NEXT:    vmovdqu d+160(%rip), %ymm3
+; AVX2-NEXT:    vmovdqu d+128(%rip), %ymm4
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
 ; AVX2-NEXT:    vpsubd %ymm0, %ymm4, %ymm0
 ; AVX2-NEXT:    vpsubd %ymm2, %ymm3, %ymm1
-; AVX2-NEXT:    vmovdqu %ymm1, d+{{.*}}(%rip)
-; AVX2-NEXT:    vmovdqu %ymm0, d+{{.*}}(%rip)
+; AVX2-NEXT:    vmovdqu %ymm1, d+160(%rip)
+; AVX2-NEXT:    vmovdqu %ymm0, d+128(%rip)
 ; AVX2-NEXT:    vpaddd %ymm2, %ymm2, %ymm0
-; AVX2-NEXT:    vmovdqu %ymm0, c+{{.*}}(%rip)
+; AVX2-NEXT:    vmovdqu %ymm0, c+160(%rip)
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: PR42833:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    movl {{.*}}(%rip), %eax
-; AVX512-NEXT:    vmovdqu c+{{.*}}(%rip), %ymm0
-; AVX512-NEXT:    vmovdqu64 c+{{.*}}(%rip), %zmm1
-; AVX512-NEXT:    addl c+{{.*}}(%rip), %eax
+; AVX512-NEXT:    movl b(%rip), %eax
+; AVX512-NEXT:    vmovdqu c+128(%rip), %ymm0
+; AVX512-NEXT:    vmovdqu64 c+128(%rip), %zmm1
+; AVX512-NEXT:    addl c+128(%rip), %eax
 ; AVX512-NEXT:    vmovd %eax, %xmm2
 ; AVX512-NEXT:    vpaddd %ymm2, %ymm0, %ymm2
 ; AVX512-NEXT:    vpaddd %ymm0, %ymm0, %ymm0
 ; AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1,2,3,4,5,6,7]
-; AVX512-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm2
-; AVX512-NEXT:    vmovdqu %ymm0, c+{{.*}}(%rip)
-; AVX512-NEXT:    vmovdqu c+{{.*}}(%rip), %ymm0
-; AVX512-NEXT:    vmovdqu64 d+{{.*}}(%rip), %zmm3
+; AVX512-NEXT:    vmovdqa c+128(%rip), %xmm2
+; AVX512-NEXT:    vmovdqu %ymm0, c+128(%rip)
+; AVX512-NEXT:    vmovdqu c+160(%rip), %ymm0
+; AVX512-NEXT:    vmovdqu64 d+128(%rip), %zmm3
 ; AVX512-NEXT:    vpinsrd $0, %eax, %xmm2, %xmm2
 ; AVX512-NEXT:    vinserti32x4 $0, %xmm2, %zmm1, %zmm1
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm1
 ; AVX512-NEXT:    vpsubd %zmm1, %zmm3, %zmm1
-; AVX512-NEXT:    vmovdqu64 %zmm1, d+{{.*}}(%rip)
+; AVX512-NEXT:    vmovdqu64 %zmm1, d+128(%rip)
 ; AVX512-NEXT:    vpaddd %ymm0, %ymm0, %ymm0
-; AVX512-NEXT:    vmovdqu %ymm0, c+{{.*}}(%rip)
+; AVX512-NEXT:    vmovdqu %ymm0, c+160(%rip)
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
 ;
 ; XOP-LABEL: PR42833:
 ; XOP:       # %bb.0:
-; XOP-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm0
+; XOP-NEXT:    vmovdqa c+128(%rip), %xmm0
 ; XOP-NEXT:    vmovd %xmm0, %eax
-; XOP-NEXT:    addl {{.*}}(%rip), %eax
+; XOP-NEXT:    addl b(%rip), %eax
 ; XOP-NEXT:    vmovd %eax, %xmm1
 ; XOP-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
 ; XOP-NEXT:    vpaddd %xmm0, %xmm0, %xmm2
-; XOP-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm3
+; XOP-NEXT:    vmovdqa c+144(%rip), %xmm3
 ; XOP-NEXT:    vpaddd %xmm3, %xmm3, %xmm3
 ; XOP-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm2
 ; XOP-NEXT:    vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1,2,3,4,5,6,7]
-; XOP-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm2
-; XOP-NEXT:    vpsubd c+{{.*}}(%rip), %xmm2, %xmm2
-; XOP-NEXT:    vmovups %ymm1, c+{{.*}}(%rip)
+; XOP-NEXT:    vmovdqa d+144(%rip), %xmm2
+; XOP-NEXT:    vpsubd c+144(%rip), %xmm2, %xmm2
+; XOP-NEXT:    vmovups %ymm1, c+128(%rip)
 ; XOP-NEXT:    vpinsrd $0, %eax, %xmm0, %xmm0
-; XOP-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm1
+; XOP-NEXT:    vmovdqa d+128(%rip), %xmm1
 ; XOP-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
-; XOP-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm1
-; XOP-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm3
+; XOP-NEXT:    vmovdqa d+176(%rip), %xmm1
+; XOP-NEXT:    vmovdqa c+176(%rip), %xmm3
 ; XOP-NEXT:    vpsubd %xmm3, %xmm1, %xmm1
-; XOP-NEXT:    vmovdqa d+{{.*}}(%rip), %xmm4
-; XOP-NEXT:    vmovdqa c+{{.*}}(%rip), %xmm5
+; XOP-NEXT:    vmovdqa d+160(%rip), %xmm4
+; XOP-NEXT:    vmovdqa c+160(%rip), %xmm5
 ; XOP-NEXT:    vpsubd %xmm5, %xmm4, %xmm4
-; XOP-NEXT:    vmovdqa %xmm2, d+{{.*}}(%rip)
-; XOP-NEXT:    vmovdqa %xmm4, d+{{.*}}(%rip)
-; XOP-NEXT:    vmovdqa %xmm1, d+{{.*}}(%rip)
-; XOP-NEXT:    vmovdqa %xmm0, d+{{.*}}(%rip)
+; XOP-NEXT:    vmovdqa %xmm2, d+144(%rip)
+; XOP-NEXT:    vmovdqa %xmm4, d+160(%rip)
+; XOP-NEXT:    vmovdqa %xmm1, d+176(%rip)
+; XOP-NEXT:    vmovdqa %xmm0, d+128(%rip)
 ; XOP-NEXT:    vpaddd %xmm3, %xmm3, %xmm0
 ; XOP-NEXT:    vpaddd %xmm5, %xmm5, %xmm1
-; XOP-NEXT:    vmovdqa %xmm1, c+{{.*}}(%rip)
-; XOP-NEXT:    vmovdqa %xmm0, c+{{.*}}(%rip)
+; XOP-NEXT:    vmovdqa %xmm1, c+160(%rip)
+; XOP-NEXT:    vmovdqa %xmm0, c+176(%rip)
 ; XOP-NEXT:    vzeroupper
 ; XOP-NEXT:    retq
   %1 = load i32, i32* @b, align 4

diff --git a/llvm/test/CodeGen/X86/sse-domains.ll b/llvm/test/CodeGen/X86/sse-domains.ll
index 8016a246fa66..ce81450a8519 100644
--- a/llvm/test/CodeGen/X86/sse-domains.ll
+++ b/llvm/test/CodeGen/X86/sse-domains.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mcpu=nehalem | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.7"
@@ -12,21 +13,31 @@ target triple = "x86_64-apple-macosx10.7"
 ; back-edge. Check that everything is still moved into the integer domain.
 
 define void @f(<4 x i32>* nocapture %p, i32 %n) nounwind uwtable ssp {
+; CHECK-LABEL: f:
+; CHECK:       ## %bb.0: ## %entry
+; CHECK-NEXT:    addq $16, %rdi
+; CHECK-NEXT:    pxor %xmm1, %xmm1
+; CHECK-NEXT:    movdqa {{.*#+}} xmm0 = [127,127,127,127]
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  LBB0_1: ## %while.body
+; CHECK-NEXT:    ## =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    pand %xmm0, %xmm1
+; CHECK-NEXT:    movdqa %xmm1, -16(%rdi)
+; CHECK-NEXT:    movdqa (%rdi), %xmm1
+; CHECK-NEXT:    addq $16, %rdi
+; CHECK-NEXT:    paddd %xmm1, %xmm1
+; CHECK-NEXT:    decl %esi
+; CHECK-NEXT:    jne LBB0_1
+; CHECK-NEXT:  ## %bb.2: ## %while.end
+; CHECK-NEXT:    retq
 entry:
   br label %while.body
 
 ; Materialize a zeroinitializer and a constant-pool load in the integer domain.
 ; The order is not important.
-; CHECK: pxor
-; CHECK: movdqa
 
 ; The instructions in the loop must all be integer domain as well.
-; CHECK: while.body
-; CHECK: pand
-; CHECK: movdqa
-; CHECK: movdqa
 ; Finally, the controlling integer-only instruction.
-; CHECK: paddd
 while.body:
   %p.addr.04 = phi <4 x i32>* [ %incdec.ptr, %while.body ], [ %p, %entry ]
   %n.addr.03 = phi i32 [ %dec, %while.body ], [ %n, %entry ]

diff --git a/llvm/test/CodeGen/X86/sse2-vector-shifts.ll b/llvm/test/CodeGen/X86/sse2-vector-shifts.ll
index 24d0453c165a..7e6c92d00656 100644
--- a/llvm/test/CodeGen/X86/sse2-vector-shifts.ll
+++ b/llvm/test/CodeGen/X86/sse2-vector-shifts.ll
@@ -264,7 +264,7 @@ define <4 x i32> @srl_srl_v4i32(<4 x i32> %x) nounwind {
 define <4 x i32> @srl_shl_v4i32(<4 x i32> %x) nounwind {
 ; CHECK-LABEL: srl_shl_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %srl0 = shl <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
   %srl1 = lshr <4 x i32> %srl0, <i32 4, i32 4, i32 4, i32 4>
@@ -294,7 +294,7 @@ define <4 x i32> @shl_shl_v4i32(<4 x i32> %x) nounwind {
 define <4 x i32> @shl_sra_v4i32(<4 x i32> %x) nounwind {
 ; CHECK-LABEL: shl_sra_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    andps {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-NEXT:    retq
   %shl0 = ashr <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
   %shl1 = shl <4 x i32> %shl0, <i32 4, i32 4, i32 4, i32 4>
@@ -305,7 +305,7 @@ define <4 x i32> @shl_srl_v4i32(<4 x i32> %x) nounwind {
 ; MASK-LABEL: shl_srl_v4i32:
 ; MASK:       # %bb.0:
 ; MASK-NEXT:    pslld $3, %xmm0
-; MASK-NEXT:    pand {{.*}}(%rip), %xmm0
+; MASK-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; MASK-NEXT:    retq
 ;
 ; SHIFT-LABEL: shl_srl_v4i32:
@@ -321,7 +321,7 @@ define <4 x i32> @shl_srl_v4i32(<4 x i32> %x) nounwind {
 define <4 x i32> @shl_zext_srl_v4i32(<4 x i16> %x) nounwind {
 ; CHECK-LABEL: shl_zext_srl_v4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    pand {{.*}}(%rip), %xmm0
+; CHECK-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-NEXT:    pxor %xmm1, %xmm1
 ; CHECK-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; CHECK-NEXT:    retq

diff --git a/llvm/test/CodeGen/X86/vec_shift6.ll b/llvm/test/CodeGen/X86/vec_shift6.ll
index 214a4c8598d6..debbf1bf9411 100644
--- a/llvm/test/CodeGen/X86/vec_shift6.ll
+++ b/llvm/test/CodeGen/X86/vec_shift6.ll
@@ -11,12 +11,12 @@
 define <8 x i16> @test1(<8 x i16> %a) {
 ; SSE-LABEL: test1:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test1:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %shl = shl <8 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
   ret <8 x i16> %shl
@@ -25,12 +25,12 @@ define <8 x i16> @test1(<8 x i16> %a) {
 define <8 x i16> @test2(<8 x i16> %a) {
 ; SSE-LABEL: test2:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test2:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %shl = shl <8 x i16> %a, <i16 0, i16 undef, i16 0, i16 0, i16 1, i16 undef, i16 -1, i16 1>
   ret <8 x i16> %shl
@@ -46,19 +46,19 @@ define <4 x i32> @test3(<4 x i32> %a) {
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
 ; SSE2-NEXT:    pmuludq %xmm0, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT:    pmuludq {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    pmuludq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: test3:
 ; SSE41:       # %bb.0:
-; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
+; SSE41-NEXT:    pmulld {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: test3:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %shl = shl <4 x i32> %a, <i32 1, i32 -1, i32 2, i32 -3>
   ret <4 x i32> %shl
@@ -83,7 +83,7 @@ define <4 x i32> @test4(<4 x i32> %a) {
 ;
 ; AVX-LABEL: test4:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %shl = shl <4 x i32> %a, <i32 0, i32 0, i32 1, i32 1>
   ret <4 x i32> %shl
@@ -103,7 +103,7 @@ define <16 x i16> @test5(<16 x i16> %a) {
 ;
 ; AVX-LABEL: test5:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %shl = shl <16 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11, i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
   ret <16 x i16> %shl
@@ -142,7 +142,7 @@ define <8 x i32> @test6(<8 x i32> %a) {
 ;
 ; AVX-LABEL: test6:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %shl = shl <8 x i32> %a, <i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3>
   ret <8 x i32> %shl
@@ -237,7 +237,7 @@ define <16 x i32> @test8(<16 x i32> %a) {
 ;
 ; AVX512-LABEL: test8:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsllvd {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT:    vpsllvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %shl = shl <16 x i32> %a, <i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3, i32 1, i32 1, i32 2, i32 3>
   ret <16 x i32> %shl
@@ -283,7 +283,7 @@ define <8 x i64> @test9(<8 x i64> %a) {
 ;
 ; AVX512-LABEL: test9:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vpsllvq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT:    vpsllvq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
 ; AVX512-NEXT:    retq
   %shl = shl <8 x i64> %a, <i64 1, i64 1, i64 2, i64 3, i64 1, i64 1, i64 2, i64 3>
   ret <8 x i64> %shl

diff --git a/llvm/test/CodeGen/X86/vector-gep.ll b/llvm/test/CodeGen/X86/vector-gep.ll
index f0673bdd1aed..8925fc90c686 100644
--- a/llvm/test/CodeGen/X86/vector-gep.ll
+++ b/llvm/test/CodeGen/X86/vector-gep.ll
@@ -6,7 +6,7 @@ define <4 x i32*> @AGEP0(i32* %ptr) nounwind {
 ; CHECK-LABEL: AGEP0:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vbroadcastss {{[0-9]+}}(%esp), %xmm0
-; CHECK-NEXT:    vpaddd {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; CHECK-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; CHECK-NEXT:    retl
   %vecinit.i = insertelement <4 x i32*> undef, i32* %ptr, i32 0
   %vecinit2.i = insertelement <4 x i32*> %vecinit.i, i32* %ptr, i32 1

diff --git a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll
index f2dd7af2d558..ffd19662807d 100644
--- a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll
+++ b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll
@@ -17,30 +17,30 @@
 define <16 x i8> @test_128_i8_x_16_7_mask_lshr_1(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_lshr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $1, %xmm0
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_7_mask_lshr_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_lshr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $1, %xmm0
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_7_mask_lshr_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
   %t1 = lshr <16 x i8> %t0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -50,25 +50,25 @@ define <16 x i8> @test_128_i8_x_16_7_mask_lshr_1(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_28_mask_lshr_1(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $1, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
@@ -78,25 +78,25 @@ define <16 x i8> @test_128_i8_x_16_28_mask_lshr_1(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_28_mask_lshr_2(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_2:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $2, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_2:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $2, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_2:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $2, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_2:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $2, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
@@ -106,30 +106,30 @@ define <16 x i8> @test_128_i8_x_16_28_mask_lshr_2(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_28_mask_lshr_3(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_3:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $3, %xmm0
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_3:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $3, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_3:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $3, %xmm0
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_3:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $3, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
   %t1 = lshr <16 x i8> %t0, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
@@ -138,30 +138,30 @@ define <16 x i8> @test_128_i8_x_16_28_mask_lshr_3(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_28_mask_lshr_4(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_4:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $4, %xmm0
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_4:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_lshr_4:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $4, %xmm0
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_28_mask_lshr_4:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
   %t1 = lshr <16 x i8> %t0, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
@@ -171,25 +171,25 @@ define <16 x i8> @test_128_i8_x_16_28_mask_lshr_4(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_224_mask_lshr_1(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $1, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
@@ -199,25 +199,25 @@ define <16 x i8> @test_128_i8_x_16_224_mask_lshr_1(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_224_mask_lshr_4(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_4:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $4, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_4:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_4:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $4, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_4:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
@@ -228,25 +228,25 @@ define <16 x i8> @test_128_i8_x_16_224_mask_lshr_5(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_5:
 ; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    psrlw $5, %xmm0
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_5:
 ; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    vpsrlw $5, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_5:
 ; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    psrlw $5, %xmm0
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_5:
 ; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpsrlw $5, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
   %t1 = lshr <16 x i8> %t0, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
@@ -256,25 +256,25 @@ define <16 x i8> @test_128_i8_x_16_224_mask_lshr_6(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_6:
 ; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    psrlw $6, %xmm0
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_6:
 ; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    vpsrlw $6, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_lshr_6:
 ; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    psrlw $6, %xmm0
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_224_mask_lshr_6:
 ; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpsrlw $6, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
   %t1 = lshr <16 x i8> %t0, <i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6>
@@ -286,30 +286,30 @@ define <16 x i8> @test_128_i8_x_16_224_mask_lshr_6(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_7_mask_ashr_1(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_ashr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $1, %xmm0
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_7_mask_ashr_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_ashr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $1, %xmm0
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_7_mask_ashr_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
   %t1 = ashr <16 x i8> %t0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -319,25 +319,25 @@ define <16 x i8> @test_128_i8_x_16_7_mask_ashr_1(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_28_mask_ashr_1(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $1, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
@@ -347,25 +347,25 @@ define <16 x i8> @test_128_i8_x_16_28_mask_ashr_1(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_28_mask_ashr_2(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_2:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $2, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_2:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $2, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_2:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $2, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_2:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $2, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
@@ -375,30 +375,30 @@ define <16 x i8> @test_128_i8_x_16_28_mask_ashr_2(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_28_mask_ashr_3(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_3:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $3, %xmm0
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_3:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $3, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_3:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $3, %xmm0
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_3:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $3, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
   %t1 = ashr <16 x i8> %t0, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
@@ -407,30 +407,30 @@ define <16 x i8> @test_128_i8_x_16_28_mask_ashr_3(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_28_mask_ashr_4(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_4:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $4, %xmm0
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_4:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_ashr_4:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $4, %xmm0
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_28_mask_ashr_4:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
   %t1 = ashr <16 x i8> %t0, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
@@ -440,7 +440,7 @@ define <16 x i8> @test_128_i8_x_16_28_mask_ashr_4(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_224_mask_ashr_1(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $1, %xmm0
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
 ; X86-SSE2-NEXT:    pxor %xmm1, %xmm0
@@ -449,7 +449,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_1(<16 x i8> %a0) {
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
 ; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
@@ -458,7 +458,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_1(<16 x i8> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $1, %xmm0
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
 ; X64-SSE2-NEXT:    pxor %xmm1, %xmm0
@@ -467,7 +467,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_1(<16 x i8> %a0) {
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [64,64,64,64,64,64,64,64,64,64,64,64,64,64,64,64]
 ; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
@@ -480,7 +480,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_1(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_224_mask_ashr_4(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_4:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $4, %xmm0
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
 ; X86-SSE2-NEXT:    pxor %xmm1, %xmm0
@@ -489,7 +489,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_4(<16 x i8> %a0) {
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_4:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
 ; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
@@ -498,7 +498,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_4(<16 x i8> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_4:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $4, %xmm0
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
 ; X64-SSE2-NEXT:    pxor %xmm1, %xmm0
@@ -507,7 +507,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_4(<16 x i8> %a0) {
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_4:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
 ; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
@@ -521,7 +521,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_5(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_5:
 ; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    psrlw $5, %xmm0
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
 ; X86-SSE2-NEXT:    pxor %xmm1, %xmm0
 ; X86-SSE2-NEXT:    psubb %xmm1, %xmm0
@@ -530,7 +530,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_5(<16 x i8> %a0) {
 ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_5:
 ; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    vpsrlw $5, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
 ; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
@@ -539,7 +539,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_5(<16 x i8> %a0) {
 ; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_5:
 ; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    psrlw $5, %xmm0
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
 ; X64-SSE2-NEXT:    pxor %xmm1, %xmm0
 ; X64-SSE2-NEXT:    psubb %xmm1, %xmm0
@@ -548,7 +548,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_5(<16 x i8> %a0) {
 ; X64-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_5:
 ; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpsrlw $5, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4]
 ; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
@@ -561,7 +561,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_6(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_6:
 ; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    psrlw $6, %xmm0
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
 ; X86-SSE2-NEXT:    pxor %xmm1, %xmm0
 ; X86-SSE2-NEXT:    psubb %xmm1, %xmm0
@@ -570,7 +570,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_6(<16 x i8> %a0) {
 ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_6:
 ; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    vpsrlw $6, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
 ; X86-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
@@ -579,7 +579,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_6(<16 x i8> %a0) {
 ; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_ashr_6:
 ; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    psrlw $6, %xmm0
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
 ; X64-SSE2-NEXT:    pxor %xmm1, %xmm0
 ; X64-SSE2-NEXT:    psubb %xmm1, %xmm0
@@ -588,7 +588,7 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_6(<16 x i8> %a0) {
 ; X64-AVX-LABEL: test_128_i8_x_16_224_mask_ashr_6:
 ; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpsrlw $6, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
 ; X64-AVX-NEXT:    vpxor %xmm1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsubb %xmm1, %xmm0, %xmm0
@@ -603,25 +603,25 @@ define <16 x i8> @test_128_i8_x_16_224_mask_ashr_6(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_7_mask_shl_1(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    paddb %xmm0, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    paddb %xmm0, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_7_mask_shl_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
@@ -631,25 +631,25 @@ define <16 x i8> @test_128_i8_x_16_7_mask_shl_1(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_7_mask_shl_4(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_4:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psllw $4, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_4:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_4:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psllw $4, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_7_mask_shl_4:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
@@ -660,25 +660,25 @@ define <16 x i8> @test_128_i8_x_16_7_mask_shl_5(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_5:
 ; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    psllw $5, %xmm0
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_5:
 ; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    vpsllw $5, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_5:
 ; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    psllw $5, %xmm0
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_7_mask_shl_5:
 ; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpsllw $5, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
   %t1 = shl <16 x i8> %t0, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
@@ -688,25 +688,25 @@ define <16 x i8> @test_128_i8_x_16_7_mask_shl_6(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_6:
 ; X86-SSE2:       # %bb.0:
 ; X86-SSE2-NEXT:    psllw $6, %xmm0
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_7_mask_shl_6:
 ; X86-AVX:       # %bb.0:
 ; X86-AVX-NEXT:    vpsllw $6, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_7_mask_shl_6:
 ; X64-SSE2:       # %bb.0:
 ; X64-SSE2-NEXT:    psllw $6, %xmm0
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_7_mask_shl_6:
 ; X64-AVX:       # %bb.0:
 ; X64-AVX-NEXT:    vpsllw $6, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
   %t1 = shl <16 x i8> %t0, <i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6, i8 6>
@@ -716,25 +716,25 @@ define <16 x i8> @test_128_i8_x_16_7_mask_shl_6(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_28_mask_shl_1(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    paddb %xmm0, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    paddb %xmm0, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_28_mask_shl_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
@@ -744,25 +744,25 @@ define <16 x i8> @test_128_i8_x_16_28_mask_shl_1(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_28_mask_shl_2(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_2:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psllw $2, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_2:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsllw $2, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_2:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psllw $2, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_28_mask_shl_2:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsllw $2, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
@@ -772,25 +772,25 @@ define <16 x i8> @test_128_i8_x_16_28_mask_shl_2(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_28_mask_shl_3(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_3:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psllw $3, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_3:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsllw $3, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_3:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psllw $3, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_28_mask_shl_3:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsllw $3, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
@@ -800,30 +800,30 @@ define <16 x i8> @test_128_i8_x_16_28_mask_shl_3(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_28_mask_shl_4(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_4:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psllw $4, %xmm0
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_28_mask_shl_4:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_28_mask_shl_4:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psllw $4, %xmm0
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_28_mask_shl_4:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28, i8 28>
   %t1 = shl <16 x i8> %t0, <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>
@@ -833,25 +833,25 @@ define <16 x i8> @test_128_i8_x_16_28_mask_shl_4(<16 x i8> %a0) {
 define <16 x i8> @test_128_i8_x_16_224_mask_shl_1(<16 x i8> %a0) {
 ; X86-SSE2-LABEL: test_128_i8_x_16_224_mask_shl_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    paddb %xmm0, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i8_x_16_224_mask_shl_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i8_x_16_224_mask_shl_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    paddb %xmm0, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i8_x_16_224_mask_shl_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpaddb %xmm0, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <16 x i8> %a0, <i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224, i8 224>
@@ -868,25 +868,25 @@ define <16 x i8> @test_128_i8_x_16_224_mask_shl_1(<16 x i8> %a0) {
 define <8 x i16> @test_128_i16_x_8_127_mask_lshr_1(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_lshr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $1, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_127_mask_lshr_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_lshr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_127_mask_lshr_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
@@ -897,25 +897,25 @@ define <8 x i16> @test_128_i16_x_8_127_mask_lshr_1(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_3(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_3:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $3, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_3:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $3, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_3:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $3, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_3:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $3, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
@@ -925,25 +925,25 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_3(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_4(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_4:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $4, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_4:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_4:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $4, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_4:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
@@ -953,25 +953,25 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_4(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_5(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_5:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $5, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_5:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $5, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_5:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $5, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_5:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $5, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
@@ -981,25 +981,25 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_5(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_6(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_6:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $6, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_6:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $6, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_lshr_6:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $6, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_lshr_6:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $6, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
@@ -1010,25 +1010,25 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_lshr_6(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_65024_mask_lshr_1(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $1, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
@@ -1038,25 +1038,25 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_lshr_1(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_65024_mask_lshr_8(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_8:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $8, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_8:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $8, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_lshr_8:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $8, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_lshr_8:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $8, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
@@ -1117,25 +1117,25 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_lshr_10(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_127_mask_ashr_1(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_ashr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $1, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_127_mask_ashr_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_ashr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_127_mask_ashr_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
@@ -1146,25 +1146,25 @@ define <8 x i16> @test_128_i16_x_8_127_mask_ashr_1(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_3(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_3:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $3, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_3:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $3, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_3:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $3, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_3:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $3, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
@@ -1174,25 +1174,25 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_3(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_4(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_4:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $4, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_4:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_4:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $4, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_4:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $4, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
@@ -1202,25 +1202,25 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_4(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_5(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_5:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $5, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_5:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $5, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_5:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $5, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_5:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $5, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
@@ -1230,25 +1230,25 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_5(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_6(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_6:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlw $6, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_6:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlw $6, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_ashr_6:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlw $6, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_ashr_6:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlw $6, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
@@ -1259,25 +1259,25 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_ashr_6(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_1(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psraw $1, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsraw $1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psraw $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsraw $1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
@@ -1287,25 +1287,25 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_1(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_8(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_8:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psraw $8, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_8:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsraw $8, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_ashr_8:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psraw $8, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_ashr_8:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsraw $8, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
@@ -1366,25 +1366,25 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_10(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_127_mask_shl_1(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    paddw %xmm0, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpaddw %xmm0, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    paddw %xmm0, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpaddw %xmm0, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
@@ -1394,25 +1394,25 @@ define <8 x i16> @test_128_i16_x_8_127_mask_shl_1(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_127_mask_shl_8(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_8:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psllw $8, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_8:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsllw $8, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_8:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psllw $8, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_127_mask_shl_8:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsllw $8, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
@@ -1471,25 +1471,25 @@ define <8 x i16> @test_128_i16_x_8_127_mask_shl_10(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_2032_mask_shl_3(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_3:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psllw $3, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_3:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsllw $3, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_3:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psllw $3, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_3:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsllw $3, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
@@ -1499,25 +1499,25 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_3(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_2032_mask_shl_4(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_4:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psllw $4, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_4:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_4:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psllw $4, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_4:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsllw $4, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
@@ -1527,25 +1527,25 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_4(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_2032_mask_shl_5(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_5:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psllw $5, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_5:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsllw $5, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_5:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psllw $5, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_5:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsllw $5, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
@@ -1555,25 +1555,25 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_5(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_2032_mask_shl_6(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_6:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psllw $6, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_6:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsllw $6, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_2032_mask_shl_6:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psllw $6, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_2032_mask_shl_6:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsllw $6, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032, i16 2032>
@@ -1584,25 +1584,25 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_6(<8 x i16> %a0) {
 define <8 x i16> @test_128_i16_x_8_65024_mask_shl_1(<8 x i16> %a0) {
 ; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    paddw %xmm0, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpaddw %xmm0, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    paddw %xmm0, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpaddw %xmm0, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
@@ -1619,13 +1619,13 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_shl_1(<8 x i16> %a0) {
 define <4 x i32> @test_128_i32_x_4_32767_mask_lshr_1(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_lshr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrld $1, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_lshr_1:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -1638,13 +1638,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_lshr_1(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_lshr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrld $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_32767_mask_lshr_1:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -1662,13 +1662,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_lshr_1(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_7(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_7:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrld $7, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_7:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrld $7, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -1681,13 +1681,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_7(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_7:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrld $7, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_7:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrld $7, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -1704,13 +1704,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_7(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_8(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_8:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrld $8, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_8:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrld $8, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -1723,13 +1723,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_8(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_8:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrld $8, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_8:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrld $8, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -1746,13 +1746,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_8(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_9(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_9:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrld $9, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_9:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrld $9, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -1765,13 +1765,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_9(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_9:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrld $9, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_9:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrld $9, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -1788,13 +1788,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_9(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_10(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_10:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrld $10, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_10:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrld $10, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -1807,13 +1807,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_10(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_lshr_10:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrld $10, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_lshr_10:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrld $10, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -1831,13 +1831,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_lshr_10(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_1(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrld $1, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_lshr_1:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -1850,13 +1850,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_1(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrld $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_lshr_1:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -1873,13 +1873,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_1(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_16(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_16:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrld $16, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_lshr_16:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -1892,13 +1892,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_16(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_lshr_16:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrld $16, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_lshr_16:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -1966,13 +1966,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_lshr_18(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_32767_mask_ashr_1(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_ashr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrld $1, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_ashr_1:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -1985,13 +1985,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_ashr_1(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_ashr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrld $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_32767_mask_ashr_1:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrld $1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -2009,13 +2009,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_ashr_1(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_7(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_7:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrld $7, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_7:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrld $7, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2028,13 +2028,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_7(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_7:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrld $7, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_7:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrld $7, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -2051,13 +2051,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_7(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_8(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_8:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrld $8, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_8:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrld $8, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2070,13 +2070,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_8(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_8:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrld $8, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_8:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrld $8, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -2093,13 +2093,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_8(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_9(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_9:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrld $9, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_9:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrld $9, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2112,13 +2112,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_9(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_9:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrld $9, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_9:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrld $9, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -2135,13 +2135,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_9(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_10(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_10:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrld $10, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_10:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrld $10, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2154,13 +2154,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_10(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_ashr_10:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrld $10, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_ashr_10:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrld $10, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -2178,13 +2178,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_ashr_10(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_1(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrad $1, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_ashr_1:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrad $1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2197,13 +2197,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_1(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrad $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_ashr_1:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrad $1, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -2220,13 +2220,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_1(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_16(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_16:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrad $16, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_ashr_16:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrad $16, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2239,13 +2239,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_16(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_ashr_16:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrad $16, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_ashr_16:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrad $16, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -2313,13 +2313,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_18(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_32767_mask_shl_1(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    paddd %xmm0, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2332,13 +2332,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_shl_1(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    paddd %xmm0, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -2355,13 +2355,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_shl_1(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_32767_mask_shl_16(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_16:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    pslld $16, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_16:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpslld $16, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2374,13 +2374,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_shl_16(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_16:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pslld $16, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_16:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpslld $16, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -2446,13 +2446,13 @@ define <4 x i32> @test_128_i32_x_4_32767_mask_shl_18(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_7(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_7:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    pslld $7, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_7:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpslld $7, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2465,13 +2465,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_7(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_7:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pslld $7, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_7:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpslld $7, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -2488,13 +2488,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_7(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_8(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_8:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    pslld $8, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_8:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpslld $8, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2507,13 +2507,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_8(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_8:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pslld $8, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_8:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpslld $8, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -2530,13 +2530,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_8(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_9(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_9:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    pslld $9, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_9:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpslld $9, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2549,13 +2549,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_9(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_9:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pslld $9, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_9:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpslld $9, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -2572,13 +2572,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_9(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_10(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_10:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    pslld $10, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_10:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpslld $10, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2591,13 +2591,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_10(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_8388352_mask_shl_10:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pslld $10, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_8388352_mask_shl_10:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpslld $10, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -2615,13 +2615,13 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_10(<4 x i32> %a0) {
 define <4 x i32> @test_128_i32_x_4_4294836224_mask_shl_1(<4 x i32> %a0) {
 ; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    paddd %xmm0, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2634,13 +2634,13 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_shl_1(<4 x i32> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    paddd %xmm0, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
 ; X64-AVX1-NEXT:    retq
 ;
@@ -2664,25 +2664,25 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_shl_1(<4 x i32> %a0) {
 define <2 x i64> @test_128_i64_x_2_2147483647_mask_lshr_1(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_lshr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlq $1, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_lshr_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlq $1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_lshr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlq $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_lshr_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlq $1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 2147483647, i64 2147483647>
@@ -2693,25 +2693,25 @@ define <2 x i64> @test_128_i64_x_2_2147483647_mask_lshr_1(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_15(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_15:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlq $15, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_15:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlq $15, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_15:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlq $15, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_15:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlq $15, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
@@ -2721,25 +2721,25 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_15(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_16(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_16:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlq $16, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_16:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlq $16, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_16:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlq $16, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_16:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlq $16, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
@@ -2749,25 +2749,25 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_16(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_17(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_17:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlq $17, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_17:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlq $17, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_17:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlq $17, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_17:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlq $17, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
@@ -2777,25 +2777,25 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_17(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_18(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_18:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlq $18, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_18:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlq $18, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_18:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlq $18, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_lshr_18:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlq $18, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
@@ -2806,25 +2806,25 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_lshr_18(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_1(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlq $1, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlq $1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlq $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlq $1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
@@ -2834,13 +2834,13 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_1(<2 x i64> %a
 define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_32(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_32:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlq $32, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_32:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrlq $32, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -2853,13 +2853,13 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_32(<2 x i64> %
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_32:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlq $32, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_lshr_32:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlq $32, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
@@ -2920,25 +2920,25 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_lshr_34(<2 x i64> %
 define <2 x i64> @test_128_i64_x_2_2147483647_mask_ashr_1(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_ashr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlq $1, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_ashr_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlq $1, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_ashr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlq $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_ashr_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlq $1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 2147483647, i64 2147483647>
@@ -2949,25 +2949,25 @@ define <2 x i64> @test_128_i64_x_2_2147483647_mask_ashr_1(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_15(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_15:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlq $15, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_15:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlq $15, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_15:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlq $15, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_15:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlq $15, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
@@ -2977,25 +2977,25 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_15(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_16(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_16:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlq $16, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_16:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlq $16, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_16:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlq $16, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_16:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlq $16, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
@@ -3005,25 +3005,25 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_16(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_17(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_17:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlq $17, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_17:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlq $17, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_17:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlq $17, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_17:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlq $17, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
@@ -3033,25 +3033,25 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_17(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_18(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_18:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrlq $18, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_18:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsrlq $18, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_18:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrlq $18, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_ashr_18:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrlq $18, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
@@ -3062,14 +3062,14 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_ashr_18(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_1(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psrad $1, %xmm0
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrad $1, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
 ; X86-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
@@ -3086,13 +3086,13 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_1(<2 x i64> %a
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psrad $1, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsrad $1, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
@@ -3102,7 +3102,7 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_1(<2 x i64> %a
 define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_32(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
 ; X86-SSE2-NEXT:    psrad $31, %xmm0
 ; X86-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
@@ -3112,7 +3112,7 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_32(<2 x i64> %
 ;
 ; X86-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
 ; X86-AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; X86-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
@@ -3120,7 +3120,7 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_32(<2 x i64> %
 ;
 ; X86-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
 ; X86-AVX2:       # %bb.0:
-; X86-AVX2-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX2-NEXT:    vpsrad $31, %xmm0, %xmm1
 ; X86-AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; X86-AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
@@ -3128,7 +3128,7 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_32(<2 x i64> %
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,3,2,3]
 ; X64-SSE2-NEXT:    psrad $31, %xmm0
 ; X64-SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
@@ -3138,7 +3138,7 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_32(<2 x i64> %
 ;
 ; X64-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
 ; X64-AVX1:       # %bb.0:
-; X64-AVX1-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX1-NEXT:    vpsrad $31, %xmm0, %xmm1
 ; X64-AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; X64-AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
@@ -3146,7 +3146,7 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_32(<2 x i64> %
 ;
 ; X64-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_32:
 ; X64-AVX2:       # %bb.0:
-; X64-AVX2-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX2-NEXT:    vpsrad $31, %xmm0, %xmm1
 ; X64-AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; X64-AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
@@ -3273,25 +3273,25 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_34(<2 x i64> %
 define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_1(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    paddq %xmm0, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpaddq %xmm0, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    paddq %xmm0, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpaddq %xmm0, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 2147483647, i64 2147483647>
@@ -3301,13 +3301,13 @@ define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_1(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_32(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_32:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psllq $32, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX1-LABEL: test_128_i64_x_2_2147483647_mask_shl_32:
 ; X86-AVX1:       # %bb.0:
-; X86-AVX1-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX1-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; X86-AVX1-NEXT:    retl
 ;
@@ -3320,13 +3320,13 @@ define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_32(<2 x i64> %a0) {
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_32:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psllq $32, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_32:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsllq $32, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 2147483647, i64 2147483647>
@@ -3385,25 +3385,25 @@ define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_34(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_15(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_15:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psllq $15, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_15:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsllq $15, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_15:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psllq $15, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_15:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsllq $15, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
@@ -3413,25 +3413,25 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_15(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_16(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_16:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psllq $16, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_16:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsllq $16, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_16:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psllq $16, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_16:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsllq $16, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
@@ -3441,25 +3441,25 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_16(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_17(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_17:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psllq $17, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_17:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsllq $17, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_17:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psllq $17, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_17:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsllq $17, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
@@ -3469,25 +3469,25 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_17(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_18(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_18:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    psllq $18, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_18:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpsllq $18, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_140737488289792_mask_shl_18:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    psllq $18, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_140737488289792_mask_shl_18:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpsllq $18, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 140737488289792, i64 140737488289792>
@@ -3498,25 +3498,25 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_18(<2 x i64> %a0) {
 define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_shl_1(<2 x i64> %a0) {
 ; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-SSE2-NEXT:    paddq %xmm0, %xmm0
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vpand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
 ; X86-AVX-NEXT:    vpaddq %xmm0, %xmm0, %xmm0
 ; X86-AVX-NEXT:    retl
 ;
 ; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
 ; X64-SSE2:       # %bb.0:
-; X64-SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-SSE2-NEXT:    paddq %xmm0, %xmm0
 ; X64-SSE2-NEXT:    retq
 ;
 ; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
 ; X64-AVX-NEXT:    vpaddq %xmm0, %xmm0, %xmm0
 ; X64-AVX-NEXT:    retq
   %t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>

diff  --git a/llvm/test/CodeGen/X86/x86-shifts.ll b/llvm/test/CodeGen/X86/x86-shifts.ll
index 3063666dabeb..f1f325768a2a 100644
--- a/llvm/test/CodeGen/X86/x86-shifts.ll
+++ b/llvm/test/CodeGen/X86/x86-shifts.ll
@@ -131,7 +131,7 @@ define <8 x i16> @sll8_nosplat(<8 x i16> %A) nounwind {
 ; X86:       # %bb.0: # %entry
 ; X86-NEXT:    movdqa {{.*#+}} xmm1 = [2,4,8,64,4,4,4,4]
 ; X86-NEXT:    pmullw %xmm0, %xmm1
-; X86-NEXT:    pmullw {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT:    pxor %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
@@ -139,7 +139,7 @@ define <8 x i16> @sll8_nosplat(<8 x i16> %A) nounwind {
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movdqa {{.*#+}} xmm1 = [2,4,8,64,4,4,4,4]
 ; X64-NEXT:    pmullw %xmm0, %xmm1
-; X64-NEXT:    pmullw {{.*}}(%rip), %xmm0
+; X64-NEXT:    pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-NEXT:    pxor %xmm1, %xmm0
 ; X64-NEXT:    retq
 entry:
@@ -204,13 +204,13 @@ define <16 x i8> @shl9(<16 x i8> %A) nounwind {
 ; X86-LABEL: shl9:
 ; X86:       # %bb.0:
 ; X86-NEXT:    psllw $3, %xmm0
-; X86-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: shl9:
 ; X64:       # %bb.0:
 ; X64-NEXT:    psllw $3, %xmm0
-; X64-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-NEXT:    retq
   %B = shl <16 x i8> %A, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   ret <16 x i8> %B
@@ -220,13 +220,13 @@ define <16 x i8> @shr9(<16 x i8> %A) nounwind {
 ; X86-LABEL: shr9:
 ; X86:       # %bb.0:
 ; X86-NEXT:    psrlw $3, %xmm0
-; X86-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: shr9:
 ; X64:       # %bb.0:
 ; X64-NEXT:    psrlw $3, %xmm0
-; X64-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-NEXT:    retq
   %B = lshr <16 x i8> %A, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   ret <16 x i8> %B
@@ -247,7 +247,7 @@ define <16 x i8> @sra_v16i8(<16 x i8> %A) nounwind {
 ; X86-LABEL: sra_v16i8:
 ; X86:       # %bb.0:
 ; X86-NEXT:    psrlw $3, %xmm0
-; X86-NEXT:    pand {{\.LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
 ; X86-NEXT:    movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
 ; X86-NEXT:    pxor %xmm1, %xmm0
 ; X86-NEXT:    psubb %xmm1, %xmm0
@@ -256,7 +256,7 @@ define <16 x i8> @sra_v16i8(<16 x i8> %A) nounwind {
 ; X64-LABEL: sra_v16i8:
 ; X64:       # %bb.0:
 ; X64-NEXT:    psrlw $3, %xmm0
-; X64-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; X64-NEXT:    movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
 ; X64-NEXT:    pxor %xmm1, %xmm0
 ; X64-NEXT:    psubb %xmm1, %xmm0

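A note on the regenerated patterns above: the optional dot in {{\.?LCPI[0-9]+_[0-9]+}} lets a single CHECK line match both spellings of the constant-pool label (ELF targets emit .LCPI<func>_<idx>, while Mach-O private labels drop the leading dot, e.g. LCPI0_0), and the explicit (%rip) suffix pins the x86-64 checks to a rip-relative constant load instead of the old {{.*}} wildcard. Below is a minimal sketch of how such a test reads; the function name and RUN lines are hypothetical, not taken from this commit:

; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s

define <2 x i64> @mask_example(<2 x i64> %a0) {
; CHECK-LABEL: mask_example:
; CHECK:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
  %t0 = and <2 x i64> %a0, <i64 2147483647, i64 2147483647>
  ret <2 x i64> %t0
}

Both RUN lines should satisfy the same CHECK, since only the label prefix (.LCPI0_0 vs LCPI0_0) differs between the two triples.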