[llvm] ca0caa2 - [X86] Replace X32 test check prefix with X86 + add common CHECK prefix

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 5 06:41:59 PDT 2023


Author: Simon Pilgrim
Date: 2023-06-05T14:41:40+01:00
New Revision: ca0caa23ce3993d5b222418f48e7d4d00d3994b5

URL: https://github.com/llvm/llvm-project/commit/ca0caa23ce3993d5b222418f48e7d4d00d3994b5
DIFF: https://github.com/llvm/llvm-project/commit/ca0caa23ce3993d5b222418f48e7d4d00d3994b5.diff

LOG: [X86] Replace X32 test check prefix with X86 + add common CHECK prefix

We try to reserve the X32 check prefix for test cases using the gnux32 triple

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/avx2-arith.ll
    llvm/test/CodeGen/X86/avx2-cmp.ll
    llvm/test/CodeGen/X86/avx2-fma-fneg-combine.ll
    llvm/test/CodeGen/X86/avx2-logic.ll
    llvm/test/CodeGen/X86/avx2-pmovxrm.ll
    llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/X86/avx2-arith.ll b/llvm/test/CodeGen/X86/avx2-arith.ll
index 2dc33d32e9d3a..3e69581171944 100644
--- a/llvm/test/CodeGen/X86/avx2-arith.ll
+++ b/llvm/test/CodeGen/X86/avx2-arith.ll
@@ -1,158 +1,108 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X64
 
 define <4 x i64> @test_vpaddq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
-; X32-LABEL: test_vpaddq:
-; X32:       # %bb.0:
-; X32-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_vpaddq:
-; X64:       # %bb.0:
-; X64-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_vpaddq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %x = add <4 x i64> %i, %j
   ret <4 x i64> %x
 }
 
 define <8 x i32> @test_vpaddd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
-; X32-LABEL: test_vpaddd:
-; X32:       # %bb.0:
-; X32-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_vpaddd:
-; X64:       # %bb.0:
-; X64-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_vpaddd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %x = add <8 x i32> %i, %j
   ret <8 x i32> %x
 }
 
 define <16 x i16> @test_vpaddw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
-; X32-LABEL: test_vpaddw:
-; X32:       # %bb.0:
-; X32-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_vpaddw:
-; X64:       # %bb.0:
-; X64-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_vpaddw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpaddw %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %x = add <16 x i16> %i, %j
   ret <16 x i16> %x
 }
 
 define <32 x i8> @test_vpaddb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
-; X32-LABEL: test_vpaddb:
-; X32:       # %bb.0:
-; X32-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_vpaddb:
-; X64:       # %bb.0:
-; X64-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_vpaddb:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpaddb %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %x = add <32 x i8> %i, %j
   ret <32 x i8> %x
 }
 
 define <4 x i64> @test_vpsubq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
-; X32-LABEL: test_vpsubq:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsubq %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_vpsubq:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsubq %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_vpsubq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsubq %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %x = sub <4 x i64> %i, %j
   ret <4 x i64> %x
 }
 
 define <8 x i32> @test_vpsubd(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
-; X32-LABEL: test_vpsubd:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_vpsubd:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_vpsubd:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsubd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %x = sub <8 x i32> %i, %j
   ret <8 x i32> %x
 }
 
 define <16 x i16> @test_vpsubw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
-; X32-LABEL: test_vpsubw:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsubw %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_vpsubw:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsubw %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_vpsubw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsubw %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %x = sub <16 x i16> %i, %j
   ret <16 x i16> %x
 }
 
 define <32 x i8> @test_vpsubb(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
-; X32-LABEL: test_vpsubb:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsubb %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_vpsubb:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsubb %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_vpsubb:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsubb %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %x = sub <32 x i8> %i, %j
   ret <32 x i8> %x
 }
 
 define <8 x i32> @test_vpmulld(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
-; X32-LABEL: test_vpmulld:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_vpmulld:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_vpmulld:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %x = mul <8 x i32> %i, %j
   ret <8 x i32> %x
 }
 
 define <16 x i16> @test_vpmullw(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
-; X32-LABEL: test_vpmullw:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test_vpmullw:
-; X64:       # %bb.0:
-; X64-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test_vpmullw:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %x = mul <16 x i16> %i, %j
   ret <16 x i16> %x
 }
 
 define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
-; X32-LABEL: mul_v16i8:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
-; X32-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; X32-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
-; X32-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X32-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; X32-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
-; X32-NEXT:    vzeroupper
-; X32-NEXT:    retl
+; X86-LABEL: mul_v16i8:
+; X86:       # %bb.0:
+; X86-NEXT:    vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; X86-NEXT:    vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; X86-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; X86-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; X86-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; X86-NEXT:    vzeroupper
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_v16i8:
 ; X64:       # %bb.0:
@@ -169,142 +119,90 @@ define <16 x i8> @mul_v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {
 }
 
 define <32 x i8> @mul_v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
-; X32-LABEL: mul_v32i8:
-; X32:       # %bb.0:
-; X32-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; X32-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; X32-NEXT:    vpmullw %ymm2, %ymm3, %ymm2
-; X32-NEXT:    vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; X32-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; X32-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; X32-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; X32-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
-; X32-NEXT:    vpand %ymm3, %ymm0, %ymm0
-; X32-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: mul_v32i8:
-; X64:       # %bb.0:
-; X64-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; X64-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; X64-NEXT:    vpmullw %ymm2, %ymm3, %ymm2
-; X64-NEXT:    vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; X64-NEXT:    vpand %ymm3, %ymm2, %ymm2
-; X64-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; X64-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; X64-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
-; X64-NEXT:    vpand %ymm3, %ymm0, %ymm0
-; X64-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: mul_v32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; CHECK-NEXT:    vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; CHECK-NEXT:    vpmullw %ymm2, %ymm3, %ymm2
+; CHECK-NEXT:    vpbroadcastw {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
+; CHECK-NEXT:    vpand %ymm3, %ymm2, %ymm2
+; CHECK-NEXT:    vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; CHECK-NEXT:    vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; CHECK-NEXT:    vpmullw %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpand %ymm3, %ymm0, %ymm0
+; CHECK-NEXT:    vpackuswb %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %x = mul <32 x i8> %i, %j
   ret <32 x i8> %x
 }
 
 define <4 x i64> @mul_v4i64(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
-; X32-LABEL: mul_v4i64:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsrlq $32, %ymm0, %ymm2
-; X32-NEXT:    vpmuludq %ymm1, %ymm2, %ymm2
-; X32-NEXT:    vpsrlq $32, %ymm1, %ymm3
-; X32-NEXT:    vpmuludq %ymm3, %ymm0, %ymm3
-; X32-NEXT:    vpaddq %ymm2, %ymm3, %ymm2
-; X32-NEXT:    vpsllq $32, %ymm2, %ymm2
-; X32-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
-; X32-NEXT:    vpaddq %ymm2, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: mul_v4i64:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsrlq $32, %ymm0, %ymm2
-; X64-NEXT:    vpmuludq %ymm1, %ymm2, %ymm2
-; X64-NEXT:    vpsrlq $32, %ymm1, %ymm3
-; X64-NEXT:    vpmuludq %ymm3, %ymm0, %ymm3
-; X64-NEXT:    vpaddq %ymm2, %ymm3, %ymm2
-; X64-NEXT:    vpsllq $32, %ymm2, %ymm2
-; X64-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
-; X64-NEXT:    vpaddq %ymm2, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: mul_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsrlq $32, %ymm0, %ymm2
+; CHECK-NEXT:    vpmuludq %ymm1, %ymm2, %ymm2
+; CHECK-NEXT:    vpsrlq $32, %ymm1, %ymm3
+; CHECK-NEXT:    vpmuludq %ymm3, %ymm0, %ymm3
+; CHECK-NEXT:    vpaddq %ymm2, %ymm3, %ymm2
+; CHECK-NEXT:    vpsllq $32, %ymm2, %ymm2
+; CHECK-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vpaddq %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %x = mul <4 x i64> %i, %j
   ret <4 x i64> %x
 }
 
 define <8 x i32> @mul_const1(<8 x i32> %x) {
-; X32-LABEL: mul_const1:
-; X32:       # %bb.0:
-; X32-NEXT:    vpaddd %ymm0, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: mul_const1:
-; X64:       # %bb.0:
-; X64-NEXT:    vpaddd %ymm0, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: mul_const1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpaddd %ymm0, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %y = mul <8 x i32> %x, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
   ret <8 x i32> %y
 }
 
 define <4 x i64> @mul_const2(<4 x i64> %x) {
-; X32-LABEL: mul_const2:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsllq $2, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: mul_const2:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsllq $2, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: mul_const2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllq $2, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %y = mul <4 x i64> %x, <i64 4, i64 4, i64 4, i64 4>
   ret <4 x i64> %y
 }
 
 define <16 x i16> @mul_const3(<16 x i16> %x) {
-; X32-LABEL: mul_const3:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsllw $3, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: mul_const3:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsllw $3, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: mul_const3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllw $3, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %y = mul <16 x i16> %x, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
   ret <16 x i16> %y
 }
 
 define <4 x i64> @mul_const4(<4 x i64> %x) {
-; X32-LABEL: mul_const4:
-; X32:       # %bb.0:
-; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vpsubq %ymm0, %ymm1, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: mul_const4:
-; X64:       # %bb.0:
-; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vpsubq %ymm0, %ymm1, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: mul_const4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vpsubq %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %y = mul <4 x i64> %x, <i64 -1, i64 -1, i64 -1, i64 -1>
   ret <4 x i64> %y
 }
 
 define <8 x i32> @mul_const5(<8 x i32> %x) {
-; X32-LABEL: mul_const5:
-; X32:       # %bb.0:
-; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: mul_const5:
-; X64:       # %bb.0:
-; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: mul_const5:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %y = mul <8 x i32> %x, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
   ret <8 x i32> %y
 }
 
 define <8 x i32> @mul_const6(<8 x i32> %x) {
-; X32-LABEL: mul_const6:
-; X32:       # %bb.0:
-; X32-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: mul_const6:
+; X86:       # %bb.0:
+; X86-NEXT:    vpmulld {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: mul_const6:
 ; X64:       # %bb.0:
@@ -315,81 +213,52 @@ define <8 x i32> @mul_const6(<8 x i32> %x) {
 }
 
 define <8 x i64> @mul_const7(<8 x i64> %x) {
-; X32-LABEL: mul_const7:
-; X32:       # %bb.0:
-; X32-NEXT:    vpaddq %ymm0, %ymm0, %ymm0
-; X32-NEXT:    vpaddq %ymm1, %ymm1, %ymm1
-; X32-NEXT:    retl
-;
-; X64-LABEL: mul_const7:
-; X64:       # %bb.0:
-; X64-NEXT:    vpaddq %ymm0, %ymm0, %ymm0
-; X64-NEXT:    vpaddq %ymm1, %ymm1, %ymm1
-; X64-NEXT:    retq
+; CHECK-LABEL: mul_const7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpaddq %ymm0, %ymm0, %ymm0
+; CHECK-NEXT:    vpaddq %ymm1, %ymm1, %ymm1
+; CHECK-NEXT:    ret{{[l|q]}}
   %y = mul <8 x i64> %x, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
   ret <8 x i64> %y
 }
 
 define <8 x i16> @mul_const8(<8 x i16> %x) {
-; X32-LABEL: mul_const8:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsllw $3, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: mul_const8:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsllw $3, %xmm0, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: mul_const8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllw $3, %xmm0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %y = mul <8 x i16> %x, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
   ret <8 x i16> %y
 }
 
 define <8 x i32> @mul_const9(<8 x i32> %x) {
-; X32-LABEL: mul_const9:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [2,0,0,0]
-; X32-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: mul_const9:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovdqa {{.*#+}} xmm1 = [2,0,0,0]
-; X64-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: mul_const9:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovdqa {{.*#+}} xmm1 = [2,0,0,0]
+; CHECK-NEXT:    vpmulld %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %y = mul <8 x i32> %x, <i32 2, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
   ret <8 x i32> %y
 }
 
 ; ptr 0x01010101
 define <4 x i32> @mul_const10(<4 x i32> %x) {
-; X32-LABEL: mul_const10:
-; X32:       # %bb.0:
-; X32-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [16843009,16843009,16843009,16843009]
-; X32-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: mul_const10:
-; X64:       # %bb.0:
-; X64-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [16843009,16843009,16843009,16843009]
-; X64-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: mul_const10:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [16843009,16843009,16843009,16843009]
+; CHECK-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %m = mul <4 x i32> %x, <i32 16843009, i32 16843009, i32 16843009, i32 16843009>
   ret <4 x i32> %m
 }
 
 ; ptr 0x80808080
 define <4 x i32> @mul_const11(<4 x i32> %x) {
-; X32-LABEL: mul_const11:
-; X32:       # %bb.0:
-; X32-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2155905152,2155905152,2155905152,2155905152]
-; X32-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: mul_const11:
-; X64:       # %bb.0:
-; X64-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2155905152,2155905152,2155905152,2155905152]
-; X64-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: mul_const11:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2155905152,2155905152,2155905152,2155905152]
+; CHECK-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %m = mul <4 x i32> %x, <i32 2155905152, i32 2155905152, i32 2155905152, i32 2155905152>
   ret <4 x i32> %m
 }

diff  --git a/llvm/test/CodeGen/X86/avx2-cmp.ll b/llvm/test/CodeGen/X86/avx2-cmp.ll
index 2d710e40daf52..04b673ca5afc8 100644
--- a/llvm/test/CodeGen/X86/avx2-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx2-cmp.ll
@@ -1,123 +1,86 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X64
 
 define <8 x i32> @v8i32_cmpgt(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
-; X32-LABEL: v8i32_cmpgt:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcmpgtd %ymm0, %ymm1, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: v8i32_cmpgt:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcmpgtd %ymm0, %ymm1, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: v8i32_cmpgt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpcmpgtd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %bincmp = icmp slt <8 x i32> %i, %j
   %x = sext <8 x i1> %bincmp to <8 x i32>
   ret <8 x i32> %x
 }
 
 define <4 x i64> @v4i64_cmpgt(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
-; X32-LABEL: v4i64_cmpgt:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: v4i64_cmpgt:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: v4i64_cmpgt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpcmpgtq %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %bincmp = icmp slt <4 x i64> %i, %j
   %x = sext <4 x i1> %bincmp to <4 x i64>
   ret <4 x i64> %x
 }
 
 define <16 x i16> @v16i16_cmpgt(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
-; X32-LABEL: v16i16_cmpgt:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcmpgtw %ymm0, %ymm1, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: v16i16_cmpgt:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcmpgtw %ymm0, %ymm1, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: v16i16_cmpgt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpcmpgtw %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %bincmp = icmp slt <16 x i16> %i, %j
   %x = sext <16 x i1> %bincmp to <16 x i16>
   ret <16 x i16> %x
 }
 
 define <32 x i8> @v32i8_cmpgt(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
-; X32-LABEL: v32i8_cmpgt:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: v32i8_cmpgt:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: v32i8_cmpgt:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %bincmp = icmp slt <32 x i8> %i, %j
   %x = sext <32 x i1> %bincmp to <32 x i8>
   ret <32 x i8> %x
 }
 
 define <8 x i32> @int256_cmpeq(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
-; X32-LABEL: int256_cmpeq:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: int256_cmpeq:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: int256_cmpeq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %bincmp = icmp eq <8 x i32> %i, %j
   %x = sext <8 x i1> %bincmp to <8 x i32>
   ret <8 x i32> %x
 }
 
 define <4 x i64> @v4i64_cmpeq(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
-; X32-LABEL: v4i64_cmpeq:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: v4i64_cmpeq:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: v4i64_cmpeq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpcmpeqq %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %bincmp = icmp eq <4 x i64> %i, %j
   %x = sext <4 x i1> %bincmp to <4 x i64>
   ret <4 x i64> %x
 }
 
 define <16 x i16> @v16i16_cmpeq(<16 x i16> %i, <16 x i16> %j) nounwind readnone {
-; X32-LABEL: v16i16_cmpeq:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: v16i16_cmpeq:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: v16i16_cmpeq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpcmpeqw %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %bincmp = icmp eq <16 x i16> %i, %j
   %x = sext <16 x i1> %bincmp to <16 x i16>
   ret <16 x i16> %x
 }
 
 define <32 x i8> @v32i8_cmpeq(<32 x i8> %i, <32 x i8> %j) nounwind readnone {
-; X32-LABEL: v32i8_cmpeq:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: v32i8_cmpeq:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: v32i8_cmpeq:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpcmpeqb %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %bincmp = icmp eq <32 x i8> %i, %j
   %x = sext <32 x i1> %bincmp to <32 x i8>
   ret <32 x i8> %x
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; X64: {{.*}}
+; X86: {{.*}}

diff  --git a/llvm/test/CodeGen/X86/avx2-fma-fneg-combine.ll b/llvm/test/CodeGen/X86/avx2-fma-fneg-combine.ll
index 5d7fc7aaa185f..d6001da849e31 100644
--- a/llvm/test/CodeGen/X86/avx2-fma-fneg-combine.ll
+++ b/llvm/test/CodeGen/X86/avx2-fma-fneg-combine.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2,+fma | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fma | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2,+fma | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fma | FileCheck %s --check-prefixes=CHECK,X64
 
 declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
 declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
@@ -11,49 +11,32 @@ declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
 ; This test checks combinations of FNEG and FMA intrinsics
 
 define <8 x float> @test1(<8 x float> %a, <8 x float> %b, <8 x float> %c)  {
-; X32-LABEL: test1:
-; X32:       # %bb.0:
-; X32-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2
-; X32-NEXT:    retl
-;
-; X64-LABEL: test1:
-; X64:       # %bb.0:
-; X64-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2
-; X64-NEXT:    retq
+; CHECK-LABEL: test1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2
+; CHECK-NEXT:    ret{{[l|q]}}
   %sub.i = fsub <8 x float> <float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0>, %c
   %r = tail call nsz <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %sub.i) #2
   ret <8 x float> %r
 }
 
 define <4 x float> @test2(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
-; X32-LABEL: test2:
-; X32:       # %bb.0:
-; X32-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
-; X32-NEXT:    retl
-;
-; X64-LABEL: test2:
-; X64:       # %bb.0:
-; X64-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
-; X64-NEXT:    retq
+; CHECK-LABEL: test2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfnmsub213ps {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
+; CHECK-NEXT:    ret{{[l|q]}}
   %t0 = tail call nsz <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #2
   %sub.i = fsub <4 x float> <float -0.0, float -0.0, float -0.0, float -0.0>, %t0
   ret <4 x float> %sub.i
 }
 
 define <4 x float> @test3(<4 x float> %a, <4 x float> %b, <4 x float> %c)  {
-; X32-LABEL: test3:
-; X32:       # %bb.0:
-; X32-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
-; X32-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X32-NEXT:    vxorps %xmm1, %xmm0, %xmm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test3:
-; X64:       # %bb.0:
-; X64-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
-; X64-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X64-NEXT:    vxorps %xmm1, %xmm0, %xmm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2
+; CHECK-NEXT:    vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
+; CHECK-NEXT:    vxorps %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %a0 = extractelement <4 x float> %a, i64 0
   %b0 = extractelement <4 x float> %b, i64 0
   %c0 = extractelement <4 x float> %c, i64 0
@@ -65,15 +48,10 @@ define <4 x float> @test3(<4 x float> %a, <4 x float> %b, <4 x float> %c)  {
 }
 
 define <8 x float> @test4(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
-; X32-LABEL: test4:
-; X32:       # %bb.0:
-; X32-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
-; X32-NEXT:    retl
-;
-; X64-LABEL: test4:
-; X64:       # %bb.0:
-; X64-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
-; X64-NEXT:    retq
+; CHECK-LABEL: test4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2
+; CHECK-NEXT:    ret{{[l|q]}}
   %negc = fneg <8 x float> %c
   %t0 = tail call nsz <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %negc) #2
   %sub.i = fsub <8 x float> <float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0>, %t0
@@ -81,15 +59,10 @@ define <8 x float> @test4(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
 }
 
 define <8 x float> @test5(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
-; X32-LABEL: test5:
-; X32:       # %bb.0:
-; X32-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
-; X32-NEXT:    retl
-;
-; X64-LABEL: test5:
-; X64:       # %bb.0:
-; X64-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
-; X64-NEXT:    retq
+; CHECK-LABEL: test5:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
+; CHECK-NEXT:    ret{{[l|q]}}
   %sub.c = fsub <8 x float> <float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0, float -0.0>, %c
   %negsubc = fneg <8 x float> %sub.c
   %t0 = tail call nsz <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %negsubc) #2
@@ -97,26 +70,21 @@ define <8 x float> @test5(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
 }
 
 define <2 x double> @test6(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
-; X32-LABEL: test6:
-; X32:       # %bb.0:
-; X32-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
-; X32-NEXT:    retl
-;
-; X64-LABEL: test6:
-; X64:       # %bb.0:
-; X64-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
-; X64-NEXT:    retq
+; CHECK-LABEL: test6:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vfnmsub213pd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2
+; CHECK-NEXT:    ret{{[l|q]}}
   %t0 = tail call nsz <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #2
   %sub.i = fsub <2 x double> <double -0.0, double -0.0>, %t0
   ret <2 x double> %sub.i
 }
 
 define <8 x float> @test7(float %a, <8 x float> %b, <8 x float> %c)  {
-; X32-LABEL: test7:
-; X32:       # %bb.0:
-; X32-NEXT:    vbroadcastss {{[0-9]+}}(%esp), %ymm2
-; X32-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm2 * ymm0) + ymm1
-; X32-NEXT:    retl
+; X86-LABEL: test7:
+; X86:       # %bb.0:
+; X86-NEXT:    vbroadcastss {{[0-9]+}}(%esp), %ymm2
+; X86-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm2 * ymm0) + ymm1
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test7:
 ; X64:       # %bb.0:
@@ -132,11 +100,11 @@ define <8 x float> @test7(float %a, <8 x float> %b, <8 x float> %c)  {
 }
 
 define <8 x float> @test8(float %a, <8 x float> %b, <8 x float> %c)  {
-; X32-LABEL: test8:
-; X32:       # %bb.0:
-; X32-NEXT:    vbroadcastss {{[0-9]+}}(%esp), %ymm2
-; X32-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm2 * ymm0) + ymm1
-; X32-NEXT:    retl
+; X86-LABEL: test8:
+; X86:       # %bb.0:
+; X86-NEXT:    vbroadcastss {{[0-9]+}}(%esp), %ymm2
+; X86-NEXT:    vfnmadd213ps {{.*#+}} ymm0 = -(ymm2 * ymm0) + ymm1
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test8:
 ; X64:       # %bb.0:
@@ -151,39 +119,24 @@ define <8 x float> @test8(float %a, <8 x float> %b, <8 x float> %c)  {
 }
 
 define <4 x double> @test9(<4 x double> %a) {
-; X32-LABEL: test9:
-; X32:       # %bb.0:
-; X32-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; X32-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm1
-; X32-NEXT:    retl
-;
-; X64-LABEL: test9:
-; X64:       # %bb.0:
-; X64-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
-; X64-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm1
-; X64-NEXT:    retq
+; CHECK-LABEL: test9:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vbroadcastsd {{.*#+}} ymm1 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
+; CHECK-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm1
+; CHECK-NEXT:    ret{{[l|q]}}
   %t = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> <double 5.000000e-01, double 5.000000e-01, double 5.000000e-01, double 5.000000e-01>, <4 x double> <double -5.000000e-01, double -5.000000e-01, double -5.000000e-01, double -5.000000e-01>)
   ret <4 x double> %t
 }
 
 define <4 x double> @test10(<4 x double> %a, <4 x double> %b) {
-; X32-LABEL: test10:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovapd {{.*#+}} ymm2 = <-9.5E+0,u,-5.5E+0,-2.5E+0>
-; X32-NEXT:    vmovapd %ymm2, %ymm3
-; X32-NEXT:    vfmadd213pd {{.*#+}} ymm3 = (ymm0 * ymm3) + ymm1
-; X32-NEXT:    vfnmadd213pd {{.*#+}} ymm2 = -(ymm0 * ymm2) + ymm1
-; X32-NEXT:    vaddpd %ymm2, %ymm3, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test10:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovapd {{.*#+}} ymm2 = <-9.5E+0,u,-5.5E+0,-2.5E+0>
-; X64-NEXT:    vmovapd %ymm2, %ymm3
-; X64-NEXT:    vfmadd213pd {{.*#+}} ymm3 = (ymm0 * ymm3) + ymm1
-; X64-NEXT:    vfnmadd213pd {{.*#+}} ymm2 = -(ymm0 * ymm2) + ymm1
-; X64-NEXT:    vaddpd %ymm2, %ymm3, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test10:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovapd {{.*#+}} ymm2 = <-9.5E+0,u,-5.5E+0,-2.5E+0>
+; CHECK-NEXT:    vmovapd %ymm2, %ymm3
+; CHECK-NEXT:    vfmadd213pd {{.*#+}} ymm3 = (ymm0 * ymm3) + ymm1
+; CHECK-NEXT:    vfnmadd213pd {{.*#+}} ymm2 = -(ymm0 * ymm2) + ymm1
+; CHECK-NEXT:    vaddpd %ymm2, %ymm3, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %t0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> <double -95.00000e-01, double undef, double -55.00000e-01, double -25.00000e-01>, <4 x double> %b)
   %t1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> <double 95.00000e-01, double undef, double 55.00000e-01, double 25.00000e-01>, <4 x double> %b)
   %t2 = fadd <4 x double> %t0, %t1
@@ -191,42 +144,26 @@ define <4 x double> @test10(<4 x double> %a, <4 x double> %b) {
 }
 
 define <4 x double> @test11(<4 x double> %a) {
-; X32-LABEL: test11:
-; X32:       # %bb.0:
-; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [5.0E-1,2.5E+0,5.0E-1,2.5E+0]
-; X32-NEXT:    # ymm1 = mem[0,1,0,1]
-; X32-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
-; X32-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm1
-; X32-NEXT:    retl
-;
-; X64-LABEL: test11:
-; X64:       # %bb.0:
-; X64-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [5.0E-1,2.5E+0,5.0E-1,2.5E+0]
-; X64-NEXT:    # ymm1 = mem[0,1,0,1]
-; X64-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
-; X64-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm1
-; X64-NEXT:    retq
+; CHECK-LABEL: test11:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = [5.0E-1,2.5E+0,5.0E-1,2.5E+0]
+; CHECK-NEXT:    # ymm1 = mem[0,1,0,1]
+; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vfmsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm1
+; CHECK-NEXT:    ret{{[l|q]}}
   %t0 = fadd <4 x double> %a, <double 5.000000e-01, double 25.00000e-01, double 5.000000e-01, double 25.00000e-01>
   %t1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %t0, <4 x double> <double 5.000000e-01, double 25.00000e-01, double 5.000000e-01, double 25.00000e-01>, <4 x double> <double -5.000000e-01, double -25.00000e-01, double -5.000000e-01, double -25.00000e-01>)
   ret <4 x double> %t1
 }
 
 define <4 x double> @test12(<4 x double> %a, <4 x double> %b) {
-; X32-LABEL: test12:
-; X32:       # %bb.0:
-; X32-NEXT:    vmovapd {{.*#+}} ymm2 = [-7.5E+0,-2.5E+0,-5.5E+0,-9.5E+0]
-; X32-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm2 * ymm0) + mem
-; X32-NEXT:    vfmadd132pd {{.*#+}} ymm1 = (ymm1 * mem) + ymm2
-; X32-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: test12:
-; X64:       # %bb.0:
-; X64-NEXT:    vmovapd {{.*#+}} ymm2 = [-7.5E+0,-2.5E+0,-5.5E+0,-9.5E+0]
-; X64-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm2 * ymm0) + mem
-; X64-NEXT:    vfmadd132pd {{.*#+}} ymm1 = (ymm1 * mem) + ymm2
-; X64-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: test12:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovapd {{.*#+}} ymm2 = [-7.5E+0,-2.5E+0,-5.5E+0,-9.5E+0]
+; CHECK-NEXT:    vfnmadd213pd {{.*#+}} ymm0 = -(ymm2 * ymm0) + mem
+; CHECK-NEXT:    vfmadd132pd {{.*#+}} ymm1 = (ymm1 * mem) + ymm2
+; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %t0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> <double 75.00000e-01, double 25.00000e-01, double 55.00000e-01, double 95.00000e-01>, <4 x double> <double -75.00000e-01, double undef, double -55.00000e-01, double -95.00000e-01>)
   %t1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %b, <4 x double> <double undef, double 25.00000e-01, double 55.00000e-01, double 95.00000e-01>, <4 x double> <double -75.00000e-01, double -25.00000e-01, double -55.00000e-01, double -95.00000e-01>)
   %t2 = fadd <4 x double> %t0, %t1

diff --git a/llvm/test/CodeGen/X86/avx2-logic.ll b/llvm/test/CodeGen/X86/avx2-logic.ll
index f7b62eabb5735..df49fbc6790f4 100644
--- a/llvm/test/CodeGen/X86/avx2-logic.ll
+++ b/llvm/test/CodeGen/X86/avx2-logic.ll
@@ -1,21 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,X64
 
 define <4 x i64> @vpandn(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
-; X32-LABEL: vpandn:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
-; X32-NEXT:    vpsubq %ymm1, %ymm0, %ymm1
-; X32-NEXT:    vpandn %ymm0, %ymm1, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: vpandn:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
-; X64-NEXT:    vpsubq %ymm1, %ymm0, %ymm1
-; X64-NEXT:    vpandn %ymm0, %ymm1, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: vpandn:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpcmpeqd %ymm1, %ymm1, %ymm1
+; CHECK-NEXT:    vpsubq %ymm1, %ymm0, %ymm1
+; CHECK-NEXT:    vpandn %ymm0, %ymm1, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   ; Force the execution domain with an add.
   %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
@@ -25,19 +18,12 @@ entry:
 }
 
 define <4 x i64> @vpand(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
-; X32-LABEL: vpand:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; X32-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
-; X32-NEXT:    vpand %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: vpand:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; X64-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
-; X64-NEXT:    vpand %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: vpand:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   ; Force the execution domain with an add.
   %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
@@ -46,19 +32,12 @@ entry:
 }
 
 define <4 x i64> @vpor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
-; X32-LABEL: vpor:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; X32-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
-; X32-NEXT:    vpor %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: vpor:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; X64-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
-; X64-NEXT:    vpor %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: vpor:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   ; Force the execution domain with an add.
   %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
@@ -67,19 +46,12 @@ entry:
 }
 
 define <4 x i64> @vpxor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
-; X32-LABEL: vpxor:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; X32-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
-; X32-NEXT:    vpxor %ymm1, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: vpxor:
-; X64:       # %bb.0: # %entry
-; X64-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
-; X64-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
-; X64-NEXT:    vpxor %ymm1, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: vpxor:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vpcmpeqd %ymm2, %ymm2, %ymm2
+; CHECK-NEXT:    vpsubq %ymm2, %ymm0, %ymm0
+; CHECK-NEXT:    vpxor %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
 entry:
   ; Force the execution domain with an add.
   %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
@@ -88,43 +60,30 @@ entry:
 }
 
 define <32 x i8> @vpblendvb(<32 x i1> %cond, <32 x i8> %x, <32 x i8> %y) {
-; X32-LABEL: vpblendvb:
-; X32:       # %bb.0:
-; X32-NEXT:    vpsllw $7, %ymm0, %ymm0
-; X32-NEXT:    vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: vpblendvb:
-; X64:       # %bb.0:
-; X64-NEXT:    vpsllw $7, %ymm0, %ymm0
-; X64-NEXT:    vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: vpblendvb:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllw $7, %ymm0, %ymm0
+; CHECK-NEXT:    vpblendvb %ymm0, %ymm1, %ymm2, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
   %min = select <32 x i1> %cond, <32 x i8> %x, <32 x i8> %y
   ret <32 x i8> %min
 }
 
 define <8 x i32> @allOnes() nounwind {
-; X32-LABEL: allOnes:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: allOnes:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: allOnes:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
         ret <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
 }
 
 define <16 x i16> @allOnes2() nounwind {
-; X32-LABEL: allOnes2:
-; X32:       # %bb.0:
-; X32-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
-; X32-NEXT:    retl
-;
-; X64-LABEL: allOnes2:
-; X64:       # %bb.0:
-; X64-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
-; X64-NEXT:    retq
+; CHECK-LABEL: allOnes2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
+; CHECK-NEXT:    ret{{[l|q]}}
         ret <16 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
 }
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; X64: {{.*}}
+; X86: {{.*}}

diff --git a/llvm/test/CodeGen/X86/avx2-pmovxrm.ll b/llvm/test/CodeGen/X86/avx2-pmovxrm.ll
index 8419a8f53f74f..6ddbc82965b42 100644
--- a/llvm/test/CodeGen/X86/avx2-pmovxrm.ll
+++ b/llvm/test/CodeGen/X86/avx2-pmovxrm.ll
@@ -1,15 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=X86
 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin -mattr=+avx512vl,avx512bw | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -disable-peephole -mtriple=i686-apple-darwin -mattr=+avx512vl,avx512bw | FileCheck %s --check-prefix=X86
 ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512vl,avx512bw | FileCheck %s --check-prefix=X64
 
 define <16 x i16> @test_llvm_x86_avx2_pmovsxbw(ptr %a) {
-; X32-LABEL: test_llvm_x86_avx2_pmovsxbw:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpmovsxbw (%eax), %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: test_llvm_x86_avx2_pmovsxbw:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovsxbw (%eax), %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx2_pmovsxbw:
 ; X64:       ## %bb.0:
@@ -21,11 +21,11 @@ define <16 x i16> @test_llvm_x86_avx2_pmovsxbw(ptr %a) {
 }
 
 define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(ptr %a) {
-; X32-LABEL: test_llvm_x86_avx2_pmovsxbd:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpmovsxbd (%eax), %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: test_llvm_x86_avx2_pmovsxbd:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovsxbd (%eax), %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx2_pmovsxbd:
 ; X64:       ## %bb.0:
@@ -38,11 +38,11 @@ define <8 x i32> @test_llvm_x86_avx2_pmovsxbd(ptr %a) {
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(ptr %a) {
-; X32-LABEL: test_llvm_x86_avx2_pmovsxbq:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpmovsxbq (%eax), %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: test_llvm_x86_avx2_pmovsxbq:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovsxbq (%eax), %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx2_pmovsxbq:
 ; X64:       ## %bb.0:
@@ -55,11 +55,11 @@ define <4 x i64> @test_llvm_x86_avx2_pmovsxbq(ptr %a) {
 }
 
 define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(ptr %a) {
-; X32-LABEL: test_llvm_x86_avx2_pmovsxwd:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpmovsxwd (%eax), %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: test_llvm_x86_avx2_pmovsxwd:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovsxwd (%eax), %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx2_pmovsxwd:
 ; X64:       ## %bb.0:
@@ -71,11 +71,11 @@ define <8 x i32> @test_llvm_x86_avx2_pmovsxwd(ptr %a) {
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(ptr %a) {
-; X32-LABEL: test_llvm_x86_avx2_pmovsxwq:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpmovsxwq (%eax), %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: test_llvm_x86_avx2_pmovsxwq:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovsxwq (%eax), %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx2_pmovsxwq:
 ; X64:       ## %bb.0:
@@ -88,11 +88,11 @@ define <4 x i64> @test_llvm_x86_avx2_pmovsxwq(ptr %a) {
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(ptr %a) {
-; X32-LABEL: test_llvm_x86_avx2_pmovsxdq:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpmovsxdq (%eax), %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: test_llvm_x86_avx2_pmovsxdq:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovsxdq (%eax), %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx2_pmovsxdq:
 ; X64:       ## %bb.0:
@@ -104,11 +104,11 @@ define <4 x i64> @test_llvm_x86_avx2_pmovsxdq(ptr %a) {
 }
 
 define <16 x i16> @test_llvm_x86_avx2_pmovzxbw(ptr %a) {
-; X32-LABEL: test_llvm_x86_avx2_pmovzxbw:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; X32-NEXT:    retl
+; X86-LABEL: test_llvm_x86_avx2_pmovzxbw:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx2_pmovzxbw:
 ; X64:       ## %bb.0:
@@ -120,11 +120,11 @@ define <16 x i16> @test_llvm_x86_avx2_pmovzxbw(ptr %a) {
 }
 
 define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(ptr %a) {
-; X32-LABEL: test_llvm_x86_avx2_pmovzxbd:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
-; X32-NEXT:    retl
+; X86-LABEL: test_llvm_x86_avx2_pmovzxbd:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx2_pmovzxbd:
 ; X64:       ## %bb.0:
@@ -137,11 +137,11 @@ define <8 x i32> @test_llvm_x86_avx2_pmovzxbd(ptr %a) {
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(ptr %a) {
-; X32-LABEL: test_llvm_x86_avx2_pmovzxbq:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
-; X32-NEXT:    retl
+; X86-LABEL: test_llvm_x86_avx2_pmovzxbq:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx2_pmovzxbq:
 ; X64:       ## %bb.0:
@@ -154,11 +154,11 @@ define <4 x i64> @test_llvm_x86_avx2_pmovzxbq(ptr %a) {
 }
 
 define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(ptr %a) {
-; X32-LABEL: test_llvm_x86_avx2_pmovzxwd:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; X32-NEXT:    retl
+; X86-LABEL: test_llvm_x86_avx2_pmovzxwd:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx2_pmovzxwd:
 ; X64:       ## %bb.0:
@@ -170,11 +170,11 @@ define <8 x i32> @test_llvm_x86_avx2_pmovzxwd(ptr %a) {
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(ptr %a) {
-; X32-LABEL: test_llvm_x86_avx2_pmovzxwq:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; X32-NEXT:    retl
+; X86-LABEL: test_llvm_x86_avx2_pmovzxwq:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx2_pmovzxwq:
 ; X64:       ## %bb.0:
@@ -187,11 +187,11 @@ define <4 x i64> @test_llvm_x86_avx2_pmovzxwq(ptr %a) {
 }
 
 define <4 x i64> @test_llvm_x86_avx2_pmovzxdq(ptr %a) {
-; X32-LABEL: test_llvm_x86_avx2_pmovzxdq:
-; X32:       ## %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
-; X32-NEXT:    retl
+; X86-LABEL: test_llvm_x86_avx2_pmovzxdq:
+; X86:       ## %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_llvm_x86_avx2_pmovzxdq:
 ; X64:       ## %bb.0:

diff --git a/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll b/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll
index 4fef0128cf638..b5cf3b616f6a6 100644
--- a/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll
+++ b/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll
@@ -1,14 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X86
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
 
 define <4 x double> @test_broadcast_2f64_4f64(ptr%p) nounwind {
-; X32-LABEL: test_broadcast_2f64_4f64:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X32-NEXT:    vaddpd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: test_broadcast_2f64_4f64:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X86-NEXT:    vaddpd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_2f64_4f64:
 ; X64:       # %bb.0:
@@ -22,12 +22,12 @@ define <4 x double> @test_broadcast_2f64_4f64(ptr%p) nounwind {
 }
 
 define <4 x i64> @test_broadcast_2i64_4i64(ptr%p) nounwind {
-; X32-LABEL: test_broadcast_2i64_4i64:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X32-NEXT:    vpaddq {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: test_broadcast_2i64_4i64:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X86-NEXT:    vpaddq {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_2i64_4i64:
 ; X64:       # %bb.0:
@@ -41,12 +41,12 @@ define <4 x i64> @test_broadcast_2i64_4i64(ptr%p) nounwind {
 }
 
 define <8 x float> @test_broadcast_4f32_8f32(ptr%p) nounwind {
-; X32-LABEL: test_broadcast_4f32_8f32:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X32-NEXT:    vaddps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: test_broadcast_4f32_8f32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X86-NEXT:    vaddps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_4f32_8f32:
 ; X64:       # %bb.0:
@@ -60,12 +60,12 @@ define <8 x float> @test_broadcast_4f32_8f32(ptr%p) nounwind {
 }
 
 define <8 x i32> @test_broadcast_4i32_8i32(ptr%p) nounwind {
-; X32-LABEL: test_broadcast_4i32_8i32:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X32-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: test_broadcast_4i32_8i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X86-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_4i32_8i32:
 ; X64:       # %bb.0:
@@ -79,12 +79,12 @@ define <8 x i32> @test_broadcast_4i32_8i32(ptr%p) nounwind {
 }
 
 define <16 x i16> @test_broadcast_8i16_16i16(ptr%p) nounwind {
-; X32-LABEL: test_broadcast_8i16_16i16:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X32-NEXT:    vpaddw {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: test_broadcast_8i16_16i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X86-NEXT:    vpaddw {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_8i16_16i16:
 ; X64:       # %bb.0:
@@ -98,12 +98,12 @@ define <16 x i16> @test_broadcast_8i16_16i16(ptr%p) nounwind {
 }
 
 define <32 x i8> @test_broadcast_16i8_32i8(ptr%p) nounwind {
-; X32-LABEL: test_broadcast_16i8_32i8:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X32-NEXT:    vpaddb {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
-; X32-NEXT:    retl
+; X86-LABEL: test_broadcast_16i8_32i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X86-NEXT:    vpaddb {{\.?LCPI[0-9]+_[0-9]+}}, %ymm0, %ymm0
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_16i8_32i8:
 ; X64:       # %bb.0:
@@ -117,14 +117,14 @@ define <32 x i8> @test_broadcast_16i8_32i8(ptr%p) nounwind {
 }
 
 define <4 x double> @test_broadcast_2f64_4f64_reuse(ptr %p0, ptr %p1) {
-; X32-LABEL: test_broadcast_2f64_4f64_reuse:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X32-NEXT:    vaddpd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
-; X32-NEXT:    vmovapd %xmm1, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: test_broadcast_2f64_4f64_reuse:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; X86-NEXT:    vaddpd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
+; X86-NEXT:    vmovapd %xmm1, (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_2f64_4f64_reuse:
 ; X64:       # %bb.0:
@@ -140,14 +140,14 @@ define <4 x double> @test_broadcast_2f64_4f64_reuse(ptr %p0, ptr %p1) {
 }
 
 define <4 x i64> @test_broadcast_2i64_4i64_reuse(ptr %p0, ptr %p1) {
-; X32-LABEL: test_broadcast_2i64_4i64_reuse:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X32-NEXT:    vpaddq {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
-; X32-NEXT:    vmovdqa %xmm1, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: test_broadcast_2i64_4i64_reuse:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; X86-NEXT:    vpaddq {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
+; X86-NEXT:    vmovdqa %xmm1, (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_2i64_4i64_reuse:
 ; X64:       # %bb.0:
@@ -163,14 +163,14 @@ define <4 x i64> @test_broadcast_2i64_4i64_reuse(ptr %p0, ptr %p1) {
 }
 
 define <8 x float> @test_broadcast_4f32_8f32_reuse(ptr %p0, ptr %p1) {
-; X32-LABEL: test_broadcast_4f32_8f32_reuse:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X32-NEXT:    vaddps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
-; X32-NEXT:    vmovaps %xmm1, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: test_broadcast_4f32_8f32_reuse:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    vbroadcastf128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; X86-NEXT:    vaddps {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
+; X86-NEXT:    vmovaps %xmm1, (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_4f32_8f32_reuse:
 ; X64:       # %bb.0:
@@ -186,14 +186,14 @@ define <8 x float> @test_broadcast_4f32_8f32_reuse(ptr %p0, ptr %p1) {
 }
 
 define <8 x i32> @test_broadcast_4i32_8i32_reuse(ptr %p0, ptr %p1) {
-; X32-LABEL: test_broadcast_4i32_8i32_reuse:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X32-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
-; X32-NEXT:    vmovdqa %xmm1, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: test_broadcast_4i32_8i32_reuse:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; X86-NEXT:    vpaddd {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
+; X86-NEXT:    vmovdqa %xmm1, (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_4i32_8i32_reuse:
 ; X64:       # %bb.0:
@@ -209,14 +209,14 @@ define <8 x i32> @test_broadcast_4i32_8i32_reuse(ptr %p0, ptr %p1) {
 }
 
 define <16 x i16> @test_broadcast_8i16_16i16_reuse(ptr%p0, ptr%p1) nounwind {
-; X32-LABEL: test_broadcast_8i16_16i16_reuse:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X32-NEXT:    vpaddw {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
-; X32-NEXT:    vmovdqa %xmm1, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: test_broadcast_8i16_16i16_reuse:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; X86-NEXT:    vpaddw {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
+; X86-NEXT:    vmovdqa %xmm1, (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_8i16_16i16_reuse:
 ; X64:       # %bb.0:
@@ -232,14 +232,14 @@ define <16 x i16> @test_broadcast_8i16_16i16_reuse(ptr%p0, ptr%p1) nounwind {
 }
 
 define <32 x i8> @test_broadcast_16i8_32i8_reuse(ptr%p0, ptr%p1) nounwind {
-; X32-LABEL: test_broadcast_16i8_32i8_reuse:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
-; X32-NEXT:    vpaddb {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
-; X32-NEXT:    vmovdqa %xmm1, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: test_broadcast_16i8_32i8_reuse:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    vbroadcasti128 {{.*#+}} ymm1 = mem[0,1,0,1]
+; X86-NEXT:    vpaddb {{\.?LCPI[0-9]+_[0-9]+}}, %ymm1, %ymm0
+; X86-NEXT:    vmovdqa %xmm1, (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: test_broadcast_16i8_32i8_reuse:
 ; X64:       # %bb.0:
@@ -255,14 +255,14 @@ define <32 x i8> @test_broadcast_16i8_32i8_reuse(ptr%p0, ptr%p1) nounwind {
 }
 
 define <8 x i32> @PR29088(ptr %p0, ptr %p1) {
-; X32-LABEL: PR29088:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; X32-NEXT:    vmovaps %ymm1, (%eax)
-; X32-NEXT:    retl
+; X86-LABEL: PR29088:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X86-NEXT:    vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; X86-NEXT:    vmovaps %ymm1, (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: PR29088:
 ; X64:       # %bb.0:


        


More information about the llvm-commits mailing list