[llvm] bbc3e03 - [X86] Remove some unused check-prefixes

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 9 07:05:10 PST 2020


Author: Simon Pilgrim
Date: 2020-11-09T15:04:57Z
New Revision: bbc3e03032c5aa178c52e78a6c221ef0c6e249ca

URL: https://github.com/llvm/llvm-project/commit/bbc3e03032c5aa178c52e78a6c221ef0c6e249ca
DIFF: https://github.com/llvm/llvm-project/commit/bbc3e03032c5aa178c52e78a6c221ef0c6e249ca.diff

LOG: [X86] Remove some unused check-prefixes

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/2011-05-09-loaduse.ll
    llvm/test/CodeGen/X86/2011-12-8-bitcastintprom.ll
    llvm/test/CodeGen/X86/2012-07-10-extload64.ll
    llvm/test/CodeGen/X86/3dnow-intrinsics.ll
    llvm/test/CodeGen/X86/8bit_cmov_of_trunc_promotion.ll
    llvm/test/CodeGen/X86/absolute-cmp.ll
    llvm/test/CodeGen/X86/viabs.ll
    llvm/test/CodeGen/X86/vp2intersect_multiple_pairs.ll
    llvm/test/CodeGen/X86/vshift-1.ll
    llvm/test/CodeGen/X86/vshift-2.ll
    llvm/test/CodeGen/X86/vshift-3.ll
    llvm/test/CodeGen/X86/vshift-4.ll
    llvm/test/CodeGen/X86/vshift-5.ll
    llvm/test/CodeGen/X86/vshift-6.ll
    llvm/test/CodeGen/X86/widen_arith-4.ll
    llvm/test/CodeGen/X86/widen_bitops-0.ll
    llvm/test/CodeGen/X86/widen_bitops-1.ll
    llvm/test/CodeGen/X86/widen_conv-3.ll
    llvm/test/CodeGen/X86/widen_conv-4.ll
    llvm/test/CodeGen/X86/widen_load-3.ll
    llvm/test/CodeGen/X86/widen_mul.ll
    llvm/test/CodeGen/X86/widened-broadcast.ll
    llvm/test/CodeGen/X86/win64_frame.ll
    llvm/test/CodeGen/X86/xaluo128.ll
    llvm/test/CodeGen/X86/xmulo.ll
    llvm/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
    llvm/test/CodeGen/X86/xor-icmp.ll
    llvm/test/CodeGen/X86/xor.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/2011-05-09-loaduse.ll b/llvm/test/CodeGen/X86/2011-05-09-loaduse.ll
index 027e9b5a86f4..ed775f79f6a6 100644
--- a/llvm/test/CodeGen/X86/2011-05-09-loaduse.ll
+++ b/llvm/test/CodeGen/X86/2011-05-09-loaduse.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-- -mcpu=corei7 | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-- -mcpu=corei7 | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-- -mcpu=corei7 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-- -mcpu=corei7 | FileCheck %s --check-prefix=X64
 
 define float @test(<4 x float>* %A) nounwind {
 ; X86-LABEL: test:

diff --git a/llvm/test/CodeGen/X86/2011-12-8-bitcastintprom.ll b/llvm/test/CodeGen/X86/2011-12-8-bitcastintprom.ll
index 44f2468f6fa0..b334c083aa9f 100644
--- a/llvm/test/CodeGen/X86/2011-12-8-bitcastintprom.ll
+++ b/llvm/test/CodeGen/X86/2011-12-8-bitcastintprom.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse4.1 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE41
 
 ; Make sure that the conversion between v4i8 to v2i16 is not a simple bitcast.
 define void @prom_bug(<4 x i8> %t, i16* %p) {

diff --git a/llvm/test/CodeGen/X86/2012-07-10-extload64.ll b/llvm/test/CodeGen/X86/2012-07-10-extload64.ll
index 85832639c149..d707ca1b6e77 100644
--- a/llvm/test/CodeGen/X86/2012-07-10-extload64.ll
+++ b/llvm/test/CodeGen/X86/2012-07-10-extload64.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-pc-win32 -mcpu=corei7 | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-pc-win32 -mcpu=corei7 | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-pc-win32 -mcpu=corei7 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-pc-win32 -mcpu=corei7 | FileCheck %s --check-prefix=X64
 
 define void @load_store(<4 x i16>* %in) {
 ; X86-LABEL: load_store:

diff --git a/llvm/test/CodeGen/X86/3dnow-intrinsics.ll b/llvm/test/CodeGen/X86/3dnow-intrinsics.ll
index 611ba11c5102..a82f705b77d8 100644
--- a/llvm/test/CodeGen/X86/3dnow-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/3dnow-intrinsics.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+3dnow | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+3dnow | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+3dnow | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+3dnow | FileCheck %s --check-prefix=X64
 
 define <8 x i8> @test_pavgusb(x86_mmx %a.coerce, x86_mmx %b.coerce) nounwind readnone {
 ; X86-LABEL: test_pavgusb:

diff --git a/llvm/test/CodeGen/X86/8bit_cmov_of_trunc_promotion.ll b/llvm/test/CodeGen/X86/8bit_cmov_of_trunc_promotion.ll
index 2cfc6e1163c9..db3c036dd261 100644
--- a/llvm/test/CodeGen/X86/8bit_cmov_of_trunc_promotion.ll
+++ b/llvm/test/CodeGen/X86/8bit_cmov_of_trunc_promotion.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=-cmov | FileCheck %s --check-prefixes=ALL,I386,I386-NOCMOV
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+cmov | FileCheck %s --check-prefixes=ALL,I386,I386-CMOV
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-cmov | FileCheck %s --check-prefixes=ALL,I686,I686-NOCMOV
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+cmov | FileCheck %s --check-prefixes=ALL,I686,I686-CMOV
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-cmov | FileCheck %s --check-prefixes=ALL,X86_64,X86_64-NOCMOV
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+cmov | FileCheck %s --check-prefixes=ALL,X86_64,X86_64-CMOV
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=-cmov | FileCheck %s --check-prefixes=I386,I386-NOCMOV
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+cmov | FileCheck %s --check-prefixes=I386,I386-CMOV
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-cmov | FileCheck %s --check-prefixes=I686,I686-NOCMOV
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+cmov | FileCheck %s --check-prefixes=I686,I686-CMOV
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-cmov | FileCheck %s --check-prefixes=X86_64,X86_64-NOCMOV
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+cmov | FileCheck %s --check-prefixes=X86_64,X86_64-CMOV
 
 ; Values don't come from regs. All good.
 

diff --git a/llvm/test/CodeGen/X86/absolute-cmp.ll b/llvm/test/CodeGen/X86/absolute-cmp.ll
index 99249e55dfea..4d8634d439bd 100644
--- a/llvm/test/CodeGen/X86/absolute-cmp.ll
+++ b/llvm/test/CodeGen/X86/absolute-cmp.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -show-mc-encoding | FileCheck %s --check-prefixes=CHECK,NOPIC
-; RUN: llc -relocation-model=pic -show-mc-encoding < %s | FileCheck %s --check-prefixes=CHECK,PIC
+; RUN: llc < %s -show-mc-encoding | FileCheck %s --check-prefix=NOPIC
+; RUN: llc -relocation-model=pic -show-mc-encoding < %s | FileCheck %s --check-prefix=PIC
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"

diff --git a/llvm/test/CodeGen/X86/viabs.ll b/llvm/test/CodeGen/X86/viabs.ll
index 52749e495bd8..588246d7ded6 100644
--- a/llvm/test/CodeGen/X86/viabs.ll
+++ b/llvm/test/CodeGen/X86/viabs.ll
@@ -1,11 +1,11 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2     | FileCheck %s --check-prefixes=SSE,SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3    | FileCheck %s --check-prefixes=SSE,SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1   | FileCheck %s --check-prefixes=SSE,SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx      | FileCheck %s --check-prefixes=AVX,AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2     | FileCheck %s --check-prefixes=AVX,AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl --show-mc-encoding | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F,AVX512VL
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl,+avx512bw --show-mc-encoding | FileCheck %s --check-prefixes=AVX,AVX512,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2     | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3    | FileCheck %s --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1   | FileCheck %s --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx      | FileCheck %s --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2     | FileCheck %s --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl --show-mc-encoding | FileCheck %s --check-prefixes=AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl,+avx512bw --show-mc-encoding | FileCheck %s --check-prefixes=AVX512,AVX512BW
 
 define <4 x i32> @test_abs_gt_v4i32(<4 x i32> %a) nounwind {
 ; SSE2-LABEL: test_abs_gt_v4i32:

diff --git a/llvm/test/CodeGen/X86/vp2intersect_multiple_pairs.ll b/llvm/test/CodeGen/X86/vp2intersect_multiple_pairs.ll
index 6891b96ad595..81c507175dff 100644
--- a/llvm/test/CodeGen/X86/vp2intersect_multiple_pairs.ll
+++ b/llvm/test/CodeGen/X86/vp2intersect_multiple_pairs.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512vp2intersect -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vp2intersect -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512vp2intersect -verify-machineinstrs | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vp2intersect -verify-machineinstrs | FileCheck %s --check-prefix=X64
 
 ; Test with more than four live mask pairs
 

diff --git a/llvm/test/CodeGen/X86/vshift-1.ll b/llvm/test/CodeGen/X86/vshift-1.ll
index 40d304ca4c56..7062fa4ece71 100644
--- a/llvm/test/CodeGen/X86/vshift-1.ll
+++ b/llvm/test/CodeGen/X86/vshift-1.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
 
 ; test vector shifts converted to proper SSE2 vector shifts when the shift
 ; amounts are the same.

diff --git a/llvm/test/CodeGen/X86/vshift-2.ll b/llvm/test/CodeGen/X86/vshift-2.ll
index bfe0d3f5bbc0..e5af6661a2e0 100644
--- a/llvm/test/CodeGen/X86/vshift-2.ll
+++ b/llvm/test/CodeGen/X86/vshift-2.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
 
 ; test vector shifts converted to proper SSE2 vector shifts when the shift
 ; amounts are the same.

diff --git a/llvm/test/CodeGen/X86/vshift-3.ll b/llvm/test/CodeGen/X86/vshift-3.ll
index 92781894b334..4d6815c8b128 100644
--- a/llvm/test/CodeGen/X86/vshift-3.ll
+++ b/llvm/test/CodeGen/X86/vshift-3.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
 
 ; test vector shifts converted to proper SSE2 vector shifts when the shift
 ; amounts are the same.

diff --git a/llvm/test/CodeGen/X86/vshift-4.ll b/llvm/test/CodeGen/X86/vshift-4.ll
index ca5cc902b6ef..ca76f25c7271 100644
--- a/llvm/test/CodeGen/X86/vshift-4.ll
+++ b/llvm/test/CodeGen/X86/vshift-4.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
 
 ; test vector shifts converted to proper SSE2 vector shifts when the shift
 ; amounts are the same when using a shuffle splat.

diff --git a/llvm/test/CodeGen/X86/vshift-5.ll b/llvm/test/CodeGen/X86/vshift-5.ll
index b3cab10f4c1f..dcf112b00d44 100644
--- a/llvm/test/CodeGen/X86/vshift-5.ll
+++ b/llvm/test/CodeGen/X86/vshift-5.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
 
 ; When loading the shift amount from memory, avoid generating the splat.
 

diff --git a/llvm/test/CodeGen/X86/vshift-6.ll b/llvm/test/CodeGen/X86/vshift-6.ll
index 63f5ccac8a75..471ea5ad5c93 100644
--- a/llvm/test/CodeGen/X86/vshift-6.ll
+++ b/llvm/test/CodeGen/X86/vshift-6.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
 
 ; This test makes sure that the compiler does not crash with an
 ; assertion failure when trying to fold a vector shift left

diff --git a/llvm/test/CodeGen/X86/widen_arith-4.ll b/llvm/test/CodeGen/X86/widen_arith-4.ll
index 490783ef6579..c878517c0eb5 100644
--- a/llvm/test/CodeGen/X86/widen_arith-4.ll
+++ b/llvm/test/CodeGen/X86/widen_arith-4.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE41
 
 ; Widen a v5i16 to v8i16 to do a vector sub and multiple
 

diff --git a/llvm/test/CodeGen/X86/widen_bitops-0.ll b/llvm/test/CodeGen/X86/widen_bitops-0.ll
index 02a665d15a61..7d91502694ce 100644
--- a/llvm/test/CodeGen/X86/widen_bitops-0.ll
+++ b/llvm/test/CodeGen/X86/widen_bitops-0.ll
@@ -1,23 +1,23 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X32-SSE --check-prefix=X32-SSE42
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64-SSE --check-prefix=X64-SSE42
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
 
 ;
 ; AND/XOR/OR i24 as v3i8
 ;
 
 define i24 @and_i24_as_v3i8(i24 %a, i24 %b) nounwind {
-; X32-SSE-LABEL: and_i24_as_v3i8:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    retl
+; X86-LABEL: and_i24_as_v3i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
 ;
-; X64-SSE-LABEL: and_i24_as_v3i8:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movl %edi, %eax
-; X64-SSE-NEXT:    andl %esi, %eax
-; X64-SSE-NEXT:    retq
+; X64-LABEL: and_i24_as_v3i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl %esi, %eax
+; X64-NEXT:    retq
   %1 = bitcast i24 %a to <3 x i8>
   %2 = bitcast i24 %b to <3 x i8>
   %3 = and <3 x i8> %1, %2
@@ -26,17 +26,17 @@ define i24 @and_i24_as_v3i8(i24 %a, i24 %b) nounwind {
 }
 
 define i24 @xor_i24_as_v3i8(i24 %a, i24 %b) nounwind {
-; X32-SSE-LABEL: xor_i24_as_v3i8:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    xorl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    retl
+; X86-LABEL: xor_i24_as_v3i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
 ;
-; X64-SSE-LABEL: xor_i24_as_v3i8:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movl %edi, %eax
-; X64-SSE-NEXT:    xorl %esi, %eax
-; X64-SSE-NEXT:    retq
+; X64-LABEL: xor_i24_as_v3i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl %esi, %eax
+; X64-NEXT:    retq
   %1 = bitcast i24 %a to <3 x i8>
   %2 = bitcast i24 %b to <3 x i8>
   %3 = xor <3 x i8> %1, %2
@@ -45,17 +45,17 @@ define i24 @xor_i24_as_v3i8(i24 %a, i24 %b) nounwind {
 }
 
 define i24 @or_i24_as_v3i8(i24 %a, i24 %b) nounwind {
-; X32-SSE-LABEL: or_i24_as_v3i8:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    orl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    retl
+; X86-LABEL: or_i24_as_v3i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
 ;
-; X64-SSE-LABEL: or_i24_as_v3i8:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movl %edi, %eax
-; X64-SSE-NEXT:    orl %esi, %eax
-; X64-SSE-NEXT:    retq
+; X64-LABEL: or_i24_as_v3i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    orl %esi, %eax
+; X64-NEXT:    retq
   %1 = bitcast i24 %a to <3 x i8>
   %2 = bitcast i24 %b to <3 x i8>
   %3 = or <3 x i8> %1, %2
@@ -68,17 +68,17 @@ define i24 @or_i24_as_v3i8(i24 %a, i24 %b) nounwind {
 ;
 
 define i24 @and_i24_as_v8i3(i24 %a, i24 %b) nounwind {
-; X32-SSE-LABEL: and_i24_as_v8i3:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    retl
+; X86-LABEL: and_i24_as_v8i3:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
 ;
-; X64-SSE-LABEL: and_i24_as_v8i3:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movl %edi, %eax
-; X64-SSE-NEXT:    andl %esi, %eax
-; X64-SSE-NEXT:    retq
+; X64-LABEL: and_i24_as_v8i3:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl %esi, %eax
+; X64-NEXT:    retq
   %1 = bitcast i24 %a to <8 x i3>
   %2 = bitcast i24 %b to <8 x i3>
   %3 = and <8 x i3> %1, %2
@@ -87,17 +87,17 @@ define i24 @and_i24_as_v8i3(i24 %a, i24 %b) nounwind {
 }
 
 define i24 @xor_i24_as_v8i3(i24 %a, i24 %b) nounwind {
-; X32-SSE-LABEL: xor_i24_as_v8i3:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    xorl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    retl
+; X86-LABEL: xor_i24_as_v8i3:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
 ;
-; X64-SSE-LABEL: xor_i24_as_v8i3:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movl %edi, %eax
-; X64-SSE-NEXT:    xorl %esi, %eax
-; X64-SSE-NEXT:    retq
+; X64-LABEL: xor_i24_as_v8i3:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl %esi, %eax
+; X64-NEXT:    retq
   %1 = bitcast i24 %a to <8 x i3>
   %2 = bitcast i24 %b to <8 x i3>
   %3 = xor <8 x i3> %1, %2
@@ -106,17 +106,17 @@ define i24 @xor_i24_as_v8i3(i24 %a, i24 %b) nounwind {
 }
 
 define i24 @or_i24_as_v8i3(i24 %a, i24 %b) nounwind {
-; X32-SSE-LABEL: or_i24_as_v8i3:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    orl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    retl
+; X86-LABEL: or_i24_as_v8i3:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
 ;
-; X64-SSE-LABEL: or_i24_as_v8i3:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movl %edi, %eax
-; X64-SSE-NEXT:    orl %esi, %eax
-; X64-SSE-NEXT:    retq
+; X64-LABEL: or_i24_as_v8i3:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    orl %esi, %eax
+; X64-NEXT:    retq
   %1 = bitcast i24 %a to <8 x i3>
   %2 = bitcast i24 %b to <8 x i3>
   %3 = or <8 x i3> %1, %2
@@ -129,39 +129,39 @@ define i24 @or_i24_as_v8i3(i24 %a, i24 %b) nounwind {
 ;
 
 define <3 x i8> @and_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
-; X32-SSE-LABEL: and_v3i8_as_i24:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm0
-; X32-SSE-NEXT:    pinsrb $2, {{[0-9]+}}(%esp), %xmm0
-; X32-SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-SSE-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm1
-; X32-SSE-NEXT:    pinsrb $2, {{[0-9]+}}(%esp), %xmm1
-; X32-SSE-NEXT:    pand %xmm0, %xmm1
-; X32-SSE-NEXT:    movd %xmm1, %eax
-; X32-SSE-NEXT:    pextrb $1, %xmm1, %edx
-; X32-SSE-NEXT:    pextrb $2, %xmm1, %ecx
-; X32-SSE-NEXT:    # kill: def $al killed $al killed $eax
-; X32-SSE-NEXT:    # kill: def $dl killed $dl killed $edx
-; X32-SSE-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X32-SSE-NEXT:    retl
+; X86-LABEL: and_v3i8_as_i24:
+; X86:       # %bb.0:
+; X86-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    pinsrb $2, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm1
+; X86-NEXT:    pinsrb $2, {{[0-9]+}}(%esp), %xmm1
+; X86-NEXT:    pand %xmm0, %xmm1
+; X86-NEXT:    movd %xmm1, %eax
+; X86-NEXT:    pextrb $1, %xmm1, %edx
+; X86-NEXT:    pextrb $2, %xmm1, %ecx
+; X86-NEXT:    # kill: def $al killed $al killed $eax
+; X86-NEXT:    # kill: def $dl killed $dl killed $edx
+; X86-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NEXT:    retl
 ;
-; X64-SSE-LABEL: and_v3i8_as_i24:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movd %ecx, %xmm0
-; X64-SSE-NEXT:    pinsrb $1, %r8d, %xmm0
-; X64-SSE-NEXT:    pinsrb $2, %r9d, %xmm0
-; X64-SSE-NEXT:    movd %edi, %xmm1
-; X64-SSE-NEXT:    pinsrb $1, %esi, %xmm1
-; X64-SSE-NEXT:    pinsrb $2, %edx, %xmm1
-; X64-SSE-NEXT:    pand %xmm0, %xmm1
-; X64-SSE-NEXT:    movd %xmm1, %eax
-; X64-SSE-NEXT:    pextrb $1, %xmm1, %edx
-; X64-SSE-NEXT:    pextrb $2, %xmm1, %ecx
-; X64-SSE-NEXT:    # kill: def $al killed $al killed $eax
-; X64-SSE-NEXT:    # kill: def $dl killed $dl killed $edx
-; X64-SSE-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X64-SSE-NEXT:    retq
+; X64-LABEL: and_v3i8_as_i24:
+; X64:       # %bb.0:
+; X64-NEXT:    movd %ecx, %xmm0
+; X64-NEXT:    pinsrb $1, %r8d, %xmm0
+; X64-NEXT:    pinsrb $2, %r9d, %xmm0
+; X64-NEXT:    movd %edi, %xmm1
+; X64-NEXT:    pinsrb $1, %esi, %xmm1
+; X64-NEXT:    pinsrb $2, %edx, %xmm1
+; X64-NEXT:    pand %xmm0, %xmm1
+; X64-NEXT:    movd %xmm1, %eax
+; X64-NEXT:    pextrb $1, %xmm1, %edx
+; X64-NEXT:    pextrb $2, %xmm1, %ecx
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    # kill: def $dl killed $dl killed $edx
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    retq
   %1 = bitcast <3 x i8> %a to i24
   %2 = bitcast <3 x i8> %b to i24
   %3 = and i24 %1, %2
@@ -170,39 +170,39 @@ define <3 x i8> @and_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
 }
 
 define <3 x i8> @xor_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
-; X32-SSE-LABEL: xor_v3i8_as_i24:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm0
-; X32-SSE-NEXT:    pinsrb $2, {{[0-9]+}}(%esp), %xmm0
-; X32-SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-SSE-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm1
-; X32-SSE-NEXT:    pinsrb $2, {{[0-9]+}}(%esp), %xmm1
-; X32-SSE-NEXT:    pxor %xmm0, %xmm1
-; X32-SSE-NEXT:    movd %xmm1, %eax
-; X32-SSE-NEXT:    pextrb $1, %xmm1, %edx
-; X32-SSE-NEXT:    pextrb $2, %xmm1, %ecx
-; X32-SSE-NEXT:    # kill: def $al killed $al killed $eax
-; X32-SSE-NEXT:    # kill: def $dl killed $dl killed $edx
-; X32-SSE-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X32-SSE-NEXT:    retl
+; X86-LABEL: xor_v3i8_as_i24:
+; X86:       # %bb.0:
+; X86-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    pinsrb $2, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm1
+; X86-NEXT:    pinsrb $2, {{[0-9]+}}(%esp), %xmm1
+; X86-NEXT:    pxor %xmm0, %xmm1
+; X86-NEXT:    movd %xmm1, %eax
+; X86-NEXT:    pextrb $1, %xmm1, %edx
+; X86-NEXT:    pextrb $2, %xmm1, %ecx
+; X86-NEXT:    # kill: def $al killed $al killed $eax
+; X86-NEXT:    # kill: def $dl killed $dl killed $edx
+; X86-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NEXT:    retl
 ;
-; X64-SSE-LABEL: xor_v3i8_as_i24:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movd %ecx, %xmm0
-; X64-SSE-NEXT:    pinsrb $1, %r8d, %xmm0
-; X64-SSE-NEXT:    pinsrb $2, %r9d, %xmm0
-; X64-SSE-NEXT:    movd %edi, %xmm1
-; X64-SSE-NEXT:    pinsrb $1, %esi, %xmm1
-; X64-SSE-NEXT:    pinsrb $2, %edx, %xmm1
-; X64-SSE-NEXT:    pxor %xmm0, %xmm1
-; X64-SSE-NEXT:    movd %xmm1, %eax
-; X64-SSE-NEXT:    pextrb $1, %xmm1, %edx
-; X64-SSE-NEXT:    pextrb $2, %xmm1, %ecx
-; X64-SSE-NEXT:    # kill: def $al killed $al killed $eax
-; X64-SSE-NEXT:    # kill: def $dl killed $dl killed $edx
-; X64-SSE-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X64-SSE-NEXT:    retq
+; X64-LABEL: xor_v3i8_as_i24:
+; X64:       # %bb.0:
+; X64-NEXT:    movd %ecx, %xmm0
+; X64-NEXT:    pinsrb $1, %r8d, %xmm0
+; X64-NEXT:    pinsrb $2, %r9d, %xmm0
+; X64-NEXT:    movd %edi, %xmm1
+; X64-NEXT:    pinsrb $1, %esi, %xmm1
+; X64-NEXT:    pinsrb $2, %edx, %xmm1
+; X64-NEXT:    pxor %xmm0, %xmm1
+; X64-NEXT:    movd %xmm1, %eax
+; X64-NEXT:    pextrb $1, %xmm1, %edx
+; X64-NEXT:    pextrb $2, %xmm1, %ecx
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    # kill: def $dl killed $dl killed $edx
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    retq
   %1 = bitcast <3 x i8> %a to i24
   %2 = bitcast <3 x i8> %b to i24
   %3 = xor i24 %1, %2
@@ -211,39 +211,39 @@ define <3 x i8> @xor_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
 }
 
 define <3 x i8> @or_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
-; X32-SSE-LABEL: or_v3i8_as_i24:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm0
-; X32-SSE-NEXT:    pinsrb $2, {{[0-9]+}}(%esp), %xmm0
-; X32-SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-SSE-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm1
-; X32-SSE-NEXT:    pinsrb $2, {{[0-9]+}}(%esp), %xmm1
-; X32-SSE-NEXT:    por %xmm0, %xmm1
-; X32-SSE-NEXT:    movd %xmm1, %eax
-; X32-SSE-NEXT:    pextrb $1, %xmm1, %edx
-; X32-SSE-NEXT:    pextrb $2, %xmm1, %ecx
-; X32-SSE-NEXT:    # kill: def $al killed $al killed $eax
-; X32-SSE-NEXT:    # kill: def $dl killed $dl killed $edx
-; X32-SSE-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X32-SSE-NEXT:    retl
+; X86-LABEL: or_v3i8_as_i24:
+; X86:       # %bb.0:
+; X86-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    pinsrb $2, {{[0-9]+}}(%esp), %xmm0
+; X86-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X86-NEXT:    pinsrb $1, {{[0-9]+}}(%esp), %xmm1
+; X86-NEXT:    pinsrb $2, {{[0-9]+}}(%esp), %xmm1
+; X86-NEXT:    por %xmm0, %xmm1
+; X86-NEXT:    movd %xmm1, %eax
+; X86-NEXT:    pextrb $1, %xmm1, %edx
+; X86-NEXT:    pextrb $2, %xmm1, %ecx
+; X86-NEXT:    # kill: def $al killed $al killed $eax
+; X86-NEXT:    # kill: def $dl killed $dl killed $edx
+; X86-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X86-NEXT:    retl
 ;
-; X64-SSE-LABEL: or_v3i8_as_i24:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movd %ecx, %xmm0
-; X64-SSE-NEXT:    pinsrb $1, %r8d, %xmm0
-; X64-SSE-NEXT:    pinsrb $2, %r9d, %xmm0
-; X64-SSE-NEXT:    movd %edi, %xmm1
-; X64-SSE-NEXT:    pinsrb $1, %esi, %xmm1
-; X64-SSE-NEXT:    pinsrb $2, %edx, %xmm1
-; X64-SSE-NEXT:    por %xmm0, %xmm1
-; X64-SSE-NEXT:    movd %xmm1, %eax
-; X64-SSE-NEXT:    pextrb $1, %xmm1, %edx
-; X64-SSE-NEXT:    pextrb $2, %xmm1, %ecx
-; X64-SSE-NEXT:    # kill: def $al killed $al killed $eax
-; X64-SSE-NEXT:    # kill: def $dl killed $dl killed $edx
-; X64-SSE-NEXT:    # kill: def $cl killed $cl killed $ecx
-; X64-SSE-NEXT:    retq
+; X64-LABEL: or_v3i8_as_i24:
+; X64:       # %bb.0:
+; X64-NEXT:    movd %ecx, %xmm0
+; X64-NEXT:    pinsrb $1, %r8d, %xmm0
+; X64-NEXT:    pinsrb $2, %r9d, %xmm0
+; X64-NEXT:    movd %edi, %xmm1
+; X64-NEXT:    pinsrb $1, %esi, %xmm1
+; X64-NEXT:    pinsrb $2, %edx, %xmm1
+; X64-NEXT:    por %xmm0, %xmm1
+; X64-NEXT:    movd %xmm1, %eax
+; X64-NEXT:    pextrb $1, %xmm1, %edx
+; X64-NEXT:    pextrb $2, %xmm1, %ecx
+; X64-NEXT:    # kill: def $al killed $al killed $eax
+; X64-NEXT:    # kill: def $dl killed $dl killed $edx
+; X64-NEXT:    # kill: def $cl killed $cl killed $ecx
+; X64-NEXT:    retq
   %1 = bitcast <3 x i8> %a to i24
   %2 = bitcast <3 x i8> %b to i24
   %3 = or i24 %1, %2
@@ -256,15 +256,15 @@ define <3 x i8> @or_v3i8_as_i24(<3 x i8> %a, <3 x i8> %b) nounwind {
 ;
 
 define <8 x i3> @and_v8i3_as_i24(<8 x i3> %a, <8 x i3> %b) nounwind {
-; X32-SSE-LABEL: and_v8i3_as_i24:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    andps %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
+; X86-LABEL: and_v8i3_as_i24:
+; X86:       # %bb.0:
+; X86-NEXT:    andps %xmm1, %xmm0
+; X86-NEXT:    retl
 ;
-; X64-SSE-LABEL: and_v8i3_as_i24:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    andps %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X64-LABEL: and_v8i3_as_i24:
+; X64:       # %bb.0:
+; X64-NEXT:    andps %xmm1, %xmm0
+; X64-NEXT:    retq
   %1 = bitcast <8 x i3> %a to i24
   %2 = bitcast <8 x i3> %b to i24
   %3 = and i24 %1, %2
@@ -273,15 +273,15 @@ define <8 x i3> @and_v8i3_as_i24(<8 x i3> %a, <8 x i3> %b) nounwind {
 }
 
 define <8 x i3> @xor_v8i3_as_i24(<8 x i3> %a, <8 x i3> %b) nounwind {
-; X32-SSE-LABEL: xor_v8i3_as_i24:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    xorps %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
+; X86-LABEL: xor_v8i3_as_i24:
+; X86:       # %bb.0:
+; X86-NEXT:    xorps %xmm1, %xmm0
+; X86-NEXT:    retl
 ;
-; X64-SSE-LABEL: xor_v8i3_as_i24:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    xorps %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X64-LABEL: xor_v8i3_as_i24:
+; X64:       # %bb.0:
+; X64-NEXT:    xorps %xmm1, %xmm0
+; X64-NEXT:    retq
   %1 = bitcast <8 x i3> %a to i24
   %2 = bitcast <8 x i3> %b to i24
   %3 = xor i24 %1, %2
@@ -290,15 +290,15 @@ define <8 x i3> @xor_v8i3_as_i24(<8 x i3> %a, <8 x i3> %b) nounwind {
 }
 
 define <8 x i3> @or_v8i3_as_i24(<8 x i3> %a, <8 x i3> %b) nounwind {
-; X32-SSE-LABEL: or_v8i3_as_i24:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    orps %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
+; X86-LABEL: or_v8i3_as_i24:
+; X86:       # %bb.0:
+; X86-NEXT:    orps %xmm1, %xmm0
+; X86-NEXT:    retl
 ;
-; X64-SSE-LABEL: or_v8i3_as_i24:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    orps %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X64-LABEL: or_v8i3_as_i24:
+; X64:       # %bb.0:
+; X64-NEXT:    orps %xmm1, %xmm0
+; X64-NEXT:    retq
   %1 = bitcast <8 x i3> %a to i24
   %2 = bitcast <8 x i3> %b to i24
   %3 = or i24 %1, %2

diff  --git a/llvm/test/CodeGen/X86/widen_bitops-1.ll b/llvm/test/CodeGen/X86/widen_bitops-1.ll
index 3c97efecdc33..dde4902b4486 100644
--- a/llvm/test/CodeGen/X86/widen_bitops-1.ll
+++ b/llvm/test/CodeGen/X86/widen_bitops-1.ll
@@ -1,23 +1,23 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X32-SSE --check-prefix=X32-SSE42
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64-SSE --check-prefix=X64-SSE42
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
 
 ;
 ; AND/XOR/OR i32 as v4i8
 ;
 
 define i32 @and_i32_as_v4i8(i32 %a, i32 %b) nounwind {
-; X32-SSE-LABEL: and_i32_as_v4i8:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: and_i32_as_v4i8:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movl %edi, %eax
-; X64-SSE-NEXT:    andl %esi, %eax
-; X64-SSE-NEXT:    retq
+; X86-LABEL: and_i32_as_v4i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: and_i32_as_v4i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl %esi, %eax
+; X64-NEXT:    retq
   %1 = bitcast i32 %a to <4 x i8>
   %2 = bitcast i32 %b to <4 x i8>
   %3 = and <4 x i8> %1, %2
@@ -26,17 +26,17 @@ define i32 @and_i32_as_v4i8(i32 %a, i32 %b) nounwind {
 }
 
 define i32 @xor_i32_as_v4i8(i32 %a, i32 %b) nounwind {
-; X32-SSE-LABEL: xor_i32_as_v4i8:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    xorl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: xor_i32_as_v4i8:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movl %edi, %eax
-; X64-SSE-NEXT:    xorl %esi, %eax
-; X64-SSE-NEXT:    retq
+; X86-LABEL: xor_i32_as_v4i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_i32_as_v4i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl %esi, %eax
+; X64-NEXT:    retq
   %1 = bitcast i32 %a to <4 x i8>
   %2 = bitcast i32 %b to <4 x i8>
   %3 = xor <4 x i8> %1, %2
@@ -45,17 +45,17 @@ define i32 @xor_i32_as_v4i8(i32 %a, i32 %b) nounwind {
 }
 
 define i32 @or_i32_as_v4i8(i32 %a, i32 %b) nounwind {
-; X32-SSE-LABEL: or_i32_as_v4i8:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    orl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: or_i32_as_v4i8:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movl %edi, %eax
-; X64-SSE-NEXT:    orl %esi, %eax
-; X64-SSE-NEXT:    retq
+; X86-LABEL: or_i32_as_v4i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: or_i32_as_v4i8:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    orl %esi, %eax
+; X64-NEXT:    retq
   %1 = bitcast i32 %a to <4 x i8>
   %2 = bitcast i32 %b to <4 x i8>
   %3 = or <4 x i8> %1, %2
@@ -68,17 +68,17 @@ define i32 @or_i32_as_v4i8(i32 %a, i32 %b) nounwind {
 ;
 
 define i32 @and_i32_as_v8i4(i32 %a, i32 %b) nounwind {
-; X32-SSE-LABEL: and_i32_as_v8i4:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    andl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: and_i32_as_v8i4:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movl %edi, %eax
-; X64-SSE-NEXT:    andl %esi, %eax
-; X64-SSE-NEXT:    retq
+; X86-LABEL: and_i32_as_v8i4:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: and_i32_as_v8i4:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    andl %esi, %eax
+; X64-NEXT:    retq
   %1 = bitcast i32 %a to <8 x i4>
   %2 = bitcast i32 %b to <8 x i4>
   %3 = and <8 x i4> %1, %2
@@ -87,17 +87,17 @@ define i32 @and_i32_as_v8i4(i32 %a, i32 %b) nounwind {
 }
 
 define i32 @xor_i32_as_v8i4(i32 %a, i32 %b) nounwind {
-; X32-SSE-LABEL: xor_i32_as_v8i4:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    xorl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: xor_i32_as_v8i4:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movl %edi, %eax
-; X64-SSE-NEXT:    xorl %esi, %eax
-; X64-SSE-NEXT:    retq
+; X86-LABEL: xor_i32_as_v8i4:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_i32_as_v8i4:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl %esi, %eax
+; X64-NEXT:    retq
   %1 = bitcast i32 %a to <8 x i4>
   %2 = bitcast i32 %b to <8 x i4>
   %3 = xor <8 x i4> %1, %2
@@ -106,17 +106,17 @@ define i32 @xor_i32_as_v8i4(i32 %a, i32 %b) nounwind {
 }
 
 define i32 @or_i32_as_v8i4(i32 %a, i32 %b) nounwind {
-; X32-SSE-LABEL: or_i32_as_v8i4:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    orl {{[0-9]+}}(%esp), %eax
-; X32-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: or_i32_as_v8i4:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movl %edi, %eax
-; X64-SSE-NEXT:    orl %esi, %eax
-; X64-SSE-NEXT:    retq
+; X86-LABEL: or_i32_as_v8i4:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
+;
+; X64-LABEL: or_i32_as_v8i4:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    orl %esi, %eax
+; X64-NEXT:    retq
   %1 = bitcast i32 %a to <8 x i4>
   %2 = bitcast i32 %b to <8 x i4>
   %3 = or <8 x i4> %1, %2
@@ -129,15 +129,15 @@ define i32 @or_i32_as_v8i4(i32 %a, i32 %b) nounwind {
 ;
 
 define <4 x i8> @and_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
-; X32-SSE-LABEL: and_v4i8_as_i32:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    andps %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: and_v4i8_as_i32:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    andps %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X86-LABEL: and_v4i8_as_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    andps %xmm1, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: and_v4i8_as_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    andps %xmm1, %xmm0
+; X64-NEXT:    retq
   %1 = bitcast <4 x i8> %a to i32
   %2 = bitcast <4 x i8> %b to i32
   %3 = and i32 %1, %2
@@ -146,15 +146,15 @@ define <4 x i8> @and_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
 }
 
 define <4 x i8> @xor_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
-; X32-SSE-LABEL: xor_v4i8_as_i32:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    xorps %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: xor_v4i8_as_i32:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    xorps %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X86-LABEL: xor_v4i8_as_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    xorps %xmm1, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_v4i8_as_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    xorps %xmm1, %xmm0
+; X64-NEXT:    retq
   %1 = bitcast <4 x i8> %a to i32
   %2 = bitcast <4 x i8> %b to i32
   %3 = xor i32 %1, %2
@@ -163,15 +163,15 @@ define <4 x i8> @xor_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
 }
 
 define <4 x i8> @or_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
-; X32-SSE-LABEL: or_v4i8_as_i32:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    orps %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: or_v4i8_as_i32:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    orps %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X86-LABEL: or_v4i8_as_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    orps %xmm1, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: or_v4i8_as_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    orps %xmm1, %xmm0
+; X64-NEXT:    retq
   %1 = bitcast <4 x i8> %a to i32
   %2 = bitcast <4 x i8> %b to i32
   %3 = or i32 %1, %2
@@ -184,15 +184,15 @@ define <4 x i8> @or_v4i8_as_i32(<4 x i8> %a, <4 x i8> %b) nounwind {
 ;
 
 define <8 x i4> @and_v8i4_as_i32(<8 x i4> %a, <8 x i4> %b) nounwind {
-; X32-SSE-LABEL: and_v8i4_as_i32:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    andps %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: and_v8i4_as_i32:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    andps %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X86-LABEL: and_v8i4_as_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    andps %xmm1, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: and_v8i4_as_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    andps %xmm1, %xmm0
+; X64-NEXT:    retq
   %1 = bitcast <8 x i4> %a to i32
   %2 = bitcast <8 x i4> %b to i32
   %3 = and i32 %1, %2
@@ -201,15 +201,15 @@ define <8 x i4> @and_v8i4_as_i32(<8 x i4> %a, <8 x i4> %b) nounwind {
 }
 
 define <8 x i4> @xor_v8i4_as_i32(<8 x i4> %a, <8 x i4> %b) nounwind {
-; X32-SSE-LABEL: xor_v8i4_as_i32:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    xorps %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: xor_v8i4_as_i32:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    xorps %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X86-LABEL: xor_v8i4_as_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    xorps %xmm1, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: xor_v8i4_as_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    xorps %xmm1, %xmm0
+; X64-NEXT:    retq
   %1 = bitcast <8 x i4> %a to i32
   %2 = bitcast <8 x i4> %b to i32
   %3 = xor i32 %1, %2
@@ -218,15 +218,15 @@ define <8 x i4> @xor_v8i4_as_i32(<8 x i4> %a, <8 x i4> %b) nounwind {
 }
 
 define <8 x i4> @or_v8i4_as_i32(<8 x i4> %a, <8 x i4> %b) nounwind {
-; X32-SSE-LABEL: or_v8i4_as_i32:
-; X32-SSE:       # %bb.0:
-; X32-SSE-NEXT:    orps %xmm1, %xmm0
-; X32-SSE-NEXT:    retl
-;
-; X64-SSE-LABEL: or_v8i4_as_i32:
-; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    orps %xmm1, %xmm0
-; X64-SSE-NEXT:    retq
+; X86-LABEL: or_v8i4_as_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    orps %xmm1, %xmm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: or_v8i4_as_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    orps %xmm1, %xmm0
+; X64-NEXT:    retq
   %1 = bitcast <8 x i4> %a to i32
   %2 = bitcast <8 x i4> %b to i32
   %3 = or i32 %1, %2

diff  --git a/llvm/test/CodeGen/X86/widen_conv-3.ll b/llvm/test/CodeGen/X86/widen_conv-3.ll
index 4ebfca6a3a00..6b5099a38ed2 100644
--- a/llvm/test/CodeGen/X86/widen_conv-3.ll
+++ b/llvm/test/CodeGen/X86/widen_conv-3.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE2
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE42
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE42
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86-SSE2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86-SSE42
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64-SSE42
 
 ; sign to float v2i16 to v2f32
 

diff  --git a/llvm/test/CodeGen/X86/widen_conv-4.ll b/llvm/test/CodeGen/X86/widen_conv-4.ll
index 11eaa7c31c7b..152596e27608 100644
--- a/llvm/test/CodeGen/X86/widen_conv-4.ll
+++ b/llvm/test/CodeGen/X86/widen_conv-4.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE2
-; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE42
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE42
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86-SSE2
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86-SSE42
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64-SSE42
 
 ; unsigned to float v7i16 to v7f32
 

diff  --git a/llvm/test/CodeGen/X86/widen_load-3.ll b/llvm/test/CodeGen/X86/widen_load-3.ll
index f456c486990f..e4958de5f5a9 100644
--- a/llvm/test/CodeGen/X86/widen_load-3.ll
+++ b/llvm/test/CodeGen/X86/widen_load-3.ll
@@ -1,10 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE
-; RUN: llc < %s -mtriple=i686-linux -mattr=+avx    | FileCheck %s --check-prefix=X86 --check-prefix=X86-AVX --check-prefix=X86-AVX1
-; RUN: llc < %s -mtriple=i686-linux -mattr=+avx2   | FileCheck %s --check-prefix=X86 --check-prefix=X86-AVX --check-prefix=X86-AVX2
-; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE
-; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx    | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX1
-; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx2   | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX2
+; RUN: llc < %s -mtriple=i686-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=X86-SSE
+; RUN: llc < %s -mtriple=i686-linux -mattr=+avx    | FileCheck %s --check-prefix=X86-AVX
+; RUN: llc < %s -mtriple=i686-linux -mattr=+avx2   | FileCheck %s --check-prefix=X86-AVX
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse4.2 | FileCheck %s --check-prefix=X64-SSE
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx    | FileCheck %s --check-prefix=X64-AVX
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx2   | FileCheck %s --check-prefix=X64-AVX
 
 ; PR27708
 

diff  --git a/llvm/test/CodeGen/X86/widen_mul.ll b/llvm/test/CodeGen/X86/widen_mul.ll
index 17dbf73c860b..6c217c30721a 100644
--- a/llvm/test/CodeGen/X86/widen_mul.ll
+++ b/llvm/test/CodeGen/X86/widen_mul.ll
@@ -1,9 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX
 
 ; Test multiplies of various narrow types.
 

diff  --git a/llvm/test/CodeGen/X86/widened-broadcast.ll b/llvm/test/CodeGen/X86/widened-broadcast.ll
index 033f5b596f90..04414eeac5df 100644
--- a/llvm/test/CodeGen/X86/widened-broadcast.ll
+++ b/llvm/test/CodeGen/X86/widened-broadcast.ll
@@ -3,7 +3,7 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE42
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512
 
 ; Widened shuffle broadcast loads
 

diff  --git a/llvm/test/CodeGen/X86/win64_frame.ll b/llvm/test/CodeGen/X86/win64_frame.ll
index 9158b19b2f93..ee917615e5b9 100644
--- a/llvm/test/CodeGen/X86/win64_frame.ll
+++ b/llvm/test/CodeGen/X86/win64_frame.ll
@@ -1,85 +1,85 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-pc-win32              | FileCheck %s --check-prefix=ALL --check-prefix=PUSHF
-; RUN: llc < %s -mtriple=x86_64-pc-win32 -mattr=+sahf | FileCheck %s --check-prefix=ALL --check-prefix=SAHF
+; RUN: llc < %s -mtriple=x86_64-pc-win32              | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-pc-win32 -mattr=+sahf | FileCheck %s
 
 define i32 @f1(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5) "frame-pointer"="all" {
-; ALL-LABEL: f1:
-; ALL:       # %bb.0:
-; ALL-NEXT:    pushq %rbp
-; ALL-NEXT:    .seh_pushreg %rbp
-; ALL-NEXT:    movq %rsp, %rbp
-; ALL-NEXT:    .seh_setframe %rbp, 0
-; ALL-NEXT:    .seh_endprologue
-; ALL-NEXT:    movl 48(%rbp), %eax
-; ALL-NEXT:    popq %rbp
-; ALL-NEXT:    retq
-; ALL-NEXT:    .seh_handlerdata
-; ALL-NEXT:    .text
-; ALL-NEXT:    .seh_endproc
+; CHECK-LABEL: f1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbp
+; CHECK-NEXT:    .seh_pushreg %rbp
+; CHECK-NEXT:    movq %rsp, %rbp
+; CHECK-NEXT:    .seh_setframe %rbp, 0
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    movl 48(%rbp), %eax
+; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    retq
+; CHECK-NEXT:    .seh_handlerdata
+; CHECK-NEXT:    .text
+; CHECK-NEXT:    .seh_endproc
   ret i32 %p5
 }
 
 define void @f2(i32 %p, ...) "frame-pointer"="all" {
-; ALL-LABEL: f2:
-; ALL:       # %bb.0:
-; ALL-NEXT:    pushq %rbp
-; ALL-NEXT:    .seh_pushreg %rbp
-; ALL-NEXT:    pushq %rax
-; ALL-NEXT:    .seh_stackalloc 8
-; ALL-NEXT:    movq %rsp, %rbp
-; ALL-NEXT:    .seh_setframe %rbp, 0
-; ALL-NEXT:    .seh_endprologue
-; ALL-NEXT:    movq %rdx, 32(%rbp)
-; ALL-NEXT:    movq %r8, 40(%rbp)
-; ALL-NEXT:    movq %r9, 48(%rbp)
-; ALL-NEXT:    leaq 32(%rbp), %rax
-; ALL-NEXT:    movq %rax, (%rbp)
-; ALL-NEXT:    addq $8, %rsp
-; ALL-NEXT:    popq %rbp
-; ALL-NEXT:    retq
-; ALL-NEXT:    .seh_handlerdata
-; ALL-NEXT:    .text
-; ALL-NEXT:    .seh_endproc
+; CHECK-LABEL: f2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbp
+; CHECK-NEXT:    .seh_pushreg %rbp
+; CHECK-NEXT:    pushq %rax
+; CHECK-NEXT:    .seh_stackalloc 8
+; CHECK-NEXT:    movq %rsp, %rbp
+; CHECK-NEXT:    .seh_setframe %rbp, 0
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    movq %rdx, 32(%rbp)
+; CHECK-NEXT:    movq %r8, 40(%rbp)
+; CHECK-NEXT:    movq %r9, 48(%rbp)
+; CHECK-NEXT:    leaq 32(%rbp), %rax
+; CHECK-NEXT:    movq %rax, (%rbp)
+; CHECK-NEXT:    addq $8, %rsp
+; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    retq
+; CHECK-NEXT:    .seh_handlerdata
+; CHECK-NEXT:    .text
+; CHECK-NEXT:    .seh_endproc
   %ap = alloca i8, align 8
   call void @llvm.va_start(i8* %ap)
   ret void
 }
 
 define i8* @f3() "frame-pointer"="all" {
-; ALL-LABEL: f3:
-; ALL:       # %bb.0:
-; ALL-NEXT:    pushq %rbp
-; ALL-NEXT:    .seh_pushreg %rbp
-; ALL-NEXT:    movq %rsp, %rbp
-; ALL-NEXT:    .seh_setframe %rbp, 0
-; ALL-NEXT:    .seh_endprologue
-; ALL-NEXT:    movq 8(%rbp), %rax
-; ALL-NEXT:    popq %rbp
-; ALL-NEXT:    retq
-; ALL-NEXT:    .seh_handlerdata
-; ALL-NEXT:    .text
-; ALL-NEXT:    .seh_endproc
+; CHECK-LABEL: f3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbp
+; CHECK-NEXT:    .seh_pushreg %rbp
+; CHECK-NEXT:    movq %rsp, %rbp
+; CHECK-NEXT:    .seh_setframe %rbp, 0
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    movq 8(%rbp), %rax
+; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    retq
+; CHECK-NEXT:    .seh_handlerdata
+; CHECK-NEXT:    .text
+; CHECK-NEXT:    .seh_endproc
   %ra = call i8* @llvm.returnaddress(i32 0)
   ret i8* %ra
 }
 
 define i8* @f4() "frame-pointer"="all" {
-; ALL-LABEL: f4:
-; ALL:       # %bb.0:
-; ALL-NEXT:    pushq %rbp
-; ALL-NEXT:    .seh_pushreg %rbp
-; ALL-NEXT:    subq $304, %rsp # imm = 0x130
-; ALL-NEXT:    .seh_stackalloc 304
-; ALL-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
-; ALL-NEXT:    .seh_setframe %rbp, 128
-; ALL-NEXT:    .seh_endprologue
-; ALL-NEXT:    movq 184(%rbp), %rax
-; ALL-NEXT:    addq $304, %rsp # imm = 0x130
-; ALL-NEXT:    popq %rbp
-; ALL-NEXT:    retq
-; ALL-NEXT:    .seh_handlerdata
-; ALL-NEXT:    .text
-; ALL-NEXT:    .seh_endproc
+; CHECK-LABEL: f4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbp
+; CHECK-NEXT:    .seh_pushreg %rbp
+; CHECK-NEXT:    subq $304, %rsp # imm = 0x130
+; CHECK-NEXT:    .seh_stackalloc 304
+; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
+; CHECK-NEXT:    .seh_setframe %rbp, 128
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    movq 184(%rbp), %rax
+; CHECK-NEXT:    addq $304, %rsp # imm = 0x130
+; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    retq
+; CHECK-NEXT:    .seh_handlerdata
+; CHECK-NEXT:    .text
+; CHECK-NEXT:    .seh_endproc
   alloca [300 x i8]
   %ra = call i8* @llvm.returnaddress(i32 0)
   ret i8* %ra
@@ -88,24 +88,24 @@ define i8* @f4() "frame-pointer"="all" {
 declare void @external(i8*)
 
 define void @f5() "frame-pointer"="all" {
-; ALL-LABEL: f5:
-; ALL:       # %bb.0:
-; ALL-NEXT:    pushq %rbp
-; ALL-NEXT:    .seh_pushreg %rbp
-; ALL-NEXT:    subq $336, %rsp # imm = 0x150
-; ALL-NEXT:    .seh_stackalloc 336
-; ALL-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
-; ALL-NEXT:    .seh_setframe %rbp, 128
-; ALL-NEXT:    .seh_endprologue
-; ALL-NEXT:    leaq -92(%rbp), %rcx
-; ALL-NEXT:    callq external
-; ALL-NEXT:    nop
-; ALL-NEXT:    addq $336, %rsp # imm = 0x150
-; ALL-NEXT:    popq %rbp
-; ALL-NEXT:    retq
-; ALL-NEXT:    .seh_handlerdata
-; ALL-NEXT:    .text
-; ALL-NEXT:    .seh_endproc
+; CHECK-LABEL: f5:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbp
+; CHECK-NEXT:    .seh_pushreg %rbp
+; CHECK-NEXT:    subq $336, %rsp # imm = 0x150
+; CHECK-NEXT:    .seh_stackalloc 336
+; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
+; CHECK-NEXT:    .seh_setframe %rbp, 128
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    leaq -92(%rbp), %rcx
+; CHECK-NEXT:    callq external
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    addq $336, %rsp # imm = 0x150
+; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    retq
+; CHECK-NEXT:    .seh_handlerdata
+; CHECK-NEXT:    .text
+; CHECK-NEXT:    .seh_endproc
   %a = alloca [300 x i8]
   %gep = getelementptr [300 x i8], [300 x i8]* %a, i32 0, i32 0
   call void @external(i8* %gep)
@@ -113,24 +113,24 @@ define void @f5() "frame-pointer"="all" {
 }
 
 define void @f6(i32 %p, ...) "frame-pointer"="all" {
-; ALL-LABEL: f6:
-; ALL:       # %bb.0:
-; ALL-NEXT:    pushq %rbp
-; ALL-NEXT:    .seh_pushreg %rbp
-; ALL-NEXT:    subq $336, %rsp # imm = 0x150
-; ALL-NEXT:    .seh_stackalloc 336
-; ALL-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
-; ALL-NEXT:    .seh_setframe %rbp, 128
-; ALL-NEXT:    .seh_endprologue
-; ALL-NEXT:    leaq -92(%rbp), %rcx
-; ALL-NEXT:    callq external
-; ALL-NEXT:    nop
-; ALL-NEXT:    addq $336, %rsp # imm = 0x150
-; ALL-NEXT:    popq %rbp
-; ALL-NEXT:    retq
-; ALL-NEXT:    .seh_handlerdata
-; ALL-NEXT:    .text
-; ALL-NEXT:    .seh_endproc
+; CHECK-LABEL: f6:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbp
+; CHECK-NEXT:    .seh_pushreg %rbp
+; CHECK-NEXT:    subq $336, %rsp # imm = 0x150
+; CHECK-NEXT:    .seh_stackalloc 336
+; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
+; CHECK-NEXT:    .seh_setframe %rbp, 128
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    leaq -92(%rbp), %rcx
+; CHECK-NEXT:    callq external
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    addq $336, %rsp # imm = 0x150
+; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    retq
+; CHECK-NEXT:    .seh_handlerdata
+; CHECK-NEXT:    .text
+; CHECK-NEXT:    .seh_endproc
   %a = alloca [300 x i8]
   %gep = getelementptr [300 x i8], [300 x i8]* %a, i32 0, i32 0
   call void @external(i8* %gep)
@@ -138,62 +138,62 @@ define void @f6(i32 %p, ...) "frame-pointer"="all" {
 }
 
 define i32 @f7(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) "frame-pointer"="all" {
-; ALL-LABEL: f7:
-; ALL:       # %bb.0:
-; ALL-NEXT:    pushq %rbp
-; ALL-NEXT:    .seh_pushreg %rbp
-; ALL-NEXT:    subq $304, %rsp # imm = 0x130
-; ALL-NEXT:    .seh_stackalloc 304
-; ALL-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
-; ALL-NEXT:    .seh_setframe %rbp, 128
-; ALL-NEXT:    .seh_endprologue
-; ALL-NEXT:    andq $-64, %rsp
-; ALL-NEXT:    movl 224(%rbp), %eax
-; ALL-NEXT:    leaq 176(%rbp), %rsp
-; ALL-NEXT:    popq %rbp
-; ALL-NEXT:    retq
-; ALL-NEXT:    .seh_handlerdata
-; ALL-NEXT:    .text
-; ALL-NEXT:    .seh_endproc
+; CHECK-LABEL: f7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbp
+; CHECK-NEXT:    .seh_pushreg %rbp
+; CHECK-NEXT:    subq $304, %rsp # imm = 0x130
+; CHECK-NEXT:    .seh_stackalloc 304
+; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
+; CHECK-NEXT:    .seh_setframe %rbp, 128
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    andq $-64, %rsp
+; CHECK-NEXT:    movl 224(%rbp), %eax
+; CHECK-NEXT:    leaq 176(%rbp), %rsp
+; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    retq
+; CHECK-NEXT:    .seh_handlerdata
+; CHECK-NEXT:    .text
+; CHECK-NEXT:    .seh_endproc
   alloca [300 x i8], align 64
   ret i32 %e
 }
 
 define i32 @f8(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) "frame-pointer"="all" {
-; ALL-LABEL: f8:
-; ALL:       # %bb.0:
-; ALL-NEXT:    pushq %rbp
-; ALL-NEXT:    .seh_pushreg %rbp
-; ALL-NEXT:    pushq %rsi
-; ALL-NEXT:    .seh_pushreg %rsi
-; ALL-NEXT:    pushq %rbx
-; ALL-NEXT:    .seh_pushreg %rbx
-; ALL-NEXT:    subq $352, %rsp # imm = 0x160
-; ALL-NEXT:    .seh_stackalloc 352
-; ALL-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
-; ALL-NEXT:    .seh_setframe %rbp, 128
-; ALL-NEXT:    .seh_endprologue
-; ALL-NEXT:    andq $-64, %rsp
-; ALL-NEXT:    movq %rsp, %rbx
-; ALL-NEXT:    movl 288(%rbp), %esi
-; ALL-NEXT:    movl %ecx, %eax
-; ALL-NEXT:    leaq 15(,%rax,4), %rax
-; ALL-NEXT:    andq $-16, %rax
-; ALL-NEXT:    callq __chkstk
-; ALL-NEXT:    subq %rax, %rsp
-; ALL-NEXT:    subq $32, %rsp
-; ALL-NEXT:    movq %rbx, %rcx
-; ALL-NEXT:    callq external
-; ALL-NEXT:    addq $32, %rsp
-; ALL-NEXT:    movl %esi, %eax
-; ALL-NEXT:    leaq 224(%rbp), %rsp
-; ALL-NEXT:    popq %rbx
-; ALL-NEXT:    popq %rsi
-; ALL-NEXT:    popq %rbp
-; ALL-NEXT:    retq
-; ALL-NEXT:    .seh_handlerdata
-; ALL-NEXT:    .text
-; ALL-NEXT:    .seh_endproc
+; CHECK-LABEL: f8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbp
+; CHECK-NEXT:    .seh_pushreg %rbp
+; CHECK-NEXT:    pushq %rsi
+; CHECK-NEXT:    .seh_pushreg %rsi
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    .seh_pushreg %rbx
+; CHECK-NEXT:    subq $352, %rsp # imm = 0x160
+; CHECK-NEXT:    .seh_stackalloc 352
+; CHECK-NEXT:    leaq {{[0-9]+}}(%rsp), %rbp
+; CHECK-NEXT:    .seh_setframe %rbp, 128
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    andq $-64, %rsp
+; CHECK-NEXT:    movq %rsp, %rbx
+; CHECK-NEXT:    movl 288(%rbp), %esi
+; CHECK-NEXT:    movl %ecx, %eax
+; CHECK-NEXT:    leaq 15(,%rax,4), %rax
+; CHECK-NEXT:    andq $-16, %rax
+; CHECK-NEXT:    callq __chkstk
+; CHECK-NEXT:    subq %rax, %rsp
+; CHECK-NEXT:    subq $32, %rsp
+; CHECK-NEXT:    movq %rbx, %rcx
+; CHECK-NEXT:    callq external
+; CHECK-NEXT:    addq $32, %rsp
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    leaq 224(%rbp), %rsp
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    popq %rsi
+; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    retq
+; CHECK-NEXT:    .seh_handlerdata
+; CHECK-NEXT:    .text
+; CHECK-NEXT:    .seh_endproc
   %alloca = alloca [300 x i8], align 64
   alloca i32, i32 %a
   %gep = getelementptr [300 x i8], [300 x i8]* %alloca, i32 0, i32 0
@@ -202,20 +202,20 @@ define i32 @f8(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) "frame-pointer"="all" {
 }
 
 define i64 @f9() {
-; ALL-LABEL: f9:
-; ALL:       # %bb.0: # %entry
-; ALL-NEXT:    pushq %rbp
-; ALL-NEXT:    .seh_pushreg %rbp
-; ALL-NEXT:    movq %rsp, %rbp
-; ALL-NEXT:    .seh_setframe %rbp, 0
-; ALL-NEXT:    .seh_endprologue
-; ALL-NEXT:    pushfq
-; ALL-NEXT:    popq %rax
-; ALL-NEXT:    popq %rbp
-; ALL-NEXT:    retq
-; ALL-NEXT:    .seh_handlerdata
-; ALL-NEXT:    .text
-; ALL-NEXT:    .seh_endproc
+; CHECK-LABEL: f9:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rbp
+; CHECK-NEXT:    .seh_pushreg %rbp
+; CHECK-NEXT:    movq %rsp, %rbp
+; CHECK-NEXT:    .seh_setframe %rbp, 0
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    pushfq
+; CHECK-NEXT:    popq %rax
+; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    retq
+; CHECK-NEXT:    .seh_handlerdata
+; CHECK-NEXT:    .text
+; CHECK-NEXT:    .seh_endproc
 entry:
   %call = call i64 @llvm.x86.flags.read.u64()
   ret i64 %call
@@ -224,29 +224,29 @@ entry:
 declare i64 @dummy()
 
 define i64 @f10(i64* %foo, i64 %bar, i64 %baz) {
-; ALL-LABEL: f10:
-; ALL:       # %bb.0:
-; ALL-NEXT:    pushq %rsi
-; ALL-NEXT:    .seh_pushreg %rsi
-; ALL-NEXT:    pushq %rbx
-; ALL-NEXT:    .seh_pushreg %rbx
-; ALL-NEXT:    subq $40, %rsp
-; ALL-NEXT:    .seh_stackalloc 40
-; ALL-NEXT:    .seh_endprologue
-; ALL-NEXT:    movq %rdx, %rsi
-; ALL-NEXT:    movq %rdx, %rax
-; ALL-NEXT:    lock cmpxchgq %r8, (%rcx)
-; ALL-NEXT:    sete %bl
-; ALL-NEXT:    callq dummy
-; ALL-NEXT:    testb %bl, %bl
-; ALL-NEXT:    cmoveq %rsi, %rax
-; ALL-NEXT:    addq $40, %rsp
-; ALL-NEXT:    popq %rbx
-; ALL-NEXT:    popq %rsi
-; ALL-NEXT:    retq
-; ALL-NEXT:    .seh_handlerdata
-; ALL-NEXT:    .text
-; ALL-NEXT:    .seh_endproc
+; CHECK-LABEL: f10:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rsi
+; CHECK-NEXT:    .seh_pushreg %rsi
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    .seh_pushreg %rbx
+; CHECK-NEXT:    subq $40, %rsp
+; CHECK-NEXT:    .seh_stackalloc 40
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    movq %rdx, %rsi
+; CHECK-NEXT:    movq %rdx, %rax
+; CHECK-NEXT:    lock cmpxchgq %r8, (%rcx)
+; CHECK-NEXT:    sete %bl
+; CHECK-NEXT:    callq dummy
+; CHECK-NEXT:    testb %bl, %bl
+; CHECK-NEXT:    cmoveq %rsi, %rax
+; CHECK-NEXT:    addq $40, %rsp
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    popq %rsi
+; CHECK-NEXT:    retq
+; CHECK-NEXT:    .seh_handlerdata
+; CHECK-NEXT:    .text
+; CHECK-NEXT:    .seh_endproc
   %cx = cmpxchg i64* %foo, i64 %bar, i64 %baz seq_cst seq_cst
   %v = extractvalue { i64, i1 } %cx, 0
   %p = extractvalue { i64, i1 } %cx, 1
@@ -256,28 +256,28 @@ define i64 @f10(i64* %foo, i64 %bar, i64 %baz) {
 }
 
 define i8* @f11() "frame-pointer"="all" {
-; ALL-LABEL: f11:
-; ALL:       # %bb.0:
-; ALL-NEXT:    pushq %rbp
-; ALL-NEXT:    .seh_pushreg %rbp
-; ALL-NEXT:    movq %rsp, %rbp
-; ALL-NEXT:    .seh_setframe %rbp, 0
-; ALL-NEXT:    .seh_endprologue
-; ALL-NEXT:    leaq 8(%rbp), %rax
-; ALL-NEXT:    popq %rbp
-; ALL-NEXT:    retq
-; ALL-NEXT:    .seh_handlerdata
-; ALL-NEXT:    .text
-; ALL-NEXT:    .seh_endproc
+; CHECK-LABEL: f11:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbp
+; CHECK-NEXT:    .seh_pushreg %rbp
+; CHECK-NEXT:    movq %rsp, %rbp
+; CHECK-NEXT:    .seh_setframe %rbp, 0
+; CHECK-NEXT:    .seh_endprologue
+; CHECK-NEXT:    leaq 8(%rbp), %rax
+; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    retq
+; CHECK-NEXT:    .seh_handlerdata
+; CHECK-NEXT:    .text
+; CHECK-NEXT:    .seh_endproc
   %aora = call i8* @llvm.addressofreturnaddress()
   ret i8* %aora
 }
 
 define i8* @f12() {
-; ALL-LABEL: f12:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movq %rsp, %rax
-; ALL-NEXT:    retq
+; CHECK-LABEL: f12:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    retq
   %aora = call i8* @llvm.addressofreturnaddress()
   ret i8* %aora
 }

diff  --git a/llvm/test/CodeGen/X86/xaluo128.ll b/llvm/test/CodeGen/X86/xaluo128.ll
index 61009b9616ea..b7cd962bb2cb 100644
--- a/llvm/test/CodeGen/X86/xaluo128.ll
+++ b/llvm/test/CodeGen/X86/xaluo128.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-darwin-unknown < %s | FileCheck %s --check-prefix=SDAG --check-prefix=X64
-; RUN: llc -mtriple=i686-darwin-unknown < %s | FileCheck %s --check-prefix=SDAG --check-prefix=X86
+; RUN: llc -mtriple=x86_64-darwin-unknown < %s | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=i686-darwin-unknown < %s | FileCheck %s --check-prefix=X86
 
 define zeroext i1 @saddoi128(i128 %v1, i128 %v2, i128* %res) nounwind {
 ; X64-LABEL: saddoi128:

diff  --git a/llvm/test/CodeGen/X86/xmulo.ll b/llvm/test/CodeGen/X86/xmulo.ll
index 6f2c229ac1ff..3ac2ce077219 100644
--- a/llvm/test/CodeGen/X86/xmulo.ll
+++ b/llvm/test/CodeGen/X86/xmulo.ll
@@ -1,60 +1,38 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -disable-peephole -mtriple=x86_64-darwin-unknown < %s | FileCheck %s --check-prefix=SDAG
-; RUN: llc -disable-peephole -mtriple=x86_64-darwin-unknown -fast-isel -fast-isel-abort=1 < %s | FileCheck %s --check-prefix=FAST
-; RUN: llc -disable-peephole -mtriple=x86_64-darwin-unknown -mcpu=knl < %s | FileCheck %s --check-prefix=SDAG --check-prefix=KNL
+; RUN: llc -disable-peephole -mtriple=x86_64-darwin-unknown < %s | FileCheck %s --check-prefixes=CHECK,SDAG
+; RUN: llc -disable-peephole -mtriple=x86_64-darwin-unknown -fast-isel -fast-isel-abort=1 < %s | FileCheck %s --check-prefixes=CHECK,FAST
+; RUN: llc -disable-peephole -mtriple=x86_64-darwin-unknown -mcpu=knl < %s | FileCheck %s --check-prefixes=CHECK,SDAG
 
 define {i64, i1} @t1() nounwind {
-; SDAG-LABEL: t1:
-; SDAG:       ## %bb.0:
-; SDAG-NEXT:    movl $8, %ecx
-; SDAG-NEXT:    movl $9, %eax
-; SDAG-NEXT:    mulq %rcx
-; SDAG-NEXT:    seto %dl
-; SDAG-NEXT:    retq
-;
-; FAST-LABEL: t1:
-; FAST:       ## %bb.0:
-; FAST-NEXT:    movl $8, %ecx
-; FAST-NEXT:    movl $9, %eax
-; FAST-NEXT:    mulq %rcx
-; FAST-NEXT:    seto %dl
-; FAST-NEXT:    retq
+; CHECK-LABEL: t1:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    movl $8, %ecx
+; CHECK-NEXT:    movl $9, %eax
+; CHECK-NEXT:    mulq %rcx
+; CHECK-NEXT:    seto %dl
+; CHECK-NEXT:    retq
   %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 8)
   ret {i64, i1} %1
 }
 
 define {i64, i1} @t2() nounwind {
-; SDAG-LABEL: t2:
-; SDAG:       ## %bb.0:
-; SDAG-NEXT:    xorl %eax, %eax
-; SDAG-NEXT:    xorl %edx, %edx
-; SDAG-NEXT:    retq
-;
-; FAST-LABEL: t2:
-; FAST:       ## %bb.0:
-; FAST-NEXT:    xorl %eax, %eax
-; FAST-NEXT:    xorl %edx, %edx
-; FAST-NEXT:    retq
+; CHECK-LABEL: t2:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    xorl %edx, %edx
+; CHECK-NEXT:    retq
   %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 0)
   ret {i64, i1} %1
 }
 
 define {i64, i1} @t3() nounwind {
-; SDAG-LABEL: t3:
-; SDAG:       ## %bb.0:
-; SDAG-NEXT:    movq $-1, %rcx
-; SDAG-NEXT:    movl $9, %eax
-; SDAG-NEXT:    mulq %rcx
-; SDAG-NEXT:    seto %dl
-; SDAG-NEXT:    retq
-;
-; FAST-LABEL: t3:
-; FAST:       ## %bb.0:
-; FAST-NEXT:    movq $-1, %rcx
-; FAST-NEXT:    movl $9, %eax
-; FAST-NEXT:    mulq %rcx
-; FAST-NEXT:    seto %dl
-; FAST-NEXT:    retq
+; CHECK-LABEL: t3:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    movq $-1, %rcx
+; CHECK-NEXT:    movl $9, %eax
+; CHECK-NEXT:    mulq %rcx
+; CHECK-NEXT:    seto %dl
+; CHECK-NEXT:    retq
   %1 = call {i64, i1} @llvm.umul.with.overflow.i64(i64 9, i64 -1)
   ret {i64, i1} %1
 }
@@ -276,21 +254,13 @@ define zeroext i1 @umuloi64(i64 %v1, i64 %v2, i64* %res) {
 ; Check the use of the overflow bit in combination with a select instruction.
 ;
 define i32 @smuloselecti32(i32 %v1, i32 %v2) {
-; SDAG-LABEL: smuloselecti32:
-; SDAG:       ## %bb.0:
-; SDAG-NEXT:    movl %esi, %eax
-; SDAG-NEXT:    movl %edi, %ecx
-; SDAG-NEXT:    imull %esi, %ecx
-; SDAG-NEXT:    cmovol %edi, %eax
-; SDAG-NEXT:    retq
-;
-; FAST-LABEL: smuloselecti32:
-; FAST:       ## %bb.0:
-; FAST-NEXT:    movl %esi, %eax
-; FAST-NEXT:    movl %edi, %ecx
-; FAST-NEXT:    imull %esi, %ecx
-; FAST-NEXT:    cmovol %edi, %eax
-; FAST-NEXT:    retq
+; CHECK-LABEL: smuloselecti32:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    movl %edi, %ecx
+; CHECK-NEXT:    imull %esi, %ecx
+; CHECK-NEXT:    cmovol %edi, %eax
+; CHECK-NEXT:    retq
   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
   %obit = extractvalue {i32, i1} %t, 1
   %ret = select i1 %obit, i32 %v1, i32 %v2
@@ -298,21 +268,13 @@ define i32 @smuloselecti32(i32 %v1, i32 %v2) {
 }
 
 define i64 @smuloselecti64(i64 %v1, i64 %v2) {
-; SDAG-LABEL: smuloselecti64:
-; SDAG:       ## %bb.0:
-; SDAG-NEXT:    movq %rsi, %rax
-; SDAG-NEXT:    movq %rdi, %rcx
-; SDAG-NEXT:    imulq %rsi, %rcx
-; SDAG-NEXT:    cmovoq %rdi, %rax
-; SDAG-NEXT:    retq
-;
-; FAST-LABEL: smuloselecti64:
-; FAST:       ## %bb.0:
-; FAST-NEXT:    movq %rsi, %rax
-; FAST-NEXT:    movq %rdi, %rcx
-; FAST-NEXT:    imulq %rsi, %rcx
-; FAST-NEXT:    cmovoq %rdi, %rax
-; FAST-NEXT:    retq
+; CHECK-LABEL: smuloselecti64:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    movq %rsi, %rax
+; CHECK-NEXT:    movq %rdi, %rcx
+; CHECK-NEXT:    imulq %rsi, %rcx
+; CHECK-NEXT:    cmovoq %rdi, %rax
+; CHECK-NEXT:    retq
   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
   %obit = extractvalue {i64, i1} %t, 1
   %ret = select i1 %obit, i64 %v1, i64 %v2
@@ -320,21 +282,13 @@ define i64 @smuloselecti64(i64 %v1, i64 %v2) {
 }
 
 define i32 @umuloselecti32(i32 %v1, i32 %v2) {
-; SDAG-LABEL: umuloselecti32:
-; SDAG:       ## %bb.0:
-; SDAG-NEXT:    movl %edi, %eax
-; SDAG-NEXT:    mull %esi
-; SDAG-NEXT:    cmovol %edi, %esi
-; SDAG-NEXT:    movl %esi, %eax
-; SDAG-NEXT:    retq
-;
-; FAST-LABEL: umuloselecti32:
-; FAST:       ## %bb.0:
-; FAST-NEXT:    movl %edi, %eax
-; FAST-NEXT:    mull %esi
-; FAST-NEXT:    cmovol %edi, %esi
-; FAST-NEXT:    movl %esi, %eax
-; FAST-NEXT:    retq
+; CHECK-LABEL: umuloselecti32:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    mull %esi
+; CHECK-NEXT:    cmovol %edi, %esi
+; CHECK-NEXT:    movl %esi, %eax
+; CHECK-NEXT:    retq
   %t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
   %obit = extractvalue {i32, i1} %t, 1
   %ret = select i1 %obit, i32 %v1, i32 %v2
@@ -342,21 +296,13 @@ define i32 @umuloselecti32(i32 %v1, i32 %v2) {
 }
 
 define i64 @umuloselecti64(i64 %v1, i64 %v2) {
-; SDAG-LABEL: umuloselecti64:
-; SDAG:       ## %bb.0:
-; SDAG-NEXT:    movq %rdi, %rax
-; SDAG-NEXT:    mulq %rsi
-; SDAG-NEXT:    cmovoq %rdi, %rsi
-; SDAG-NEXT:    movq %rsi, %rax
-; SDAG-NEXT:    retq
-;
-; FAST-LABEL: umuloselecti64:
-; FAST:       ## %bb.0:
-; FAST-NEXT:    movq %rdi, %rax
-; FAST-NEXT:    mulq %rsi
-; FAST-NEXT:    cmovoq %rdi, %rsi
-; FAST-NEXT:    movq %rsi, %rax
-; FAST-NEXT:    retq
+; CHECK-LABEL: umuloselecti64:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    mulq %rsi
+; CHECK-NEXT:    cmovoq %rdi, %rsi
+; CHECK-NEXT:    movq %rsi, %rax
+; CHECK-NEXT:    retq
   %t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)
   %obit = extractvalue {i64, i1} %t, 1
   %ret = select i1 %obit, i64 %v1, i64 %v2
@@ -695,23 +641,14 @@ continue:
 }
 
 define i1 @bug27873(i64 %c1, i1 %c2) {
-; SDAG-LABEL: bug27873:
-; SDAG:       ## %bb.0:
-; SDAG-NEXT:    movq %rdi, %rax
-; SDAG-NEXT:    movl $160, %ecx
-; SDAG-NEXT:    mulq %rcx
-; SDAG-NEXT:    seto %al
-; SDAG-NEXT:    orb %sil, %al
-; SDAG-NEXT:    retq
-;
-; FAST-LABEL: bug27873:
-; FAST:       ## %bb.0:
-; FAST-NEXT:    movq %rdi, %rax
-; FAST-NEXT:    movl $160, %ecx
-; FAST-NEXT:    mulq %rcx
-; FAST-NEXT:    seto %al
-; FAST-NEXT:    orb %sil, %al
-; FAST-NEXT:    retq
+; CHECK-LABEL: bug27873:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    movl $160, %ecx
+; CHECK-NEXT:    mulq %rcx
+; CHECK-NEXT:    seto %al
+; CHECK-NEXT:    orb %sil, %al
+; CHECK-NEXT:    retq
   %mul = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %c1, i64 160)
   %mul.overflow = extractvalue { i64, i1 } %mul, 1
   %x1 = or i1 %c2, %mul.overflow

diff  --git a/llvm/test/CodeGen/X86/xop-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
index b1771e8a3b85..821816d58e7c 100644
--- a/llvm/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/xop-intrinsics-fast-isel.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -fast-isel -mtriple=i686-unknown-unknown -mattr=+avx,+fma4,+xop | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma4,+xop | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -fast-isel -mtriple=i686-unknown-unknown -mattr=+avx,+fma4,+xop | FileCheck %s
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+avx,+fma4,+xop | FileCheck %s
 
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/xop-builtins.c
 

diff  --git a/llvm/test/CodeGen/X86/xor-icmp.ll b/llvm/test/CodeGen/X86/xor-icmp.ll
index f520544d806f..95bc660dc014 100644
--- a/llvm/test/CodeGen/X86/xor-icmp.ll
+++ b/llvm/test/CodeGen/X86/xor-icmp.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown   | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefixes=CHECK,X64
+; RUN: llc < %s -mtriple=i686-unknown   | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
 ; rdar://7367229
 
 define i32 @t(i32 %a, i32 %b) nounwind ssp {

diff  --git a/llvm/test/CodeGen/X86/xor.ll b/llvm/test/CodeGen/X86/xor.ll
index 6ac06dd314e8..efd8456db914 100644
--- a/llvm/test/CodeGen/X86/xor.ll
+++ b/llvm/test/CodeGen/X86/xor.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X86
-; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64,X64-LIN
-; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64,X64-WIN
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64-LIN
+; RUN: llc < %s -mtriple=x86_64-win32 -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,X64-WIN
 
 ; Though it is undefined, we want xor undef,undef to produce zero.
 define <4 x i32> @test1() nounwind {


        


More information about the llvm-commits mailing list