[llvm] 4b42757 - [X86][GlobalISel] Remove some unused check-prefixes

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Mon Nov 9 09:21:59 PST 2020


Author: Simon Pilgrim
Date: 2020-11-09T17:21:29Z
New Revision: 4b427579407b4323e10220cb6dab72c162d098e5

URL: https://github.com/llvm/llvm-project/commit/4b427579407b4323e10220cb6dab72c162d098e5
DIFF: https://github.com/llvm/llvm-project/commit/4b427579407b4323e10220cb6dab72c162d098e5.diff

LOG: [X86][GlobalISel] Remove some unused check-prefixes

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
    llvm/test/CodeGen/X86/GlobalISel/binop.ll
    llvm/test/CodeGen/X86/GlobalISel/br.ll
    llvm/test/CodeGen/X86/GlobalISel/brcond.ll
    llvm/test/CodeGen/X86/GlobalISel/constant.ll
    llvm/test/CodeGen/X86/GlobalISel/fadd-scalar.ll
    llvm/test/CodeGen/X86/GlobalISel/fconstant.ll
    llvm/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll
    llvm/test/CodeGen/X86/GlobalISel/fmul-scalar.ll
    llvm/test/CodeGen/X86/GlobalISel/fsub-scalar.ll
    llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
    llvm/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
    llvm/test/CodeGen/X86/GlobalISel/mul-scalar.ll
    llvm/test/CodeGen/X86/GlobalISel/ptr-add.ll
    llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
    llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
    llvm/test/CodeGen/X86/GlobalISel/regbankselect-X32.mir
    llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
    llvm/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir
    llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
index fd526963a788..3fb3b54902c0 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/add-scalar.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
-; RUN: llc -mtriple=i386-linux-gnu   -global-isel -global-isel-abort=2 -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -global-isel-abort=2 -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=i386-linux-gnu   -global-isel -global-isel-abort=2 -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X86
 
 define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
 ; X64-LABEL: test_add_i64:
@@ -8,13 +8,13 @@ define i64 @test_add_i64(i64 %arg1, i64 %arg2) {
 ; X64-NEXT:    leaq (%rsi,%rdi), %rax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: test_add_i64:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    adcl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    retl
+; X86-LABEL: test_add_i64:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    adcl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    retl
   %ret = add i64 %arg1, %arg2
   ret i64 %ret
 }
@@ -27,11 +27,11 @@ define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
 ; X64-NEXT:    leal (%rsi,%rdi), %eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: test_add_i32:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    retl
+; X86-LABEL: test_add_i32:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
   %ret = add i32 %arg1, %arg2
   ret i32 %ret
 }
@@ -45,11 +45,11 @@ define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
 ; X64-NEXT:    # kill: def $ax killed $ax killed $eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: test_add_i16:
-; X32:       # %bb.0:
-; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    addw {{[0-9]+}}(%esp), %ax
-; X32-NEXT:    retl
+; X86-LABEL: test_add_i16:
+; X86:       # %bb.0:
+; X86-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    addw {{[0-9]+}}(%esp), %ax
+; X86-NEXT:    retl
   %ret = add i16 %arg1, %arg2
   ret i16 %ret
 }
@@ -63,11 +63,11 @@ define i8 @test_add_i8(i8 %arg1, i8 %arg2) {
 ; X64-NEXT:    # kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: test_add_i8:
-; X32:       # %bb.0:
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    addb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    retl
+; X86-LABEL: test_add_i8:
+; X86:       # %bb.0:
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    addb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    retl
   %ret = add i8 %arg1, %arg2
   ret i8 %ret
 }
@@ -82,15 +82,15 @@ define i32 @test_add_i1(i32 %arg1, i32 %arg2) {
 ; X64-NEXT:    andl $1, %eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: test_add_i1:
-; X32:       # %bb.0:
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    cmpl %eax, {{[0-9]+}}(%esp)
-; X32-NEXT:    sete %al
-; X32-NEXT:    addb %al, %al
-; X32-NEXT:    movzbl %al, %eax
-; X32-NEXT:    andl $1, %eax
-; X32-NEXT:    retl
+; X86-LABEL: test_add_i1:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT:    sete %al
+; X86-NEXT:    addb %al, %al
+; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    andl $1, %eax
+; X86-NEXT:    retl
   %c = icmp eq i32 %arg1, %arg2
   %x = add i1 %c , %c
   %ret = zext i1 %x to i32

diff --git a/llvm/test/CodeGen/X86/GlobalISel/binop.ll b/llvm/test/CodeGen/X86/GlobalISel/binop.ll
index d06b758048e2..aced59dd0a6e 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/binop.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/binop.ll
@@ -1,25 +1,25 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu                                  -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx                      -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f                  -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512F
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=ALL_AVX --check-prefix=AVX512VL
+; RUN: llc -mtriple=x86_64-linux-gnu                                  -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx                      -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f                  -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefixes=CHECK,AVX
 
 define i64 @test_sub_i64(i64 %arg1, i64 %arg2) {
-; ALL-LABEL: test_sub_i64:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movq %rdi, %rax
-; ALL-NEXT:    subq %rsi, %rax
-; ALL-NEXT:    retq
+; CHECK-LABEL: test_sub_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    subq %rsi, %rax
+; CHECK-NEXT:    retq
   %ret = sub i64 %arg1, %arg2
   ret i64 %ret
 }
 
 define i32 @test_sub_i32(i32 %arg1, i32 %arg2) {
-; ALL-LABEL: test_sub_i32:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movl %edi, %eax
-; ALL-NEXT:    subl %esi, %eax
-; ALL-NEXT:    retq
+; CHECK-LABEL: test_sub_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl %edi, %eax
+; CHECK-NEXT:    subl %esi, %eax
+; CHECK-NEXT:    retq
   %ret = sub i32 %arg1, %arg2
   ret i32 %ret
 }
@@ -30,10 +30,10 @@ define float @test_add_float(float %arg1, float %arg2) {
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; ALL_AVX-LABEL: test_add_float:
-; ALL_AVX:       # %bb.0:
-; ALL_AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
-; ALL_AVX-NEXT:    retq
+; AVX-LABEL: test_add_float:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ret = fadd float %arg1, %arg2
   ret float %ret
 }
@@ -44,10 +44,10 @@ define double @test_add_double(double %arg1, double %arg2) {
 ; SSE-NEXT:    addsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; ALL_AVX-LABEL: test_add_double:
-; ALL_AVX:       # %bb.0:
-; ALL_AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
-; ALL_AVX-NEXT:    retq
+; AVX-LABEL: test_add_double:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ret = fadd double %arg1, %arg2
   ret double %ret
 }
@@ -58,10 +58,10 @@ define float @test_sub_float(float %arg1, float %arg2) {
 ; SSE-NEXT:    subss %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; ALL_AVX-LABEL: test_sub_float:
-; ALL_AVX:       # %bb.0:
-; ALL_AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
-; ALL_AVX-NEXT:    retq
+; AVX-LABEL: test_sub_float:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ret = fsub float %arg1, %arg2
   ret float %ret
 }
@@ -72,10 +72,10 @@ define double @test_sub_double(double %arg1, double %arg2) {
 ; SSE-NEXT:    subsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; ALL_AVX-LABEL: test_sub_double:
-; ALL_AVX:       # %bb.0:
-; ALL_AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
-; ALL_AVX-NEXT:    retq
+; AVX-LABEL: test_sub_double:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ret = fsub double %arg1, %arg2
   ret double %ret
 }
@@ -86,10 +86,10 @@ define <4 x i32>  @test_add_v4i32(<4 x i32> %arg1, <4 x i32>  %arg2) {
 ; SSE-NEXT:    paddd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; ALL_AVX-LABEL: test_add_v4i32:
-; ALL_AVX:       # %bb.0:
-; ALL_AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; ALL_AVX-NEXT:    retq
+; AVX-LABEL: test_add_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ret = add <4 x i32>  %arg1, %arg2
   ret <4 x i32>  %ret
 }
@@ -100,10 +100,10 @@ define <4 x i32>  @test_sub_v4i32(<4 x i32> %arg1, <4 x i32>  %arg2) {
 ; SSE-NEXT:    psubd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; ALL_AVX-LABEL: test_sub_v4i32:
-; ALL_AVX:       # %bb.0:
-; ALL_AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
-; ALL_AVX-NEXT:    retq
+; AVX-LABEL: test_sub_v4i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ret = sub <4 x i32>  %arg1, %arg2
   ret <4 x i32>  %ret
 }
@@ -114,10 +114,10 @@ define <4 x float>  @test_add_v4f32(<4 x float> %arg1, <4 x float>  %arg2) {
 ; SSE-NEXT:    addps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; ALL_AVX-LABEL: test_add_v4f32:
-; ALL_AVX:       # %bb.0:
-; ALL_AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
-; ALL_AVX-NEXT:    retq
+; AVX-LABEL: test_add_v4f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ret = fadd <4 x float>  %arg1, %arg2
   ret <4 x float>  %ret
 }
@@ -128,10 +128,10 @@ define <4 x float>  @test_sub_v4f32(<4 x float> %arg1, <4 x float>  %arg2) {
 ; SSE-NEXT:    subps %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
-; ALL_AVX-LABEL: test_sub_v4f32:
-; ALL_AVX:       # %bb.0:
-; ALL_AVX-NEXT:    vsubps %xmm1, %xmm0, %xmm0
-; ALL_AVX-NEXT:    retq
+; AVX-LABEL: test_sub_v4f32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vsubps %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %ret = fsub <4 x float>  %arg1, %arg2
   ret <4 x float>  %ret
 }
@@ -142,10 +142,10 @@ define i32  @test_copy_float(float %val) {
 ; SSE-NEXT:    movd %xmm0, %eax
 ; SSE-NEXT:    retq
 ;
-; ALL_AVX-LABEL: test_copy_float:
-; ALL_AVX:       # %bb.0:
-; ALL_AVX-NEXT:    vmovd %xmm0, %eax
-; ALL_AVX-NEXT:    retq
+; AVX-LABEL: test_copy_float:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %xmm0, %eax
+; AVX-NEXT:    retq
   %r = bitcast float %val to i32
   ret i32 %r
 }
@@ -156,10 +156,10 @@ define float  @test_copy_i32(i32 %val) {
 ; SSE-NEXT:    movd %edi, %xmm0
 ; SSE-NEXT:    retq
 ;
-; ALL_AVX-LABEL: test_copy_i32:
-; ALL_AVX:       # %bb.0:
-; ALL_AVX-NEXT:    vmovd %edi, %xmm0
-; ALL_AVX-NEXT:    retq
+; AVX-LABEL: test_copy_i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovd %edi, %xmm0
+; AVX-NEXT:    retq
   %r = bitcast i32 %val to float
   ret float %r
 }

diff --git a/llvm/test/CodeGen/X86/GlobalISel/br.ll b/llvm/test/CodeGen/X86/GlobalISel/br.ll
index 2c07a4d326e4..878fe981c988 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/br.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/br.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -O0 -mtriple=x86_64-linux-gnu    -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64
+; RUN: llc -O0 -mtriple=x86_64-linux-gnu    -global-isel -verify-machineinstrs %s -o - | FileCheck %s
 
 define void @uncondbr() {
 ; CHECK-LABEL: uncondbr:

diff --git a/llvm/test/CodeGen/X86/GlobalISel/brcond.ll b/llvm/test/CodeGen/X86/GlobalISel/brcond.ll
index f5ff04830705..fa459363e01b 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/brcond.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/brcond.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu    -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64
-; RUN: llc -mtriple=i386-linux-gnu      -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32
+; RUN: llc -mtriple=x86_64-linux-gnu    -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=X64
+; RUN: llc -mtriple=i386-linux-gnu      -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=X86
 
 define i32 @test_1(i32 %a, i32 %b, i32 %tValue, i32 %fValue) {
 ; X64-LABEL: test_1:
@@ -18,26 +18,26 @@ define i32 @test_1(i32 %a, i32 %b, i32 %tValue, i32 %fValue) {
 ; X64-NEXT:    movl -{{[0-9]+}}(%rsp), %eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: test_1:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    pushl %eax
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    cmpl %eax, {{[0-9]+}}(%esp)
-; X32-NEXT:    setl %al
-; X32-NEXT:    testb $1, %al
-; X32-NEXT:    je .LBB0_2
-; X32-NEXT:  # %bb.1: # %if.then
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    jmp .LBB0_3
-; X32-NEXT:  .LBB0_2: # %if.else
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:  .LBB0_3: # %return
-; X32-NEXT:    movl %eax, (%esp)
-; X32-NEXT:    movl (%esp), %eax
-; X32-NEXT:    popl %ecx
-; X32-NEXT:    .cfi_def_cfa_offset 4
-; X32-NEXT:    retl
+; X86-LABEL: test_1:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    pushl %eax
+; X86-NEXT:    .cfi_def_cfa_offset 8
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    cmpl %eax, {{[0-9]+}}(%esp)
+; X86-NEXT:    setl %al
+; X86-NEXT:    testb $1, %al
+; X86-NEXT:    je .LBB0_2
+; X86-NEXT:  # %bb.1: # %if.then
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    jmp .LBB0_3
+; X86-NEXT:  .LBB0_2: # %if.else
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:  .LBB0_3: # %return
+; X86-NEXT:    movl %eax, (%esp)
+; X86-NEXT:    movl (%esp), %eax
+; X86-NEXT:    popl %ecx
+; X86-NEXT:    .cfi_def_cfa_offset 4
+; X86-NEXT:    retl
 entry:
   %retval = alloca i32, align 4
   %cmp = icmp slt i32 %a, %b
@@ -68,17 +68,17 @@ define i32 @test_2(i32 %a) {
 ; X64-NEXT:    movl $1, %eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: test_2:
-; X32:       # %bb.0: # %entry
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    testb $1, %al
-; X32-NEXT:    je .LBB1_2
-; X32-NEXT:  # %bb.1: # %if.then
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    retl
-; X32-NEXT:  .LBB1_2: # %if.else
-; X32-NEXT:    movl $1, %eax
-; X32-NEXT:    retl
+; X86-LABEL: test_2:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    testb $1, %al
+; X86-NEXT:    je .LBB1_2
+; X86-NEXT:  # %bb.1: # %if.then
+; X86-NEXT:    xorl %eax, %eax
+; X86-NEXT:    retl
+; X86-NEXT:  .LBB1_2: # %if.else
+; X86-NEXT:    movl $1, %eax
+; X86-NEXT:    retl
 entry:
   %cmp = trunc i32 %a to i1
   br i1 %cmp, label %if.then, label %if.else

diff --git a/llvm/test/CodeGen/X86/GlobalISel/constant.ll b/llvm/test/CodeGen/X86/GlobalISel/constant.ll
index 479ac84239d8..77751cc6b6a0 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/constant.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/constant.ll
@@ -1,62 +1,62 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s
 
 define i8 @const_i8() {
-; ALL-LABEL: const_i8:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movb $2, %al
-; ALL-NEXT:    retq
+; CHECK-LABEL: const_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movb $2, %al
+; CHECK-NEXT:    retq
   ret i8 2
 }
 
 define i16 @const_i16() {
-; ALL-LABEL: const_i16:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movw $3, %ax
-; ALL-NEXT:    retq
+; CHECK-LABEL: const_i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movw $3, %ax
+; CHECK-NEXT:    retq
   ret i16 3
 }
 
 define i32 @const_i32() {
-; ALL-LABEL: const_i32:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movl $4, %eax
-; ALL-NEXT:    retq
+; CHECK-LABEL: const_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $4, %eax
+; CHECK-NEXT:    retq
   ret i32 4
 }
 
 define i64 @const_i64() {
-; ALL-LABEL: const_i64:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movabsq $68719476720, %rax # imm = 0xFFFFFFFF0
-; ALL-NEXT:    retq
+; CHECK-LABEL: const_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movabsq $68719476720, %rax # imm = 0xFFFFFFFF0
+; CHECK-NEXT:    retq
   ret i64 68719476720
 }
 
 ;i64 value fit into u32
 define i64 @const_i64_u32() {
-; ALL-LABEL: const_i64_u32:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movl $1879048192, %eax # imm = 0x70000000
-; ALL-NEXT:    retq
+; CHECK-LABEL: const_i64_u32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl $1879048192, %eax # imm = 0x70000000
+; CHECK-NEXT:    retq
   ret i64 1879048192
 }
 
 ;i64 value fit into i32
 define i64 @const_i64_i32() {
-; ALL-LABEL: const_i64_i32:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movq $-1, %rax
-; ALL-NEXT:    retq
+; CHECK-LABEL: const_i64_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq $-1, %rax
+; CHECK-NEXT:    retq
   ret i64 -1
 }
 
 define void @main(i32 ** %data) {
-; ALL-LABEL: main:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movq $0, %rax
-; ALL-NEXT:    movq %rax, (%rdi)
-; ALL-NEXT:    retq
+; CHECK-LABEL: main:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movq $0, %rax
+; CHECK-NEXT:    movq %rax, (%rdi)
+; CHECK-NEXT:    retq
   store i32* null, i32** %data, align 8
   ret void
 }

diff --git a/llvm/test/CodeGen/X86/GlobalISel/fadd-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/fadd-scalar.ll
index 0fa1142c30a6..73be29e5d843 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/fadd-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/fadd-scalar.ll
@@ -1,19 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
+
 define float @test_fadd_float(float %arg1, float %arg2) {
-; ALL-LABEL: test_fadd_float:
-; ALL:       # %bb.0:
-; ALL-NEXT:    addss %xmm1, %xmm0
-; ALL-NEXT:    retq
+; X64-LABEL: test_fadd_float:
+; X64:       # %bb.0:
+; X64-NEXT:    addss %xmm1, %xmm0
+; X64-NEXT:    retq
   %ret = fadd float %arg1, %arg2
   ret float %ret
 }
 
 define double @test_fadd_double(double %arg1, double %arg2) {
-; ALL-LABEL: test_fadd_double:
-; ALL:       # %bb.0:
-; ALL-NEXT:    addsd %xmm1, %xmm0
-; ALL-NEXT:    retq
+; X64-LABEL: test_fadd_double:
+; X64:       # %bb.0:
+; X64-NEXT:    addsd %xmm1, %xmm0
+; X64-NEXT:    retq
   %ret = fadd double %arg1, %arg2
   ret double %ret
 }

diff --git a/llvm/test/CodeGen/X86/GlobalISel/fconstant.ll b/llvm/test/CodeGen/X86/GlobalISel/fconstant.ll
index 6c3586acd377..0b73b7730740 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/fconstant.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/fconstant.ll
@@ -1,26 +1,26 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -global-isel                       -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK64 --check-prefix=CHECK_SMALL --check-prefix=CHECK_SMALL64 --check-prefix=CHECK_NOPIC64
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -global-isel -code-model=large     -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK64 --check-prefix=CHECK_LARGE --check-prefix=CHECK_LARGE64
-; RUN: llc -mtriple=i386-linux-gnu   -mattr=+sse2 -global-isel                       -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK32 --check-prefix=CHECK_SMALL --check-prefix=CHECK_SMALL32
-; RUN: llc -mtriple=i386-linux-gnu   -mattr=+sse2 -global-isel -code-model=large     -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK32 --check-prefix=CHECK_LARGE --check-prefix=CHECK_LARGE32
-; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -global-isel -relocation-model=pic -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK64 --check-prefix=CHECK_SMALL --check-prefix=CHECK_SMALL64 --check-prefix=CHECK_PIC64
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -global-isel                       -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK64_SMALL
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -global-isel -code-model=large     -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK64_LARGE
+; RUN: llc -mtriple=i386-linux-gnu   -mattr=+sse2 -global-isel                       -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK32
+; RUN: llc -mtriple=i386-linux-gnu   -mattr=+sse2 -global-isel -code-model=large     -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK32
+; RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -global-isel -relocation-model=pic -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK64_SMALL
 
 define void @test_float(float* %a , float %b) {
-; CHECK_SMALL64-LABEL: test_float:
-; CHECK_SMALL64:       # %bb.0: # %entry
-; CHECK_SMALL64-NEXT:    movss .LCPI0_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
-; CHECK_SMALL64-NEXT:    addss %xmm0, %xmm1
-; CHECK_SMALL64-NEXT:    movd %xmm1, %eax
-; CHECK_SMALL64-NEXT:    movl %eax, (%rdi)
-; CHECK_SMALL64-NEXT:    retq
+; CHECK64_SMALL-LABEL: test_float:
+; CHECK64_SMALL:       # %bb.0: # %entry
+; CHECK64_SMALL-NEXT:    movss .LCPI0_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
+; CHECK64_SMALL-NEXT:    addss %xmm0, %xmm1
+; CHECK64_SMALL-NEXT:    movd %xmm1, %eax
+; CHECK64_SMALL-NEXT:    movl %eax, (%rdi)
+; CHECK64_SMALL-NEXT:    retq
 ;
-; CHECK_LARGE64-LABEL: test_float:
-; CHECK_LARGE64:       # %bb.0: # %entry
-; CHECK_LARGE64-NEXT:    movabsq $.LCPI0_0, %rax
-; CHECK_LARGE64-NEXT:    addss (%rax), %xmm0
-; CHECK_LARGE64-NEXT:    movd %xmm0, %eax
-; CHECK_LARGE64-NEXT:    movl %eax, (%rdi)
-; CHECK_LARGE64-NEXT:    retq
+; CHECK64_LARGE-LABEL: test_float:
+; CHECK64_LARGE:       # %bb.0: # %entry
+; CHECK64_LARGE-NEXT:    movabsq $.LCPI0_0, %rax
+; CHECK64_LARGE-NEXT:    addss (%rax), %xmm0
+; CHECK64_LARGE-NEXT:    movd %xmm0, %eax
+; CHECK64_LARGE-NEXT:    movl %eax, (%rdi)
+; CHECK64_LARGE-NEXT:    retq
 ;
 ; CHECK32-LABEL: test_float:
 ; CHECK32:       # %bb.0: # %entry

diff --git a/llvm/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll
index e05a36c4997e..f2dc6de08528 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/fdiv-scalar.ll
@@ -1,19 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
+
 define float @test_fdiv_float(float %arg1, float %arg2) {
-; ALL-LABEL: test_fdiv_float:
-; ALL:       # %bb.0:
-; ALL-NEXT:    divss %xmm1, %xmm0
-; ALL-NEXT:    retq
+; X64-LABEL: test_fdiv_float:
+; X64:       # %bb.0:
+; X64-NEXT:    divss %xmm1, %xmm0
+; X64-NEXT:    retq
   %ret = fdiv float %arg1, %arg2
   ret float %ret
 }
 
 define double @test_fdiv_double(double %arg1, double %arg2) {
-; ALL-LABEL: test_fdiv_double:
-; ALL:       # %bb.0:
-; ALL-NEXT:    divsd %xmm1, %xmm0
-; ALL-NEXT:    retq
+; X64-LABEL: test_fdiv_double:
+; X64:       # %bb.0:
+; X64-NEXT:    divsd %xmm1, %xmm0
+; X64-NEXT:    retq
   %ret = fdiv double %arg1, %arg2
   ret double %ret
 }

diff --git a/llvm/test/CodeGen/X86/GlobalISel/fmul-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/fmul-scalar.ll
index c2244cb8a5c3..187593d082c5 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/fmul-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/fmul-scalar.ll
@@ -1,19 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
+
 define float @test_fmul_float(float %arg1, float %arg2) {
-; ALL-LABEL: test_fmul_float:
-; ALL:       # %bb.0:
-; ALL-NEXT:    mulss %xmm1, %xmm0
-; ALL-NEXT:    retq
+; X64-LABEL: test_fmul_float:
+; X64:       # %bb.0:
+; X64-NEXT:    mulss %xmm1, %xmm0
+; X64-NEXT:    retq
   %ret = fmul float %arg1, %arg2
   ret float %ret
 }
 
 define double @test_fmul_double(double %arg1, double %arg2) {
-; ALL-LABEL: test_fmul_double:
-; ALL:       # %bb.0:
-; ALL-NEXT:    mulsd %xmm1, %xmm0
-; ALL-NEXT:    retq
+; X64-LABEL: test_fmul_double:
+; X64:       # %bb.0:
+; X64-NEXT:    mulsd %xmm1, %xmm0
+; X64-NEXT:    retq
   %ret = fmul double %arg1, %arg2
   ret double %ret
 }

diff --git a/llvm/test/CodeGen/X86/GlobalISel/fsub-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/fsub-scalar.ll
index 7fc9dd31490e..b2a82834af5b 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/fsub-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/fsub-scalar.ll
@@ -1,19 +1,20 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
+
 define float @test_fsub_float(float %arg1, float %arg2) {
-; ALL-LABEL: test_fsub_float:
-; ALL:       # %bb.0:
-; ALL-NEXT:    subss %xmm1, %xmm0
-; ALL-NEXT:    retq
+; X64-LABEL: test_fsub_float:
+; X64:       # %bb.0:
+; X64-NEXT:    subss %xmm1, %xmm0
+; X64-NEXT:    retq
   %ret = fsub float %arg1, %arg2
   ret float %ret
 }
 
 define double @test_fsub_double(double %arg1, double %arg2) {
-; ALL-LABEL: test_fsub_double:
-; ALL:       # %bb.0:
-; ALL-NEXT:    subsd %xmm1, %xmm0
-; ALL-NEXT:    retq
+; X64-LABEL: test_fsub_double:
+; X64:       # %bb.0:
+; X64-NEXT:    subsd %xmm1, %xmm0
+; X64-NEXT:    retq
   %ret = fsub double %arg1, %arg2
   ret double %ret
 }

diff --git a/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
index 5eb6052d4948..a4829917c324 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
@@ -1,38 +1,38 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -O0 -mtriple=i386-linux-gnu   -mattr=+sse2 -global-isel -stop-after=irtranslator < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
-; RUN: llc -O0 -mtriple=x86_64-linux-gnu              -global-isel -stop-after=irtranslator < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc -O0 -mtriple=i386-linux-gnu   -mattr=+sse2 -global-isel -stop-after=irtranslator < %s | FileCheck %s --check-prefix=X86
+; RUN: llc -O0 -mtriple=x86_64-linux-gnu              -global-isel -stop-after=irtranslator < %s | FileCheck %s --check-prefix=X64
 
 @a1_8bit = external global i8
 @a7_8bit = external global i8
 @a8_8bit = external global i8
 
-define i8 @test_i8_args_8(i8 %arg1, i8 %arg2, i8 %arg3, i8 %arg4,
-  ; X32-LABEL: name: test_i8_args_8
-  ; X32: bb.1.entry:
-  ; X32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.7
-  ; X32:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.7, align 16)
-  ; X32:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
-  ; X32:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.6, align 4)
-  ; X32:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
-  ; X32:   [[LOAD2:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 1 from %fixed-stack.5, align 8)
-  ; X32:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; X32:   [[LOAD3:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX3]](p0) :: (invariant load 1 from %fixed-stack.4, align 4)
-  ; X32:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; X32:   [[LOAD4:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX4]](p0) :: (invariant load 1 from %fixed-stack.3, align 16)
-  ; X32:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; X32:   [[LOAD5:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX5]](p0) :: (invariant load 1 from %fixed-stack.2, align 4)
-  ; X32:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; X32:   [[LOAD6:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX6]](p0) :: (invariant load 1 from %fixed-stack.1, align 8)
-  ; X32:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; X32:   [[LOAD7:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX7]](p0) :: (invariant load 1 from %fixed-stack.0, align 4)
-  ; X32:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a1_8bit
-  ; X32:   [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a7_8bit
-  ; X32:   [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a8_8bit
-  ; X32:   G_STORE [[LOAD]](s8), [[GV]](p0) :: (store 1 into @a1_8bit)
-  ; X32:   G_STORE [[LOAD6]](s8), [[GV1]](p0) :: (store 1 into @a7_8bit)
-  ; X32:   G_STORE [[LOAD7]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
-  ; X32:   $al = COPY [[LOAD]](s8)
-  ; X32:   RET 0, implicit $al
+define i8 @test_i8_args_8(i8 %arg1, i8 %arg2, i8 %arg3, i8 %arg4, i8 %arg5, i8 %arg6, i8 %arg7, i8 %arg8) {
+  ; X86-LABEL: name: test_i8_args_8
+  ; X86: bb.1.entry:
+  ; X86:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.7
+  ; X86:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.7, align 16)
+  ; X86:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; X86:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.6, align 4)
+  ; X86:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; X86:   [[LOAD2:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 1 from %fixed-stack.5, align 8)
+  ; X86:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; X86:   [[LOAD3:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX3]](p0) :: (invariant load 1 from %fixed-stack.4, align 4)
+  ; X86:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; X86:   [[LOAD4:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX4]](p0) :: (invariant load 1 from %fixed-stack.3, align 16)
+  ; X86:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; X86:   [[LOAD5:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX5]](p0) :: (invariant load 1 from %fixed-stack.2, align 4)
+  ; X86:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; X86:   [[LOAD6:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX6]](p0) :: (invariant load 1 from %fixed-stack.1, align 8)
+  ; X86:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X86:   [[LOAD7:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX7]](p0) :: (invariant load 1 from %fixed-stack.0, align 4)
+  ; X86:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a1_8bit
+  ; X86:   [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a7_8bit
+  ; X86:   [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a8_8bit
+  ; X86:   G_STORE [[LOAD]](s8), [[GV]](p0) :: (store 1 into @a1_8bit)
+  ; X86:   G_STORE [[LOAD6]](s8), [[GV1]](p0) :: (store 1 into @a7_8bit)
+  ; X86:   G_STORE [[LOAD7]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
+  ; X86:   $al = COPY [[LOAD]](s8)
+  ; X86:   RET 0, implicit $al
   ; X64-LABEL: name: test_i8_args_8
   ; X64: bb.1.entry:
   ; X64:   liveins: $ecx, $edi, $edx, $esi, $r8d, $r9d
@@ -60,7 +60,6 @@ define i8 @test_i8_args_8(i8 %arg1, i8 %arg2, i8 %arg3, i8 %arg4,
   ; X64:   G_STORE [[LOAD1]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
   ; X64:   $al = COPY [[TRUNC]](s8)
   ; X64:   RET 0, implicit $al
-		                      i8 %arg5, i8 %arg6, i8 %arg7, i8 %arg8) {
 entry:
   store i8 %arg1, i8* @a1_8bit
   store i8 %arg7, i8* @a7_8bit
@@ -72,33 +71,33 @@ entry:
 @a7_32bit = external global i32
 @a8_32bit = external global i32
 
-define i32 @test_i32_args_8(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4,
-  ; X32-LABEL: name: test_i32_args_8
-  ; X32: bb.1.entry:
-  ; X32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.7
-  ; X32:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.7, align 16)
-  ; X32:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
-  ; X32:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.6)
-  ; X32:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
-  ; X32:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 4 from %fixed-stack.5, align 8)
-  ; X32:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; X32:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (invariant load 4 from %fixed-stack.4)
-  ; X32:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; X32:   [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p0) :: (invariant load 4 from %fixed-stack.3, align 16)
-  ; X32:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; X32:   [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p0) :: (invariant load 4 from %fixed-stack.2)
-  ; X32:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; X32:   [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p0) :: (invariant load 4 from %fixed-stack.1, align 8)
-  ; X32:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; X32:   [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p0) :: (invariant load 4 from %fixed-stack.0)
-  ; X32:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a1_32bit
-  ; X32:   [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a7_32bit
-  ; X32:   [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a8_32bit
-  ; X32:   G_STORE [[LOAD]](s32), [[GV]](p0) :: (store 4 into @a1_32bit)
-  ; X32:   G_STORE [[LOAD6]](s32), [[GV1]](p0) :: (store 4 into @a7_32bit)
-  ; X32:   G_STORE [[LOAD7]](s32), [[GV2]](p0) :: (store 4 into @a8_32bit)
-  ; X32:   $eax = COPY [[LOAD]](s32)
-  ; X32:   RET 0, implicit $eax
+define i32 @test_i32_args_8(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8) {
+  ; X86-LABEL: name: test_i32_args_8
+  ; X86: bb.1.entry:
+  ; X86:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.7
+  ; X86:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.7, align 16)
+  ; X86:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; X86:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.6)
+  ; X86:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; X86:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 4 from %fixed-stack.5, align 8)
+  ; X86:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; X86:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (invariant load 4 from %fixed-stack.4)
+  ; X86:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; X86:   [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p0) :: (invariant load 4 from %fixed-stack.3, align 16)
+  ; X86:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; X86:   [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p0) :: (invariant load 4 from %fixed-stack.2)
+  ; X86:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; X86:   [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p0) :: (invariant load 4 from %fixed-stack.1, align 8)
+  ; X86:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X86:   [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p0) :: (invariant load 4 from %fixed-stack.0)
+  ; X86:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a1_32bit
+  ; X86:   [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a7_32bit
+  ; X86:   [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a8_32bit
+  ; X86:   G_STORE [[LOAD]](s32), [[GV]](p0) :: (store 4 into @a1_32bit)
+  ; X86:   G_STORE [[LOAD6]](s32), [[GV1]](p0) :: (store 4 into @a7_32bit)
+  ; X86:   G_STORE [[LOAD7]](s32), [[GV2]](p0) :: (store 4 into @a8_32bit)
+  ; X86:   $eax = COPY [[LOAD]](s32)
+  ; X86:   RET 0, implicit $eax
   ; X64-LABEL: name: test_i32_args_8
   ; X64: bb.1.entry:
   ; X64:   liveins: $ecx, $edi, $edx, $esi, $r8d, $r9d
@@ -120,7 +119,6 @@ define i32 @test_i32_args_8(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4,
   ; X64:   G_STORE [[LOAD1]](s32), [[GV2]](p0) :: (store 4 into @a8_32bit)
   ; X64:   $eax = COPY [[COPY]](s32)
   ; X64:   RET 0, implicit $eax
-		                        i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8) {
 entry:
   store i32 %arg1, i32* @a1_32bit
   store i32 %arg7, i32* @a7_32bit
@@ -132,59 +130,60 @@ entry:
 @a7_64bit = external global i64
 @a8_64bit = external global i64
 
-define i64 @test_i64_args_8(i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4,
-  ; X32-LABEL: name: test_i64_args_8
-  ; X32: bb.1.entry:
-  ; X32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.15
-  ; X32:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.15, align 16)
-  ; X32:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.14
-  ; X32:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.14)
-  ; X32:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.13
-  ; X32:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 4 from %fixed-stack.13, align 8)
-  ; X32:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.12
-  ; X32:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (invariant load 4 from %fixed-stack.12)
-  ; X32:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.11
-  ; X32:   [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p0) :: (invariant load 4 from %fixed-stack.11, align 16)
-  ; X32:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.10
-  ; X32:   [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p0) :: (invariant load 4 from %fixed-stack.10)
-  ; X32:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.9
-  ; X32:   [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p0) :: (invariant load 4 from %fixed-stack.9, align 8)
-  ; X32:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.8
-  ; X32:   [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p0) :: (invariant load 4 from %fixed-stack.8)
-  ; X32:   [[FRAME_INDEX8:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.7
-  ; X32:   [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX8]](p0) :: (invariant load 4 from %fixed-stack.7, align 16)
-  ; X32:   [[FRAME_INDEX9:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
-  ; X32:   [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX9]](p0) :: (invariant load 4 from %fixed-stack.6)
-  ; X32:   [[FRAME_INDEX10:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
-  ; X32:   [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX10]](p0) :: (invariant load 4 from %fixed-stack.5, align 8)
-  ; X32:   [[FRAME_INDEX11:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
-  ; X32:   [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX11]](p0) :: (invariant load 4 from %fixed-stack.4)
-  ; X32:   [[FRAME_INDEX12:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
-  ; X32:   [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX12]](p0) :: (invariant load 4 from %fixed-stack.3, align 16)
-  ; X32:   [[FRAME_INDEX13:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
-  ; X32:   [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX13]](p0) :: (invariant load 4 from %fixed-stack.2)
-  ; X32:   [[FRAME_INDEX14:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; X32:   [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX14]](p0) :: (invariant load 4 from %fixed-stack.1, align 8)
-  ; X32:   [[FRAME_INDEX15:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; X32:   [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX15]](p0) :: (invariant load 4 from %fixed-stack.0)
-  ; X32:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
-  ; X32:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
-  ; X32:   [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
-  ; X32:   [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
-  ; X32:   [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD8]](s32), [[LOAD9]](s32)
-  ; X32:   [[MV5:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD10]](s32), [[LOAD11]](s32)
-  ; X32:   [[MV6:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD12]](s32), [[LOAD13]](s32)
-  ; X32:   [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD14]](s32), [[LOAD15]](s32)
-  ; X32:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a1_64bit
-  ; X32:   [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a7_64bit
-  ; X32:   [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a8_64bit
-  ; X32:   G_STORE [[MV]](s64), [[GV]](p0) :: (store 8 into @a1_64bit, align 4)
-  ; X32:   G_STORE [[MV6]](s64), [[GV1]](p0) :: (store 8 into @a7_64bit, align 4)
-  ; X32:   G_STORE [[MV7]](s64), [[GV2]](p0) :: (store 8 into @a8_64bit, align 4)
-  ; X32:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
-  ; X32:   $eax = COPY [[UV]](s32)
-  ; X32:   $edx = COPY [[UV1]](s32)
-  ; X32:   RET 0, implicit $eax, implicit $edx
+; ... a bunch more that we don't track ...
+define i64 @test_i64_args_8(i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %arg5, i64 %arg6, i64 %arg7, i64 %arg8) {
+  ; X86-LABEL: name: test_i64_args_8
+  ; X86: bb.1.entry:
+  ; X86:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.15
+  ; X86:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.15, align 16)
+  ; X86:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.14
+  ; X86:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.14)
+  ; X86:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.13
+  ; X86:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (invariant load 4 from %fixed-stack.13, align 8)
+  ; X86:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.12
+  ; X86:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (invariant load 4 from %fixed-stack.12)
+  ; X86:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.11
+  ; X86:   [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p0) :: (invariant load 4 from %fixed-stack.11, align 16)
+  ; X86:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.10
+  ; X86:   [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p0) :: (invariant load 4 from %fixed-stack.10)
+  ; X86:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.9
+  ; X86:   [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p0) :: (invariant load 4 from %fixed-stack.9, align 8)
+  ; X86:   [[FRAME_INDEX7:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.8
+  ; X86:   [[LOAD7:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX7]](p0) :: (invariant load 4 from %fixed-stack.8)
+  ; X86:   [[FRAME_INDEX8:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.7
+  ; X86:   [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX8]](p0) :: (invariant load 4 from %fixed-stack.7, align 16)
+  ; X86:   [[FRAME_INDEX9:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; X86:   [[LOAD9:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX9]](p0) :: (invariant load 4 from %fixed-stack.6)
+  ; X86:   [[FRAME_INDEX10:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; X86:   [[LOAD10:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX10]](p0) :: (invariant load 4 from %fixed-stack.5, align 8)
+  ; X86:   [[FRAME_INDEX11:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; X86:   [[LOAD11:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX11]](p0) :: (invariant load 4 from %fixed-stack.4)
+  ; X86:   [[FRAME_INDEX12:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; X86:   [[LOAD12:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX12]](p0) :: (invariant load 4 from %fixed-stack.3, align 16)
+  ; X86:   [[FRAME_INDEX13:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; X86:   [[LOAD13:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX13]](p0) :: (invariant load 4 from %fixed-stack.2)
+  ; X86:   [[FRAME_INDEX14:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; X86:   [[LOAD14:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX14]](p0) :: (invariant load 4 from %fixed-stack.1, align 8)
+  ; X86:   [[FRAME_INDEX15:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X86:   [[LOAD15:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX15]](p0) :: (invariant load 4 from %fixed-stack.0)
+  ; X86:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD]](s32), [[LOAD1]](s32)
+  ; X86:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
+  ; X86:   [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD4]](s32), [[LOAD5]](s32)
+  ; X86:   [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD6]](s32), [[LOAD7]](s32)
+  ; X86:   [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD8]](s32), [[LOAD9]](s32)
+  ; X86:   [[MV5:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD10]](s32), [[LOAD11]](s32)
+  ; X86:   [[MV6:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD12]](s32), [[LOAD13]](s32)
+  ; X86:   [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD14]](s32), [[LOAD15]](s32)
+  ; X86:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a1_64bit
+  ; X86:   [[GV1:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a7_64bit
+  ; X86:   [[GV2:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a8_64bit
+  ; X86:   G_STORE [[MV]](s64), [[GV]](p0) :: (store 8 into @a1_64bit, align 4)
+  ; X86:   G_STORE [[MV6]](s64), [[GV1]](p0) :: (store 8 into @a7_64bit, align 4)
+  ; X86:   G_STORE [[MV7]](s64), [[GV2]](p0) :: (store 8 into @a8_64bit, align 4)
+  ; X86:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
+  ; X86:   $eax = COPY [[UV]](s32)
+  ; X86:   $edx = COPY [[UV1]](s32)
+  ; X86:   RET 0, implicit $eax, implicit $edx
   ; X64-LABEL: name: test_i64_args_8
   ; X64: bb.1.entry:
   ; X64:   liveins: $rcx, $rdi, $rdx, $rsi, $r8, $r9
@@ -206,8 +205,6 @@ define i64 @test_i64_args_8(i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4,
   ; X64:   G_STORE [[LOAD1]](s64), [[GV2]](p0) :: (store 8 into @a8_64bit)
   ; X64:   $rax = COPY [[COPY]](s64)
   ; X64:   RET 0, implicit $rax
-                            i64 %arg5, i64 %arg6, i64 %arg7, i64 %arg8) {
-; ... a bunch more that we don't track ...
 entry:
   store i64 %arg1, i64* @a1_64bit
   store i64 %arg7, i64* @a7_64bit
@@ -216,15 +213,15 @@ entry:
 }
 
 define float @test_float_args(float %arg1, float %arg2) {
-  ; X32-LABEL: name: test_float_args
-  ; X32: bb.1 (%ir-block.0):
-  ; X32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; X32:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 16)
-  ; X32:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; X32:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0)
-  ; X32:   [[ANYEXT:%[0-9]+]]:_(s80) = G_ANYEXT [[LOAD1]](s32)
-  ; X32:   $fp0 = COPY [[ANYEXT]](s80)
-  ; X32:   RET 0, implicit $fp0
+  ; X86-LABEL: name: test_float_args
+  ; X86: bb.1 (%ir-block.0):
+  ; X86:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; X86:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 16)
+  ; X86:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X86:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0)
+  ; X86:   [[ANYEXT:%[0-9]+]]:_(s80) = G_ANYEXT [[LOAD1]](s32)
+  ; X86:   $fp0 = COPY [[ANYEXT]](s80)
+  ; X86:   RET 0, implicit $fp0
   ; X64-LABEL: name: test_float_args
   ; X64: bb.1 (%ir-block.0):
   ; X64:   liveins: $xmm0, $xmm1
@@ -239,15 +236,15 @@ define float @test_float_args(float %arg1, float %arg2) {
 }
 
 define double @test_double_args(double %arg1, double %arg2) {
-  ; X32-LABEL: name: test_double_args
-  ; X32: bb.1 (%ir-block.0):
-  ; X32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; X32:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 16)
-  ; X32:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; X32:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 8 from %fixed-stack.0)
-  ; X32:   [[ANYEXT:%[0-9]+]]:_(s80) = G_ANYEXT [[LOAD1]](s64)
-  ; X32:   $fp0 = COPY [[ANYEXT]](s80)
-  ; X32:   RET 0, implicit $fp0
+  ; X86-LABEL: name: test_double_args
+  ; X86: bb.1 (%ir-block.0):
+  ; X86:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; X86:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 16)
+  ; X86:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X86:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 8 from %fixed-stack.0)
+  ; X86:   [[ANYEXT:%[0-9]+]]:_(s80) = G_ANYEXT [[LOAD1]](s64)
+  ; X86:   $fp0 = COPY [[ANYEXT]](s80)
+  ; X86:   RET 0, implicit $fp0
   ; X64-LABEL: name: test_double_args
   ; X64: bb.1 (%ir-block.0):
   ; X64:   liveins: $xmm0, $xmm1
@@ -262,13 +259,13 @@ define double @test_double_args(double %arg1, double %arg2) {
 }
 
 define <4 x i32> @test_v4i32_args(<4 x i32> %arg1, <4 x i32> %arg2) {
-  ; X32-LABEL: name: test_v4i32_args
-  ; X32: bb.1 (%ir-block.0):
-  ; X32:   liveins: $xmm0, $xmm1
-  ; X32:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
-  ; X32:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
-  ; X32:   $xmm0 = COPY [[COPY1]](<4 x s32>)
-  ; X32:   RET 0, implicit $xmm0
+  ; X86-LABEL: name: test_v4i32_args
+  ; X86: bb.1 (%ir-block.0):
+  ; X86:   liveins: $xmm0, $xmm1
+  ; X86:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+  ; X86:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+  ; X86:   $xmm0 = COPY [[COPY1]](<4 x s32>)
+  ; X86:   RET 0, implicit $xmm0
   ; X64-LABEL: name: test_v4i32_args
   ; X64: bb.1 (%ir-block.0):
   ; X64:   liveins: $xmm0, $xmm1
@@ -280,16 +277,16 @@ define <4 x i32> @test_v4i32_args(<4 x i32> %arg1, <4 x i32> %arg2) {
 }
 
 define <8 x i32> @test_v8i32_args(<8 x i32> %arg1) {
-  ; X32-LABEL: name: test_v8i32_args
-  ; X32: bb.1 (%ir-block.0):
-  ; X32:   liveins: $xmm0, $xmm1
-  ; X32:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
-  ; X32:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
-  ; X32:   [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
-  ; X32:   [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x s32>)
-  ; X32:   $xmm0 = COPY [[UV]](<4 x s32>)
-  ; X32:   $xmm1 = COPY [[UV1]](<4 x s32>)
-  ; X32:   RET 0, implicit $xmm0, implicit $xmm1
+  ; X86-LABEL: name: test_v8i32_args
+  ; X86: bb.1 (%ir-block.0):
+  ; X86:   liveins: $xmm0, $xmm1
+  ; X86:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+  ; X86:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+  ; X86:   [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
+  ; X86:   [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<8 x s32>)
+  ; X86:   $xmm0 = COPY [[UV]](<4 x s32>)
+  ; X86:   $xmm1 = COPY [[UV1]](<4 x s32>)
+  ; X86:   RET 0, implicit $xmm0, implicit $xmm1
   ; X64-LABEL: name: test_v8i32_args
   ; X64: bb.1 (%ir-block.0):
   ; X64:   liveins: $xmm0, $xmm1
@@ -304,9 +301,9 @@ define <8 x i32> @test_v8i32_args(<8 x i32> %arg1) {
 }
 
 define void @test_void_return() {
-  ; X32-LABEL: name: test_void_return
-  ; X32: bb.1.entry:
-  ; X32:   RET 0
+  ; X86-LABEL: name: test_void_return
+  ; X86: bb.1.entry:
+  ; X86:   RET 0
   ; X64-LABEL: name: test_void_return
   ; X64: bb.1.entry:
   ; X64:   RET 0
@@ -315,12 +312,12 @@ entry:
 }
 
 define i32 * @test_memop_i32(i32 * %p1) {
-  ; X32-LABEL: name: test_memop_i32
-  ; X32: bb.1 (%ir-block.0):
-  ; X32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; X32:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
-  ; X32:   $eax = COPY [[LOAD]](p0)
-  ; X32:   RET 0, implicit $eax
+  ; X86-LABEL: name: test_memop_i32
+  ; X86: bb.1 (%ir-block.0):
+  ; X86:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X86:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
+  ; X86:   $eax = COPY [[LOAD]](p0)
+  ; X86:   RET 0, implicit $eax
   ; X64-LABEL: name: test_memop_i32
   ; X64: bb.1 (%ir-block.0):
   ; X64:   liveins: $rdi
@@ -332,12 +329,12 @@ define i32 * @test_memop_i32(i32 * %p1) {
 
 declare void @trivial_callee()
 define void @test_trivial_call() {
-  ; X32-LABEL: name: test_trivial_call
-  ; X32: bb.1 (%ir-block.0):
-  ; X32:   ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   CALLpcrel32 @trivial_callee, csr_32, implicit $esp, implicit $ssp
-  ; X32:   ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   RET 0
+  ; X86-LABEL: name: test_trivial_call
+  ; X86: bb.1 (%ir-block.0):
+  ; X86:   ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   CALLpcrel32 @trivial_callee, csr_32, implicit $esp, implicit $ssp
+  ; X86:   ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   RET 0
   ; X64-LABEL: name: test_trivial_call
   ; X64: bb.1 (%ir-block.0):
   ; X64:   ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
@@ -350,24 +347,24 @@ define void @test_trivial_call() {
 
 declare void @simple_arg_callee(i32 %in0, i32 %in1)
 define void @test_simple_arg(i32 %in0, i32 %in1) {
-  ; X32-LABEL: name: test_simple_arg
-  ; X32: bb.1 (%ir-block.0):
-  ; X32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; X32:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 16)
-  ; X32:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; X32:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0)
-  ; X32:   ADJCALLSTACKDOWN32 8, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   [[COPY:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-  ; X32:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
-  ; X32:   G_STORE [[LOAD1]](s32), [[GEP]](p0) :: (store 4 into stack, align 1)
-  ; X32:   [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-  ; X32:   [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
-  ; X32:   G_STORE [[LOAD]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 1)
-  ; X32:   CALLpcrel32 @simple_arg_callee, csr_32, implicit $esp, implicit $ssp
-  ; X32:   ADJCALLSTACKUP32 8, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   RET 0
+  ; X86-LABEL: name: test_simple_arg
+  ; X86: bb.1 (%ir-block.0):
+  ; X86:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; X86:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 16)
+  ; X86:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X86:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0)
+  ; X86:   ADJCALLSTACKDOWN32 8, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   [[COPY:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; X86:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+  ; X86:   G_STORE [[LOAD1]](s32), [[PTR_ADD]](p0) :: (store 4 into stack, align 1)
+  ; X86:   [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; X86:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
+  ; X86:   G_STORE [[LOAD]](s32), [[PTR_ADD1]](p0) :: (store 4 into stack + 4, align 1)
+  ; X86:   CALLpcrel32 @simple_arg_callee, csr_32, implicit $esp, implicit $ssp
+  ; X86:   ADJCALLSTACKUP32 8, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   RET 0
   ; X64-LABEL: name: test_simple_arg
   ; X64: bb.1 (%ir-block.0):
   ; X64:   liveins: $edi, $esi
@@ -385,46 +382,46 @@ define void @test_simple_arg(i32 %in0, i32 %in1) {
 
 declare void @simple_arg8_callee(i32 %arg1, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8)
 define void @test_simple_arg8_call(i32 %in0) {
-  ; X32-LABEL: name: test_simple_arg8_call
-  ; X32: bb.1 (%ir-block.0):
-  ; X32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; X32:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
-  ; X32:   ADJCALLSTACKDOWN32 32, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   [[COPY:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-  ; X32:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
-  ; X32:   G_STORE [[LOAD]](s32), [[GEP]](p0) :: (store 4 into stack, align 1)
-  ; X32:   [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-  ; X32:   [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
-  ; X32:   G_STORE [[LOAD]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 1)
-  ; X32:   [[COPY2:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
-  ; X32:   [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s32)
-  ; X32:   G_STORE [[LOAD]](s32), [[GEP2]](p0) :: (store 4 into stack + 8, align 1)
-  ; X32:   [[COPY3:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
-  ; X32:   [[GEP3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY3]], [[C3]](s32)
-  ; X32:   G_STORE [[LOAD]](s32), [[GEP3]](p0) :: (store 4 into stack + 12, align 1)
-  ; X32:   [[COPY4:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-  ; X32:   [[GEP4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C4]](s32)
-  ; X32:   G_STORE [[LOAD]](s32), [[GEP4]](p0) :: (store 4 into stack + 16, align 1)
-  ; X32:   [[COPY5:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
-  ; X32:   [[GEP5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY5]], [[C5]](s32)
-  ; X32:   G_STORE [[LOAD]](s32), [[GEP5]](p0) :: (store 4 into stack + 20, align 1)
-  ; X32:   [[COPY6:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
-  ; X32:   [[GEP6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY6]], [[C6]](s32)
-  ; X32:   G_STORE [[LOAD]](s32), [[GEP6]](p0) :: (store 4 into stack + 24, align 1)
-  ; X32:   [[COPY7:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
-  ; X32:   [[GEP7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY7]], [[C7]](s32)
-  ; X32:   G_STORE [[LOAD]](s32), [[GEP7]](p0) :: (store 4 into stack + 28, align 1)
-  ; X32:   CALLpcrel32 @simple_arg8_callee, csr_32, implicit $esp, implicit $ssp
-  ; X32:   ADJCALLSTACKUP32 32, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   RET 0
+  ; X86-LABEL: name: test_simple_arg8_call
+  ; X86: bb.1 (%ir-block.0):
+  ; X86:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X86:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
+  ; X86:   ADJCALLSTACKDOWN32 32, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   [[COPY:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; X86:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+  ; X86:   G_STORE [[LOAD]](s32), [[PTR_ADD]](p0) :: (store 4 into stack, align 1)
+  ; X86:   [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; X86:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
+  ; X86:   G_STORE [[LOAD]](s32), [[PTR_ADD1]](p0) :: (store 4 into stack + 4, align 1)
+  ; X86:   [[COPY2:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+  ; X86:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s32)
+  ; X86:   G_STORE [[LOAD]](s32), [[PTR_ADD2]](p0) :: (store 4 into stack + 8, align 1)
+  ; X86:   [[COPY3:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
+  ; X86:   [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY3]], [[C3]](s32)
+  ; X86:   G_STORE [[LOAD]](s32), [[PTR_ADD3]](p0) :: (store 4 into stack + 12, align 1)
+  ; X86:   [[COPY4:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+  ; X86:   [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY4]], [[C4]](s32)
+  ; X86:   G_STORE [[LOAD]](s32), [[PTR_ADD4]](p0) :: (store 4 into stack + 16, align 1)
+  ; X86:   [[COPY5:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
+  ; X86:   [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY5]], [[C5]](s32)
+  ; X86:   G_STORE [[LOAD]](s32), [[PTR_ADD5]](p0) :: (store 4 into stack + 20, align 1)
+  ; X86:   [[COPY6:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+  ; X86:   [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY6]], [[C6]](s32)
+  ; X86:   G_STORE [[LOAD]](s32), [[PTR_ADD6]](p0) :: (store 4 into stack + 24, align 1)
+  ; X86:   [[COPY7:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
+  ; X86:   [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY7]], [[C7]](s32)
+  ; X86:   G_STORE [[LOAD]](s32), [[PTR_ADD7]](p0) :: (store 4 into stack + 28, align 1)
+  ; X86:   CALLpcrel32 @simple_arg8_callee, csr_32, implicit $esp, implicit $ssp
+  ; X86:   ADJCALLSTACKUP32 32, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   RET 0
   ; X64-LABEL: name: test_simple_arg8_call
   ; X64: bb.1 (%ir-block.0):
   ; X64:   liveins: $edi
@@ -438,12 +435,12 @@ define void @test_simple_arg8_call(i32 %in0) {
   ; X64:   $r9d = COPY [[COPY]](s32)
   ; X64:   [[COPY1:%[0-9]+]]:_(p0) = COPY $rsp
   ; X64:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-  ; X64:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
-  ; X64:   G_STORE [[COPY]](s32), [[GEP]](p0) :: (store 4 into stack, align 1)
+  ; X64:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s64)
+  ; X64:   G_STORE [[COPY]](s32), [[PTR_ADD]](p0) :: (store 4 into stack, align 1)
   ; X64:   [[COPY2:%[0-9]+]]:_(p0) = COPY $rsp
   ; X64:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
-  ; X64:   [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C1]](s64)
-  ; X64:   G_STORE [[COPY]](s32), [[GEP1]](p0) :: (store 4 into stack + 8, align 1)
+  ; X64:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C1]](s64)
+  ; X64:   G_STORE [[COPY]](s32), [[PTR_ADD1]](p0) :: (store 4 into stack + 8, align 1)
   ; X64:   CALL64pcrel32 @simple_arg8_callee, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit $esi, implicit $edx, implicit $ecx, implicit $r8d, implicit $r9d
   ; X64:   ADJCALLSTACKUP64 16, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
   ; X64:   RET 0
@@ -453,20 +450,20 @@ define void @test_simple_arg8_call(i32 %in0) {
 
 declare i32 @simple_return_callee(i32 %in0)
 define i32 @test_simple_return_callee() {
-  ; X32-LABEL: name: test_simple_return_callee
-  ; X32: bb.1 (%ir-block.0):
-  ; X32:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
-  ; X32:   ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   [[COPY:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-  ; X32:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s32)
-  ; X32:   G_STORE [[C]](s32), [[GEP]](p0) :: (store 4 into stack, align 1)
-  ; X32:   CALLpcrel32 @simple_return_callee, csr_32, implicit $esp, implicit $ssp, implicit-def $eax
-  ; X32:   [[COPY1:%[0-9]+]]:_(s32) = COPY $eax
-  ; X32:   ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY1]]
-  ; X32:   $eax = COPY [[ADD]](s32)
-  ; X32:   RET 0, implicit $eax
+  ; X86-LABEL: name: test_simple_return_callee
+  ; X86: bb.1 (%ir-block.0):
+  ; X86:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+  ; X86:   ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   [[COPY:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; X86:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C1]](s32)
+  ; X86:   G_STORE [[C]](s32), [[PTR_ADD]](p0) :: (store 4 into stack, align 1)
+  ; X86:   CALLpcrel32 @simple_return_callee, csr_32, implicit $esp, implicit $ssp, implicit-def $eax
+  ; X86:   [[COPY1:%[0-9]+]]:_(s32) = COPY $eax
+  ; X86:   ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY1]]
+  ; X86:   $eax = COPY [[ADD]](s32)
+  ; X86:   RET 0, implicit $eax
   ; X64-LABEL: name: test_simple_return_callee
   ; X64: bb.1 (%ir-block.0):
   ; X64:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
@@ -485,30 +482,30 @@ define i32 @test_simple_return_callee() {
 
 declare <8 x i32> @split_return_callee(<8 x i32> %in0)
 define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
-  ; X32-LABEL: name: test_split_return_callee
-  ; X32: bb.1 (%ir-block.0):
-  ; X32:   liveins: $xmm0, $xmm1, $xmm2
-  ; X32:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
-  ; X32:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
-  ; X32:   [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $xmm2
-  ; X32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; X32:   [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 16 from %fixed-stack.0)
-  ; X32:   [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
-  ; X32:   [[CONCAT_VECTORS1:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY2]](<4 x s32>), [[LOAD]](<4 x s32>)
-  ; X32:   ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[CONCAT_VECTORS1]](<8 x s32>)
-  ; X32:   $xmm0 = COPY [[UV]](<4 x s32>)
-  ; X32:   $xmm1 = COPY [[UV1]](<4 x s32>)
-  ; X32:   CALLpcrel32 @split_return_callee, csr_32, implicit $esp, implicit $ssp, implicit $xmm0, implicit $xmm1, implicit-def $xmm0, implicit-def $xmm1
-  ; X32:   [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
-  ; X32:   [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
-  ; X32:   [[CONCAT_VECTORS2:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY3]](<4 x s32>), [[COPY4]](<4 x s32>)
-  ; X32:   ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[CONCAT_VECTORS]], [[CONCAT_VECTORS2]]
-  ; X32:   [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[ADD]](<8 x s32>)
-  ; X32:   $xmm0 = COPY [[UV2]](<4 x s32>)
-  ; X32:   $xmm1 = COPY [[UV3]](<4 x s32>)
-  ; X32:   RET 0, implicit $xmm0, implicit $xmm1
+  ; X86-LABEL: name: test_split_return_callee
+  ; X86: bb.1 (%ir-block.0):
+  ; X86:   liveins: $xmm0, $xmm1, $xmm2
+  ; X86:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+  ; X86:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+  ; X86:   [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $xmm2
+  ; X86:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X86:   [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 16 from %fixed-stack.0)
+  ; X86:   [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
+  ; X86:   [[CONCAT_VECTORS1:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY2]](<4 x s32>), [[LOAD]](<4 x s32>)
+  ; X86:   ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[CONCAT_VECTORS1]](<8 x s32>)
+  ; X86:   $xmm0 = COPY [[UV]](<4 x s32>)
+  ; X86:   $xmm1 = COPY [[UV1]](<4 x s32>)
+  ; X86:   CALLpcrel32 @split_return_callee, csr_32, implicit $esp, implicit $ssp, implicit $xmm0, implicit $xmm1, implicit-def $xmm0, implicit-def $xmm1
+  ; X86:   [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+  ; X86:   [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+  ; X86:   [[CONCAT_VECTORS2:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY3]](<4 x s32>), [[COPY4]](<4 x s32>)
+  ; X86:   ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[CONCAT_VECTORS]], [[CONCAT_VECTORS2]]
+  ; X86:   [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[ADD]](<8 x s32>)
+  ; X86:   $xmm0 = COPY [[UV2]](<4 x s32>)
+  ; X86:   $xmm1 = COPY [[UV3]](<4 x s32>)
+  ; X86:   RET 0, implicit $xmm0, implicit $xmm1
   ; X64-LABEL: name: test_split_return_callee
   ; X64: bb.1 (%ir-block.0):
   ; X64:   liveins: $xmm0, $xmm1, $xmm2, $xmm3
@@ -538,14 +535,14 @@ define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
 }
 
 define void @test_indirect_call(void()* %func) {
-  ; X32-LABEL: name: test_indirect_call
-  ; X32: bb.1 (%ir-block.0):
-  ; X32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; X32:   [[LOAD:%[0-9]+]]:gr32(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
-  ; X32:   ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   CALL32r [[LOAD]](p0), csr_32, implicit $esp, implicit $ssp
-  ; X32:   ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   RET 0
+  ; X86-LABEL: name: test_indirect_call
+  ; X86: bb.1 (%ir-block.0):
+  ; X86:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X86:   [[LOAD:%[0-9]+]]:gr32(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
+  ; X86:   ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   CALL32r [[LOAD]](p0), csr_32, implicit $esp, implicit $ssp
+  ; X86:   ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   RET 0
   ; X64-LABEL: name: test_indirect_call
   ; X64: bb.1 (%ir-block.0):
   ; X64:   liveins: $rdi
@@ -561,34 +558,34 @@ define void @test_indirect_call(void()* %func) {
 
 declare void @take_char(i8)
 define void @test_abi_exts_call(i8* %addr) {
-  ; X32-LABEL: name: test_abi_exts_call
-  ; X32: bb.1 (%ir-block.0):
-  ; X32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; X32:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
-  ; X32:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p0) :: (load 1 from %ir.addr)
-  ; X32:   ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   [[COPY:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-  ; X32:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
-  ; X32:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD1]](s8)
-  ; X32:   G_STORE [[ANYEXT]](s32), [[GEP]](p0) :: (store 4 into stack, align 1)
-  ; X32:   CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
-  ; X32:   ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s32)
-  ; X32:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s8)
-  ; X32:   G_STORE [[SEXT]](s32), [[GEP1]](p0) :: (store 4 into stack, align 1)
-  ; X32:   CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
-  ; X32:   ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   [[COPY3:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[GEP2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY3]], [[C]](s32)
-  ; X32:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
-  ; X32:   G_STORE [[ZEXT]](s32), [[GEP2]](p0) :: (store 4 into stack, align 1)
-  ; X32:   CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
-  ; X32:   ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   RET 0
+  ; X86-LABEL: name: test_abi_exts_call
+  ; X86: bb.1 (%ir-block.0):
+  ; X86:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X86:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
+  ; X86:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p0) :: (load 1 from %ir.addr)
+  ; X86:   ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   [[COPY:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; X86:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+  ; X86:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD1]](s8)
+  ; X86:   G_STORE [[ANYEXT]](s32), [[PTR_ADD]](p0) :: (store 4 into stack, align 1)
+  ; X86:   CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
+  ; X86:   ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C]](s32)
+  ; X86:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s8)
+  ; X86:   G_STORE [[SEXT]](s32), [[PTR_ADD1]](p0) :: (store 4 into stack, align 1)
+  ; X86:   CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
+  ; X86:   ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   [[COPY2:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C]](s32)
+  ; X86:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
+  ; X86:   G_STORE [[ZEXT]](s32), [[PTR_ADD2]](p0) :: (store 4 into stack, align 1)
+  ; X86:   CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
+  ; X86:   ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   RET 0
   ; X64-LABEL: name: test_abi_exts_call
   ; X64: bb.1 (%ir-block.0):
   ; X64:   liveins: $rdi
@@ -619,26 +616,26 @@ define void @test_abi_exts_call(i8* %addr) {
 
 declare void @variadic_callee(i8*, ...)
 define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) {
-  ; X32-LABEL: name: test_variadic_call_1
-  ; X32: bb.1 (%ir-block.0):
-  ; X32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; X32:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 16)
-  ; X32:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; X32:   [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0)
-  ; X32:   [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.addr_ptr)
-  ; X32:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[LOAD1]](p0) :: (load 4 from %ir.val_ptr)
-  ; X32:   ADJCALLSTACKDOWN32 8, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   [[COPY:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-  ; X32:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
-  ; X32:   G_STORE [[LOAD2]](p0), [[GEP]](p0) :: (store 4 into stack, align 1)
-  ; X32:   [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-  ; X32:   [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
-  ; X32:   G_STORE [[LOAD3]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 1)
-  ; X32:   CALLpcrel32 @variadic_callee, csr_32, implicit $esp, implicit $ssp
-  ; X32:   ADJCALLSTACKUP32 8, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   RET 0
+  ; X86-LABEL: name: test_variadic_call_1
+  ; X86: bb.1 (%ir-block.0):
+  ; X86:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; X86:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 16)
+  ; X86:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X86:   [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0)
+  ; X86:   [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.addr_ptr)
+  ; X86:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[LOAD1]](p0) :: (load 4 from %ir.val_ptr)
+  ; X86:   ADJCALLSTACKDOWN32 8, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   [[COPY:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; X86:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+  ; X86:   G_STORE [[LOAD2]](p0), [[PTR_ADD]](p0) :: (store 4 into stack, align 1)
+  ; X86:   [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; X86:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
+  ; X86:   G_STORE [[LOAD3]](s32), [[PTR_ADD1]](p0) :: (store 4 into stack + 4, align 1)
+  ; X86:   CALLpcrel32 @variadic_callee, csr_32, implicit $esp, implicit $ssp
+  ; X86:   ADJCALLSTACKUP32 8, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   RET 0
   ; X64-LABEL: name: test_variadic_call_1
   ; X64: bb.1 (%ir-block.0):
   ; X64:   liveins: $rdi, $rsi
@@ -660,26 +657,26 @@ define void @test_variadic_call_1(i8** %addr_ptr, i32* %val_ptr) {
 }
 
 define void @test_variadic_call_2(i8** %addr_ptr, double* %val_ptr) {
-  ; X32-LABEL: name: test_variadic_call_2
-  ; X32: bb.1 (%ir-block.0):
-  ; X32:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
-  ; X32:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 16)
-  ; X32:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
-  ; X32:   [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0)
-  ; X32:   [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.addr_ptr)
-  ; X32:   [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[LOAD1]](p0) :: (load 8 from %ir.val_ptr, align 4)
-  ; X32:   ADJCALLSTACKDOWN32 12, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   [[COPY:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-  ; X32:   [[GEP:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
-  ; X32:   G_STORE [[LOAD2]](p0), [[GEP]](p0) :: (store 4 into stack, align 1)
-  ; X32:   [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
-  ; X32:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
-  ; X32:   [[GEP1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
-  ; X32:   G_STORE [[LOAD3]](s64), [[GEP1]](p0) :: (store 8 into stack + 4, align 1)
-  ; X32:   CALLpcrel32 @variadic_callee, csr_32, implicit $esp, implicit $ssp
-  ; X32:   ADJCALLSTACKUP32 12, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
-  ; X32:   RET 0
+  ; X86-LABEL: name: test_variadic_call_2
+  ; X86: bb.1 (%ir-block.0):
+  ; X86:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; X86:   [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 16)
+  ; X86:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; X86:   [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0)
+  ; X86:   [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.addr_ptr)
+  ; X86:   [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[LOAD1]](p0) :: (load 8 from %ir.val_ptr, align 4)
+  ; X86:   ADJCALLSTACKDOWN32 12, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   [[COPY:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+  ; X86:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s32)
+  ; X86:   G_STORE [[LOAD2]](p0), [[PTR_ADD]](p0) :: (store 4 into stack, align 1)
+  ; X86:   [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
+  ; X86:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
+  ; X86:   [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY1]], [[C1]](s32)
+  ; X86:   G_STORE [[LOAD3]](s64), [[PTR_ADD1]](p0) :: (store 8 into stack + 4, align 1)
+  ; X86:   CALLpcrel32 @variadic_callee, csr_32, implicit $esp, implicit $ssp
+  ; X86:   ADJCALLSTACKUP32 12, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+  ; X86:   RET 0
   ; X64-LABEL: name: test_variadic_call_2
   ; X64: bb.1 (%ir-block.0):
   ; X64:   liveins: $rdi, $rsi

diff --git a/llvm/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll b/llvm/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
index 31fb11179bb8..eb1ee5f00077 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/memop-scalar-x32.ll
@@ -1,111 +1,111 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=i386-linux-gnu                       -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE_FAST
-; RUN: llc -mtriple=i386-linux-gnu -regbankselect-greedy -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE_GREEDY
+; RUN: llc -mtriple=i386-linux-gnu                       -global-isel -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=i386-linux-gnu -regbankselect-greedy -global-isel -verify-machineinstrs < %s | FileCheck %s
 
 ;TODO merge with x86-64 tests (many operations not supported yet)
 
 define i1 @test_load_i1(i1 * %p1) {
-; ALL-LABEL: test_load_i1:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movl 4(%esp), %eax
-; ALL-NEXT:    movb (%eax), %al
-; ALL-NEXT:    retl
+; CHECK-LABEL: test_load_i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl 4(%esp), %eax
+; CHECK-NEXT:    movb (%eax), %al
+; CHECK-NEXT:    retl
   %r = load i1, i1* %p1
   ret i1 %r
 }
 
 define i8 @test_load_i8(i8 * %p1) {
-; ALL-LABEL: test_load_i8:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movl 4(%esp), %eax
-; ALL-NEXT:    movb (%eax), %al
-; ALL-NEXT:    retl
+; CHECK-LABEL: test_load_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl 4(%esp), %eax
+; CHECK-NEXT:    movb (%eax), %al
+; CHECK-NEXT:    retl
   %r = load i8, i8* %p1
   ret i8 %r
 }
 
 define i16 @test_load_i16(i16 * %p1) {
-; ALL-LABEL: test_load_i16:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movl 4(%esp), %eax
-; ALL-NEXT:    movzwl (%eax), %eax
-; ALL-NEXT:    retl
+; CHECK-LABEL: test_load_i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl 4(%esp), %eax
+; CHECK-NEXT:    movzwl (%eax), %eax
+; CHECK-NEXT:    retl
   %r = load i16, i16* %p1
   ret i16 %r
 }
 
 define i32 @test_load_i32(i32 * %p1) {
-; ALL-LABEL: test_load_i32:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movl 4(%esp), %eax
-; ALL-NEXT:    movl (%eax), %eax
-; ALL-NEXT:    retl
+; CHECK-LABEL: test_load_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl 4(%esp), %eax
+; CHECK-NEXT:    movl (%eax), %eax
+; CHECK-NEXT:    retl
   %r = load i32, i32* %p1
   ret i32 %r
 }
 
 define i1 * @test_store_i1(i1 %val, i1 * %p1) {
-; ALL-LABEL: test_store_i1:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movb 4(%esp), %cl
-; ALL-NEXT:    movl 8(%esp), %eax
-; ALL-NEXT:    andb $1, %cl
-; ALL-NEXT:    movb %cl, (%eax)
-; ALL-NEXT:    retl
+; CHECK-LABEL: test_store_i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movb 4(%esp), %cl
+; CHECK-NEXT:    movl 8(%esp), %eax
+; CHECK-NEXT:    andb $1, %cl
+; CHECK-NEXT:    movb %cl, (%eax)
+; CHECK-NEXT:    retl
   store i1 %val, i1* %p1
   ret i1 * %p1;
 }
 
 define i8 * @test_store_i8(i8 %val, i8 * %p1) {
-; ALL-LABEL: test_store_i8:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movb 4(%esp), %cl
-; ALL-NEXT:    movl 8(%esp), %eax
-; ALL-NEXT:    movb %cl, (%eax)
-; ALL-NEXT:    retl
+; CHECK-LABEL: test_store_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movb 4(%esp), %cl
+; CHECK-NEXT:    movl 8(%esp), %eax
+; CHECK-NEXT:    movb %cl, (%eax)
+; CHECK-NEXT:    retl
   store i8 %val, i8* %p1
   ret i8 * %p1;
 }
 
 define i16 * @test_store_i16(i16 %val, i16 * %p1) {
-; ALL-LABEL: test_store_i16:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movzwl 4(%esp), %ecx
-; ALL-NEXT:    movl 8(%esp), %eax
-; ALL-NEXT:    movw %cx, (%eax)
-; ALL-NEXT:    retl
+; CHECK-LABEL: test_store_i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movzwl 4(%esp), %ecx
+; CHECK-NEXT:    movl 8(%esp), %eax
+; CHECK-NEXT:    movw %cx, (%eax)
+; CHECK-NEXT:    retl
   store i16 %val, i16* %p1
   ret i16 * %p1;
 }
 
 define i32 * @test_store_i32(i32 %val, i32 * %p1) {
-; ALL-LABEL: test_store_i32:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movl 4(%esp), %ecx
-; ALL-NEXT:    movl 8(%esp), %eax
-; ALL-NEXT:    movl %ecx, (%eax)
-; ALL-NEXT:    retl
+; CHECK-LABEL: test_store_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl 4(%esp), %ecx
+; CHECK-NEXT:    movl 8(%esp), %eax
+; CHECK-NEXT:    movl %ecx, (%eax)
+; CHECK-NEXT:    retl
   store i32 %val, i32* %p1
   ret i32 * %p1;
 }
 
 define i32* @test_load_ptr(i32** %ptr1) {
-; ALL-LABEL: test_load_ptr:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movl 4(%esp), %eax
-; ALL-NEXT:    movl (%eax), %eax
-; ALL-NEXT:    retl
+; CHECK-LABEL: test_load_ptr:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl 4(%esp), %eax
+; CHECK-NEXT:    movl (%eax), %eax
+; CHECK-NEXT:    retl
   %p = load i32*, i32** %ptr1
   ret i32* %p
 }
 
 define void @test_store_ptr(i32** %ptr1, i32* %a) {
-; ALL-LABEL: test_store_ptr:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movl 4(%esp), %eax
-; ALL-NEXT:    movl 8(%esp), %ecx
-; ALL-NEXT:    movl %ecx, (%eax)
-; ALL-NEXT:    retl
+; CHECK-LABEL: test_store_ptr:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl 4(%esp), %eax
+; CHECK-NEXT:    movl 8(%esp), %ecx
+; CHECK-NEXT:    movl %ecx, (%eax)
+; CHECK-NEXT:    retl
   store i32* %a, i32** %ptr1
   ret void
 }

diff --git a/llvm/test/CodeGen/X86/GlobalISel/mul-scalar.ll b/llvm/test/CodeGen/X86/GlobalISel/mul-scalar.ll
index ddcb63e83d10..ba0499e90350 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/mul-scalar.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/mul-scalar.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
 
 ;TODO: instruction selection not supported yet
 ;define i8 @test_mul_i8(i8 %arg1, i8 %arg2) {
@@ -8,32 +8,32 @@
 ;}
 
 define i16 @test_mul_i16(i16 %arg1, i16 %arg2) {
-; ALL-LABEL: test_mul_i16:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movl %esi, %eax
-; ALL-NEXT:    imulw %di, %ax
-; ALL-NEXT:    # kill: def $ax killed $ax killed $eax
-; ALL-NEXT:    retq
+; X64-LABEL: test_mul_i16:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    imulw %di, %ax
+; X64-NEXT:    # kill: def $ax killed $ax killed $eax
+; X64-NEXT:    retq
   %ret = mul i16 %arg1, %arg2
   ret i16 %ret
 }
 
 define i32 @test_mul_i32(i32 %arg1, i32 %arg2) {
-; ALL-LABEL: test_mul_i32:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movl %esi, %eax
-; ALL-NEXT:    imull %edi, %eax
-; ALL-NEXT:    retq
+; X64-LABEL: test_mul_i32:
+; X64:       # %bb.0:
+; X64-NEXT:    movl %esi, %eax
+; X64-NEXT:    imull %edi, %eax
+; X64-NEXT:    retq
   %ret = mul i32 %arg1, %arg2
   ret i32 %ret
 }
 
 define i64 @test_mul_i64(i64 %arg1, i64 %arg2) {
-; ALL-LABEL: test_mul_i64:
-; ALL:       # %bb.0:
-; ALL-NEXT:    movq %rsi, %rax
-; ALL-NEXT:    imulq %rdi, %rax
-; ALL-NEXT:    retq
+; X64-LABEL: test_mul_i64:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsi, %rax
+; X64-NEXT:    imulq %rdi, %rax
+; X64-NEXT:    retq
   %ret = mul i64 %arg1, %arg2
   ret i64 %ret
 }

diff --git a/llvm/test/CodeGen/X86/GlobalISel/ptr-add.ll b/llvm/test/CodeGen/X86/GlobalISel/ptr-add.ll
index 99689df2f371..494e52f36b41 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/ptr-add.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/ptr-add.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64_GISEL
-; RUN: llc -mtriple=x86_64-linux-gnu              -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+; RUN: llc -mtriple=x86_64-linux-gnu -global-isel -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64_GISEL
+; RUN: llc -mtriple=x86_64-linux-gnu              -verify-machineinstrs < %s -o - | FileCheck %s --check-prefix=X64
 
 define i32* @test_gep_i8(i32 *%arr, i8 %ind) {
 ; X64_GISEL-LABEL: test_gep_i8:

diff --git a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
index 7ce86c93a952..d743b28325cd 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
@@ -1,5 +1,5 @@
-# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 --global-isel                       -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=FAST
-# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 --global-isel -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=GREEDY
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 --global-isel                       -run-pass=regbankselect %s -o - | FileCheck %s
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 --global-isel -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s
 
 --- |
   define void @test_mul_vec256() {

diff --git a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
index 7da4b0122e60..e8cd6ae9308c 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
@@ -1,5 +1,5 @@
-# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f                       -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=FAST
-# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=GREEDY
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f                       -run-pass=regbankselect %s -o - | FileCheck %s
+# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s
 
 --- |
 

diff --git a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X32.mir b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X32.mir
index cd5fa912adde..c69345ccf5a2 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X32.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X32.mir
@@ -1,5 +1,5 @@
-# RUN: llc -mtriple=i386-linux-gnu                       -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=FAST
-# RUN: llc -mtriple=i386-linux-gnu -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=GREEDY
+# RUN: llc -mtriple=i386-linux-gnu                       -run-pass=regbankselect %s -o - | FileCheck %s
+# RUN: llc -mtriple=i386-linux-gnu -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s
 
 --- |
   define void @test_uadde_i32() {

diff --git a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
index c7457af07fa9..7b8b48178b12 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
@@ -1,6 +1,6 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=x86_64-linux-gnu                       -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=FAST
-# RUN: llc -mtriple=x86_64-linux-gnu -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=GREEDY
+# RUN: llc -mtriple=x86_64-linux-gnu                       -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=FAST
+# RUN: llc -mtriple=x86_64-linux-gnu -regbankselect-greedy -run-pass=regbankselect %s -o - | FileCheck %s --check-prefix=GREEDY
 
 --- |
   define i8 @test_add_i8(i8 %arg1, i8 %arg2) {

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir
index d4f785dfee8c..35ab52291a6c 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir
@@ -1,4 +1,4 @@
-# RUN: llc -mtriple=i386-linux-gnu   -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32
+# RUN: llc -mtriple=i386-linux-gnu   -run-pass=legalizer %s -o - | FileCheck %s
 --- |
 
   @g_int = global i32 0, align 4
@@ -10,17 +10,17 @@
 ...
 ---
 name:            test_global_ptrv
-# ALL-LABEL: name:  test_global_ptrv
+# CHECK-LABEL: name:  test_global_ptrv
 alignment:       16
 legalized:       false
 regBankSelected: false
-# ALL:      registers:
-# ALL-NEXT:   - { id: 0, class: _, preferred-register: '' }
+# CHECK:      registers:
+# CHECK-NEXT:   - { id: 0, class: _, preferred-register: '' }
 registers:
   - { id: 0, class: _, preferred-register: '' }
-# ALL:          %0:_(p0) = G_GLOBAL_VALUE @g_int
-# ALL-NEXT:     $eax = COPY %0(p0)
-# ALL-NEXT:     RET 0, implicit $rax
+# CHECK:          %0:_(p0) = G_GLOBAL_VALUE @g_int
+# CHECK-NEXT:     $eax = COPY %0(p0)
+# CHECK-NEXT:     RET 0, implicit $rax
 body:             |
   bb.1.entry:
     %0(p0) = G_GLOBAL_VALUE @g_int

diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir b/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir
index 95cc0d90c75d..5235ae04e2fc 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir
@@ -1,4 +1,4 @@
-# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64
+# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=legalizer %s -o - | FileCheck %s
 --- |
 
   @g_int = global i32 0, align 4
@@ -10,17 +10,17 @@
 ...
 ---
 name:            test_global_ptrv
-# ALL-LABEL: name:  test_global_ptrv
+# CHECK-LABEL: name:  test_global_ptrv
 alignment:       16
 legalized:       false
 regBankSelected: false
-# ALL:      registers:
-# ALL-NEXT:   - { id: 0, class: _, preferred-register: '' }
+# CHECK:      registers:
+# CHECK-NEXT:   - { id: 0, class: _, preferred-register: '' }
 registers:
   - { id: 0, class: _, preferred-register: '' }
-# ALL:          %0:_(p0) = G_GLOBAL_VALUE @g_int
-# ALL-NEXT:     $rax = COPY %0(p0)
-# ALL-NEXT:     RET 0, implicit $rax
+# CHECK:          %0:_(p0) = G_GLOBAL_VALUE @g_int
+# CHECK-NEXT:     $rax = COPY %0(p0)
+# CHECK-NEXT:     RET 0, implicit $rax
 body:             |
   bb.1.entry:
     %0(p0) = G_GLOBAL_VALUE @g_int

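For context on what makes a check-prefix "unused": FileCheck matches only check lines that begin with one of the prefixes passed on the RUN line, so a --check-prefix argument that no check line in the file ever uses contributes nothing (and, depending on FileCheck's unused-prefix settings, may be rejected outright). A minimal hypothetical test, not taken from this patch (the function @identity and its RUN lines are invented for illustration), where each RUN line carries exactly the prefix its checks use:

    ; RUN: llc -mtriple=x86_64-linux-gnu < %s | FileCheck %s --check-prefix=X64
    ; RUN: llc -mtriple=i386-linux-gnu   < %s | FileCheck %s --check-prefix=X86

    define i32 @identity(i32 %x) {
    ; X64-LABEL: identity:
    ; X64:       retq
    ;
    ; X86-LABEL: identity:
    ; X86:       retl
      ret i32 %x
    }

When every RUN line produces the same checks, no explicit prefix is needed at all, since FileCheck's default CHECK prefix covers them; that is why several RUN lines above drop their --check-prefix arguments entirely rather than keeping an umbrella prefix such as ALL.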
More information about the llvm-commits mailing list