[llvm] cbe5985 - [X86] Replace X32 check prefixes with X86
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Mon Jan 29 08:50:59 PST 2024
Author: Simon Pilgrim
Date: 2024-01-29T16:50:32Z
New Revision: cbe5985ff7cd21924ed88b6a46fd5c04acc7fca8
URL: https://github.com/llvm/llvm-project/commit/cbe5985ff7cd21924ed88b6a46fd5c04acc7fca8
DIFF: https://github.com/llvm/llvm-project/commit/cbe5985ff7cd21924ed88b6a46fd5c04acc7fca8.diff
LOG: [X86] Replace X32 check prefixes with X86
We try to use the X32 check prefix only for gnux32 triple tests.
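For context, a minimal hypothetical test sketch (not part of this commit) showing the intended prefix convention: X86 for 32-bit i386/i686 triples, X32 reserved for the x86_64 gnux32 (ILP32) ABI, and X64 for plain x86_64:

; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64

define i32 @example(i32 %val, i32 %shamt) nounwind {
  %shifted = shl i32 %val, %shamt
  ret i32 %shifted
}

The per-prefix CHECK lines would then be regenerated with utils/update_llc_test_checks.py, as in the files below.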
Added:
Modified:
llvm/test/CodeGen/X86/shift-amount-mod.ll
llvm/test/CodeGen/X86/shift-and.ll
llvm/test/CodeGen/X86/shift-combine.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/shift-amount-mod.ll b/llvm/test/CodeGen/X86/shift-amount-mod.ll
index c89db15d12f45d..9f7ac748c47e16 100644
--- a/llvm/test/CodeGen/X86/shift-amount-mod.ll
+++ b/llvm/test/CodeGen/X86/shift-amount-mod.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
;==============================================================================;
@@ -10,14 +10,14 @@
;------------------------------------------------------------------------------;
define i32 @reg32_shl_by_negated(i32 %val, i32 %shamt) nounwind {
-; X32-LABEL: reg32_shl_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shll %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_shl_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_shl_by_negated:
; X64: # %bb.0:
@@ -32,15 +32,15 @@ define i32 @reg32_shl_by_negated(i32 %val, i32 %shamt) nounwind {
ret i32 %shifted
}
define i32 @load32_shl_by_negated(ptr %valptr, i32 %shamt) nounwind {
-; X32-LABEL: load32_shl_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl (%eax), %eax
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shll %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: load32_shl_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %eax
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: load32_shl_by_negated:
; X64: # %bb.0:
@@ -56,16 +56,16 @@ define i32 @load32_shl_by_negated(ptr %valptr, i32 %shamt) nounwind {
ret i32 %shifted
}
define void @store32_shl_by_negated(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
-; X32-LABEL: store32_shl_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shll %cl, %edx
-; X32-NEXT: movl %edx, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: store32_shl_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shll %cl, %edx
+; X86-NEXT: movl %edx, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: store32_shl_by_negated:
; X64: # %bb.0:
@@ -81,13 +81,13 @@ define void @store32_shl_by_negated(i32 %val, ptr %dstptr, i32 %shamt) nounwind
ret void
}
define void @modify32_shl_by_negated(ptr %valptr, i32 %shamt) nounwind {
-; X32-LABEL: modify32_shl_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movb $32, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: shll %cl, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: modify32_shl_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movb $32, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: shll %cl, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: modify32_shl_by_negated:
; X64: # %bb.0:
@@ -103,24 +103,24 @@ define void @modify32_shl_by_negated(ptr %valptr, i32 %shamt) nounwind {
}
define i64 @reg64_shl_by_negated(i64 %val, i64 %shamt) nounwind {
-; X32-LABEL: reg64_shl_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: shll %cl, %eax
-; X32-NEXT: shldl %cl, %esi, %edx
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB4_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: .LBB4_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_shl_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: shldl %cl, %esi, %edx
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB4_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: .LBB4_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_shl_by_negated:
; X64: # %bb.0:
@@ -135,25 +135,25 @@ define i64 @reg64_shl_by_negated(i64 %val, i64 %shamt) nounwind {
ret i64 %shifted
}
define i64 @load64_shl_by_negated(ptr %valptr, i64 %shamt) nounwind {
-; X32-LABEL: load64_shl_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl (%eax), %esi
-; X32-NEXT: movl 4(%eax), %edx
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: movl %esi, %eax
-; X32-NEXT: shll %cl, %eax
-; X32-NEXT: shldl %cl, %esi, %edx
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB5_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: .LBB5_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: load64_shl_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %esi
+; X86-NEXT: movl 4(%eax), %edx
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: shldl %cl, %esi, %edx
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB5_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: .LBB5_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: load64_shl_by_negated:
; X64: # %bb.0:
@@ -169,29 +169,29 @@ define i64 @load64_shl_by_negated(ptr %valptr, i64 %shamt) nounwind {
ret i64 %shifted
}
define void @store64_shl_by_negated(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
-; X32-LABEL: store64_shl_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %edi
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: shll %cl, %esi
-; X32-NEXT: shldl %cl, %edi, %edx
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB6_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: xorl %esi, %esi
-; X32-NEXT: .LBB6_2:
-; X32-NEXT: movl %edx, 4(%eax)
-; X32-NEXT: movl %esi, (%eax)
-; X32-NEXT: popl %esi
-; X32-NEXT: popl %edi
-; X32-NEXT: retl
+; X86-LABEL: store64_shl_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movl %edi, %esi
+; X86-NEXT: shll %cl, %esi
+; X86-NEXT: shldl %cl, %edi, %edx
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB6_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: xorl %esi, %esi
+; X86-NEXT: .LBB6_2:
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %esi, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
;
; X64-LABEL: store64_shl_by_negated:
; X64: # %bb.0:
@@ -207,29 +207,29 @@ define void @store64_shl_by_negated(i64 %val, ptr %dstptr, i64 %shamt) nounwind
ret void
}
define void @modify64_shl_by_negated(ptr %valptr, i64 %shamt) nounwind {
-; X32-LABEL: modify64_shl_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %edi
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl (%eax), %edi
-; X32-NEXT: movl 4(%eax), %edx
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: shll %cl, %esi
-; X32-NEXT: shldl %cl, %edi, %edx
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB7_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: xorl %esi, %esi
-; X32-NEXT: .LBB7_2:
-; X32-NEXT: movl %esi, (%eax)
-; X32-NEXT: movl %edx, 4(%eax)
-; X32-NEXT: popl %esi
-; X32-NEXT: popl %edi
-; X32-NEXT: retl
+; X86-LABEL: modify64_shl_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edi
+; X86-NEXT: movl 4(%eax), %edx
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movl %edi, %esi
+; X86-NEXT: shll %cl, %esi
+; X86-NEXT: shldl %cl, %edi, %edx
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB7_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: xorl %esi, %esi
+; X86-NEXT: .LBB7_2:
+; X86-NEXT: movl %esi, (%eax)
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
;
; X64-LABEL: modify64_shl_by_negated:
; X64: # %bb.0:
@@ -248,14 +248,14 @@ define void @modify64_shl_by_negated(ptr %valptr, i64 %shamt) nounwind {
;------------------------------------------------------------------------------;
define i32 @reg32_lshr_by_negated(i32 %val, i32 %shamt) nounwind {
-; X32-LABEL: reg32_lshr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_lshr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_lshr_by_negated:
; X64: # %bb.0:
@@ -270,15 +270,15 @@ define i32 @reg32_lshr_by_negated(i32 %val, i32 %shamt) nounwind {
ret i32 %shifted
}
define i32 @load32_lshr_by_negated(ptr %valptr, i32 %shamt) nounwind {
-; X32-LABEL: load32_lshr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl (%eax), %eax
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: load32_lshr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %eax
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: load32_lshr_by_negated:
; X64: # %bb.0:
@@ -294,16 +294,16 @@ define i32 @load32_lshr_by_negated(ptr %valptr, i32 %shamt) nounwind {
ret i32 %shifted
}
define void @store32_lshr_by_negated(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
-; X32-LABEL: store32_lshr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: movl %edx, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: store32_lshr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: movl %edx, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: store32_lshr_by_negated:
; X64: # %bb.0:
@@ -319,13 +319,13 @@ define void @store32_lshr_by_negated(i32 %val, ptr %dstptr, i32 %shamt) nounwind
ret void
}
define void @modify32_lshr_by_negated(ptr %valptr, i32 %shamt) nounwind {
-; X32-LABEL: modify32_lshr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movb $32, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: shrl %cl, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: modify32_lshr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movb $32, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: shrl %cl, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: modify32_lshr_by_negated:
; X64: # %bb.0:
@@ -341,24 +341,24 @@ define void @modify32_lshr_by_negated(ptr %valptr, i32 %shamt) nounwind {
}
define i64 @reg64_lshr_by_negated(i64 %val, i64 %shamt) nounwind {
-; X32-LABEL: reg64_lshr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB12_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB12_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_lshr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB12_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB12_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_lshr_by_negated:
; X64: # %bb.0:
@@ -373,25 +373,25 @@ define i64 @reg64_lshr_by_negated(i64 %val, i64 %shamt) nounwind {
ret i64 %shifted
}
define i64 @load64_lshr_by_negated(ptr %valptr, i64 %shamt) nounwind {
-; X32-LABEL: load64_lshr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl (%ecx), %eax
-; X32-NEXT: movl 4(%ecx), %esi
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB13_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB13_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: load64_lshr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %eax
+; X86-NEXT: movl 4(%ecx), %esi
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB13_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB13_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: load64_lshr_by_negated:
; X64: # %bb.0:
@@ -407,29 +407,29 @@ define i64 @load64_lshr_by_negated(ptr %valptr, i64 %shamt) nounwind {
ret i64 %shifted
}
define void @store64_lshr_by_negated(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
-; X32-LABEL: store64_lshr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %edi
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: shrl %cl, %esi
-; X32-NEXT: shrdl %cl, %edi, %edx
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB14_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: xorl %esi, %esi
-; X32-NEXT: .LBB14_2:
-; X32-NEXT: movl %esi, 4(%eax)
-; X32-NEXT: movl %edx, (%eax)
-; X32-NEXT: popl %esi
-; X32-NEXT: popl %edi
-; X32-NEXT: retl
+; X86-LABEL: store64_lshr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movl %edi, %esi
+; X86-NEXT: shrl %cl, %esi
+; X86-NEXT: shrdl %cl, %edi, %edx
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB14_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: xorl %esi, %esi
+; X86-NEXT: .LBB14_2:
+; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: movl %edx, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
;
; X64-LABEL: store64_lshr_by_negated:
; X64: # %bb.0:
@@ -445,29 +445,29 @@ define void @store64_lshr_by_negated(i64 %val, ptr %dstptr, i64 %shamt) nounwind
ret void
}
define void @modify64_lshr_by_negated(ptr %valptr, i64 %shamt) nounwind {
-; X32-LABEL: modify64_lshr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %edi
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl (%eax), %edx
-; X32-NEXT: movl 4(%eax), %edi
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: shrl %cl, %esi
-; X32-NEXT: shrdl %cl, %edi, %edx
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB15_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: xorl %esi, %esi
-; X32-NEXT: .LBB15_2:
-; X32-NEXT: movl %edx, (%eax)
-; X32-NEXT: movl %esi, 4(%eax)
-; X32-NEXT: popl %esi
-; X32-NEXT: popl %edi
-; X32-NEXT: retl
+; X86-LABEL: modify64_lshr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: movl 4(%eax), %edi
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movl %edi, %esi
+; X86-NEXT: shrl %cl, %esi
+; X86-NEXT: shrdl %cl, %edi, %edx
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB15_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: xorl %esi, %esi
+; X86-NEXT: .LBB15_2:
+; X86-NEXT: movl %edx, (%eax)
+; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
;
; X64-LABEL: modify64_lshr_by_negated:
; X64: # %bb.0:
@@ -486,14 +486,14 @@ define void @modify64_lshr_by_negated(ptr %valptr, i64 %shamt) nounwind {
;------------------------------------------------------------------------------;
define i32 @reg32_ashr_by_negated(i32 %val, i32 %shamt) nounwind {
-; X32-LABEL: reg32_ashr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: sarl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_ashr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: sarl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_ashr_by_negated:
; X64: # %bb.0:
@@ -508,15 +508,15 @@ define i32 @reg32_ashr_by_negated(i32 %val, i32 %shamt) nounwind {
ret i32 %shifted
}
define i32 @load32_ashr_by_negated(ptr %valptr, i32 %shamt) nounwind {
-; X32-LABEL: load32_ashr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl (%eax), %eax
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: sarl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: load32_ashr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %eax
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: sarl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: load32_ashr_by_negated:
; X64: # %bb.0:
@@ -532,16 +532,16 @@ define i32 @load32_ashr_by_negated(ptr %valptr, i32 %shamt) nounwind {
ret i32 %shifted
}
define void @store32_ashr_by_negated(i32 %val, ptr %dstptr, i32 %shamt) nounwind {
-; X32-LABEL: store32_ashr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: sarl %cl, %edx
-; X32-NEXT: movl %edx, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: store32_ashr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: sarl %cl, %edx
+; X86-NEXT: movl %edx, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: store32_ashr_by_negated:
; X64: # %bb.0:
@@ -557,13 +557,13 @@ define void @store32_ashr_by_negated(i32 %val, ptr %dstptr, i32 %shamt) nounwind
ret void
}
define void @modify32_ashr_by_negated(ptr %valptr, i32 %shamt) nounwind {
-; X32-LABEL: modify32_ashr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movb $32, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: sarl %cl, (%eax)
-; X32-NEXT: retl
+; X86-LABEL: modify32_ashr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movb $32, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: sarl %cl, (%eax)
+; X86-NEXT: retl
;
; X64-LABEL: modify32_ashr_by_negated:
; X64: # %bb.0:
@@ -579,25 +579,25 @@ define void @modify32_ashr_by_negated(ptr %valptr, i32 %shamt) nounwind {
}
define i64 @reg64_ashr_by_negated(i64 %val, i64 %shamt) nounwind {
-; X32-LABEL: reg64_ashr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: sarl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB20_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: sarl $31, %esi
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: .LBB20_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_ashr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: sarl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB20_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: sarl $31, %esi
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: .LBB20_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_ashr_by_negated:
; X64: # %bb.0:
@@ -612,26 +612,26 @@ define i64 @reg64_ashr_by_negated(i64 %val, i64 %shamt) nounwind {
ret i64 %shifted
}
define i64 @load64_ashr_by_negated(ptr %valptr, i64 %shamt) nounwind {
-; X32-LABEL: load64_ashr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl (%ecx), %eax
-; X32-NEXT: movl 4(%ecx), %esi
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: sarl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB21_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: sarl $31, %esi
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: .LBB21_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: load64_ashr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %eax
+; X86-NEXT: movl 4(%ecx), %esi
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: sarl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB21_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: sarl $31, %esi
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: .LBB21_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: load64_ashr_by_negated:
; X64: # %bb.0:
@@ -647,30 +647,30 @@ define i64 @load64_ashr_by_negated(ptr %valptr, i64 %shamt) nounwind {
ret i64 %shifted
}
define void @store64_ashr_by_negated(i64 %val, ptr %dstptr, i64 %shamt) nounwind {
-; X32-LABEL: store64_ashr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %edi
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: sarl %cl, %esi
-; X32-NEXT: shrdl %cl, %edi, %edx
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB22_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: sarl $31, %edi
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: .LBB22_2:
-; X32-NEXT: movl %esi, 4(%eax)
-; X32-NEXT: movl %edx, (%eax)
-; X32-NEXT: popl %esi
-; X32-NEXT: popl %edi
-; X32-NEXT: retl
+; X86-LABEL: store64_ashr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movl %edi, %esi
+; X86-NEXT: sarl %cl, %esi
+; X86-NEXT: shrdl %cl, %edi, %edx
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB22_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: sarl $31, %edi
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: movl %edi, %esi
+; X86-NEXT: .LBB22_2:
+; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: movl %edx, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
;
; X64-LABEL: store64_ashr_by_negated:
; X64: # %bb.0:
@@ -686,30 +686,30 @@ define void @store64_ashr_by_negated(i64 %val, ptr %dstptr, i64 %shamt) nounwind
ret void
}
define void @modify64_ashr_by_negated(ptr %valptr, i64 %shamt) nounwind {
-; X32-LABEL: modify64_ashr_by_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %edi
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl (%eax), %edx
-; X32-NEXT: movl 4(%eax), %edi
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: sarl %cl, %esi
-; X32-NEXT: shrdl %cl, %edi, %edx
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB23_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: sarl $31, %edi
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: .LBB23_2:
-; X32-NEXT: movl %edx, (%eax)
-; X32-NEXT: movl %esi, 4(%eax)
-; X32-NEXT: popl %esi
-; X32-NEXT: popl %edi
-; X32-NEXT: retl
+; X86-LABEL: modify64_ashr_by_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: movl 4(%eax), %edi
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movl %edi, %esi
+; X86-NEXT: sarl %cl, %esi
+; X86-NEXT: shrdl %cl, %edi, %edx
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB23_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: sarl $31, %edi
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: movl %edi, %esi
+; X86-NEXT: .LBB23_2:
+; X86-NEXT: movl %edx, (%eax)
+; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
;
; X64-LABEL: modify64_ashr_by_negated:
; X64: # %bb.0:
@@ -732,15 +732,15 @@ define void @modify64_ashr_by_negated(ptr %valptr, i64 %shamt) nounwind {
; subtraction from negated shift amount
define i32 @reg32_lshr_by_sub_from_negated(i32 %val, i32 %a, i32 %b) nounwind {
-; X32-LABEL: reg32_lshr_by_sub_from_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: negb %cl
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_lshr_by_sub_from_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: negb %cl
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_lshr_by_sub_from_negated:
; X64: # %bb.0:
@@ -758,26 +758,26 @@ define i32 @reg32_lshr_by_sub_from_negated(i32 %val, i32 %a, i32 %b) nounwind {
ret i32 %shifted
}
define i64 @reg64_lshr_by_sub_from_negated(i64 %val, i64 %a, i64 %b) nounwind {
-; X32-LABEL: reg64_lshr_by_sub_from_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb %dl, %cl
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB25_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB25_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_lshr_by_sub_from_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb %dl, %cl
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB25_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB25_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_lshr_by_sub_from_negated:
; X64: # %bb.0:
@@ -797,14 +797,14 @@ define i64 @reg64_lshr_by_sub_from_negated(i64 %val, i64 %a, i64 %b) nounwind {
; subtraction of negated shift amount
define i32 @reg32_lshr_by_sub_of_negated(i32 %val, i32 %a, i32 %b) nounwind {
-; X32-LABEL: reg32_lshr_by_sub_of_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_lshr_by_sub_of_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_lshr_by_sub_of_negated:
; X64: # %bb.0:
@@ -821,25 +821,25 @@ define i32 @reg32_lshr_by_sub_of_negated(i32 %val, i32 %a, i32 %b) nounwind {
ret i32 %shifted
}
define i64 @reg64_lshr_by_sub_of_negated(i64 %val, i64 %a, i64 %b) nounwind {
-; X32-LABEL: reg64_lshr_by_sub_of_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: addb $-64, %cl
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB27_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB27_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_lshr_by_sub_of_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addb $-64, %cl
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB27_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB27_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_lshr_by_sub_of_negated:
; X64: # %bb.0:
@@ -859,14 +859,14 @@ define i64 @reg64_lshr_by_sub_of_negated(i64 %val, i64 %a, i64 %b) nounwind {
;
define i32 @reg32_lshr_by_add_to_negated(i32 %val, i32 %a, i32 %b) nounwind {
-; X32-LABEL: reg32_lshr_by_add_to_negated:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_lshr_by_add_to_negated:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_lshr_by_add_to_negated:
; X64: # %bb.0:
@@ -882,25 +882,25 @@ define i32 @reg32_lshr_by_add_to_negated(i32 %val, i32 %a, i32 %b) nounwind {
ret i32 %shifted
}
define i64 @reg64_lshr_by_add_to_negated(i64 %val, i64 %a, i64 %b) nounwind {
-; X32-LABEL: reg64_lshr_by_add_to_negated:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: addb $64, %cl
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB29_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB29_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_lshr_by_add_to_negated:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addb $64, %cl
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB29_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB29_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_lshr_by_add_to_negated:
; X64: # %bb.0:
@@ -920,14 +920,14 @@ define i64 @reg64_lshr_by_add_to_negated(i64 %val, i64 %a, i64 %b) nounwind {
; subtraction of negated shift amounts
define i32 @reg32_lshr_by_sub_of_negated_amts(i32 %val, i32 %a, i32 %b) nounwind {
-; X32-LABEL: reg32_lshr_by_sub_of_negated_amts:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_lshr_by_sub_of_negated_amts:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_lshr_by_sub_of_negated_amts:
; X64: # %bb.0:
@@ -944,24 +944,24 @@ define i32 @reg32_lshr_by_sub_of_negated_amts(i32 %val, i32 %a, i32 %b) nounwind
ret i32 %shifted
}
define i64 @reg64_lshr_by_sub_of_negated_amts(i64 %val, i64 %a, i64 %b) nounwind {
-; X32-LABEL: reg64_lshr_by_sub_of_negated_amts:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB31_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB31_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_lshr_by_sub_of_negated_amts:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB31_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB31_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_lshr_by_sub_of_negated_amts:
; X64: # %bb.0:
@@ -982,15 +982,15 @@ define i64 @reg64_lshr_by_sub_of_negated_amts(i64 %val, i64 %a, i64 %b) nounwind
; addition of negated shift amounts
define i32 @reg32_lshr_by_add_of_negated_amts(i32 %val, i32 %a, i32 %b) nounwind {
-; X32-LABEL: reg32_lshr_by_add_of_negated_amts:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: negb %cl
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_lshr_by_add_of_negated_amts:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: negb %cl
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_lshr_by_add_of_negated_amts:
; X64: # %bb.0:
@@ -1009,26 +1009,26 @@ define i32 @reg32_lshr_by_add_of_negated_amts(i32 %val, i32 %a, i32 %b) nounwind
ret i32 %shifted
}
define i64 @reg64_lshr_by_add_of_negated_amts(i64 %val, i64 %a, i64 %b) nounwind {
-; X32-LABEL: reg64_lshr_by_add_of_negated_amts:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movb $-128, %cl
-; X32-NEXT: subb %dl, %cl
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB33_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB33_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_lshr_by_add_of_negated_amts:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movb $-128, %cl
+; X86-NEXT: subb %dl, %cl
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB33_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB33_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_lshr_by_add_of_negated_amts:
; X64: # %bb.0:
@@ -1049,14 +1049,14 @@ define i64 @reg64_lshr_by_add_of_negated_amts(i64 %val, i64 %a, i64 %b) nounwind
; and patterns with an actual negation+addition
define i32 @reg32_lshr_by_negated_unfolded(i32 %val, i32 %shamt) nounwind {
-; X32-LABEL: reg32_lshr_by_negated_unfolded:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_lshr_by_negated_unfolded:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_lshr_by_negated_unfolded:
; X64: # %bb.0:
@@ -1072,24 +1072,24 @@ define i32 @reg32_lshr_by_negated_unfolded(i32 %val, i32 %shamt) nounwind {
ret i32 %shifted
}
define i64 @reg64_lshr_by_negated_unfolded(i64 %val, i64 %shamt) nounwind {
-; X32-LABEL: reg64_lshr_by_negated_unfolded:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB35_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB35_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_lshr_by_negated_unfolded:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB35_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB35_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_lshr_by_negated_unfolded:
; X64: # %bb.0:
@@ -1106,15 +1106,15 @@ define i64 @reg64_lshr_by_negated_unfolded(i64 %val, i64 %shamt) nounwind {
}
define i32 @reg32_lshr_by_negated_unfolded_sub_b(i32 %val, i32 %a, i32 %b) nounwind {
-; X32-LABEL: reg32_lshr_by_negated_unfolded_sub_b:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: negb %cl
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_lshr_by_negated_unfolded_sub_b:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: negb %cl
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_lshr_by_negated_unfolded_sub_b:
; X64: # %bb.0:
@@ -1133,26 +1133,26 @@ define i32 @reg32_lshr_by_negated_unfolded_sub_b(i32 %val, i32 %a, i32 %b) nounw
ret i32 %shifted
}
define i64 @reg64_lshr_by_negated_unfolded_sub_b(i64 %val, i64 %a, i64 %b) nounwind {
-; X32-LABEL: reg64_lshr_by_negated_unfolded_sub_b:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movb $64, %cl
-; X32-NEXT: subb %dl, %cl
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB37_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB37_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_lshr_by_negated_unfolded_sub_b:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movb $64, %cl
+; X86-NEXT: subb %dl, %cl
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB37_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB37_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_lshr_by_negated_unfolded_sub_b:
; X64: # %bb.0:
@@ -1170,14 +1170,14 @@ define i64 @reg64_lshr_by_negated_unfolded_sub_b(i64 %val, i64 %a, i64 %b) nounw
}
define i32 @reg32_lshr_by_b_sub_negated_unfolded(i32 %val, i32 %a, i32 %b) nounwind {
-; X32-LABEL: reg32_lshr_by_b_sub_negated_unfolded:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_lshr_by_b_sub_negated_unfolded:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_lshr_by_b_sub_negated_unfolded:
; X64: # %bb.0:
@@ -1195,25 +1195,25 @@ define i32 @reg32_lshr_by_b_sub_negated_unfolded(i32 %val, i32 %a, i32 %b) nounw
ret i32 %shifted
}
define i64 @reg64_lshr_by_b_sub_negated_unfolded(i64 %val, i64 %a, i64 %b) nounwind {
-; X32-LABEL: reg64_lshr_by_b_sub_negated_unfolded:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: addb $-64, %cl
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB39_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB39_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_lshr_by_b_sub_negated_unfolded:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addb $-64, %cl
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB39_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB39_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_lshr_by_b_sub_negated_unfolded:
; X64: # %bb.0:
@@ -1230,14 +1230,14 @@ define i64 @reg64_lshr_by_b_sub_negated_unfolded(i64 %val, i64 %a, i64 %b) nounw
}
define i32 @reg32_lshr_by_negated_unfolded_add_b(i32 %val, i32 %a, i32 %b) nounwind {
-; X32-LABEL: reg32_lshr_by_negated_unfolded_add_b:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_lshr_by_negated_unfolded_add_b:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_lshr_by_negated_unfolded_add_b:
; X64: # %bb.0:
@@ -1254,25 +1254,25 @@ define i32 @reg32_lshr_by_negated_unfolded_add_b(i32 %val, i32 %a, i32 %b) nounw
ret i32 %shifted
}
define i64 @reg64_lshr_by_negated_unfolded_add_b(i64 %val, i64 %a, i64 %b) nounwind {
-; X32-LABEL: reg64_lshr_by_negated_unfolded_add_b:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: addb $64, %cl
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB41_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB41_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_lshr_by_negated_unfolded_add_b:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: addb $64, %cl
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB41_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB41_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_lshr_by_negated_unfolded_add_b:
; X64: # %bb.0:
@@ -1293,14 +1293,14 @@ define i64 @reg64_lshr_by_negated_unfolded_add_b(i64 %val, i64 %a, i64 %b) nounw
; and patterns with an actual negation+mask
define i32 @reg32_lshr_by_masked_negated_unfolded(i32 %val, i32 %shamt) nounwind {
-; X32-LABEL: reg32_lshr_by_masked_negated_unfolded:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_lshr_by_masked_negated_unfolded:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_lshr_by_masked_negated_unfolded:
; X64: # %bb.0:
@@ -1316,25 +1316,25 @@ define i32 @reg32_lshr_by_masked_negated_unfolded(i32 %val, i32 %shamt) nounwind
ret i32 %shifted
}
define i64 @reg64_lshr_by_masked_negated_unfolded(i64 %val, i64 %shamt) nounwind {
-; X32-LABEL: reg64_lshr_by_masked_negated_unfolded:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: movzbl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: subb %dl, %cl
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB43_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB43_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_lshr_by_masked_negated_unfolded:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: subb %dl, %cl
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB43_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB43_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_lshr_by_masked_negated_unfolded:
; X64: # %bb.0:
@@ -1351,16 +1351,16 @@ define i64 @reg64_lshr_by_masked_negated_unfolded(i64 %val, i64 %shamt) nounwind
}
define i32 @reg32_lshr_by_masked_negated_unfolded_sub_b(i32 %val, i32 %a, i32 %b) nounwind {
-; X32-LABEL: reg32_lshr_by_masked_negated_unfolded_sub_b:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: andl $31, %ecx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_lshr_by_masked_negated_unfolded_sub_b:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $31, %ecx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_lshr_by_masked_negated_unfolded_sub_b:
; X64: # %bb.0:
@@ -1379,26 +1379,26 @@ define i32 @reg32_lshr_by_masked_negated_unfolded_sub_b(i32 %val, i32 %a, i32 %b
ret i32 %shifted
}
define i64 @reg64_lshr_by_masked_negated_unfolded_sub_b(i64 %val, i64 %a, i64 %b) nounwind {
-; X32-LABEL: reg64_lshr_by_masked_negated_unfolded_sub_b:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: andl $63, %ecx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB45_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB45_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_lshr_by_masked_negated_unfolded_sub_b:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $63, %ecx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB45_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB45_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_lshr_by_masked_negated_unfolded_sub_b:
; X64: # %bb.0:
@@ -1418,17 +1418,17 @@ define i64 @reg64_lshr_by_masked_negated_unfolded_sub_b(i64 %val, i64 %a, i64 %b
}
define i32 @reg32_lshr_by_masked_b_sub_negated_unfolded(i32 %val, i32 %a, i32 %b) nounwind {
-; X32-LABEL: reg32_lshr_by_masked_b_sub_negated_unfolded:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: andl $31, %edx
-; X32-NEXT: subl %edx, %ecx
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_lshr_by_masked_b_sub_negated_unfolded:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: andl $31, %edx
+; X86-NEXT: subl %edx, %ecx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_lshr_by_masked_b_sub_negated_unfolded:
; X64: # %bb.0:
@@ -1447,27 +1447,27 @@ define i32 @reg32_lshr_by_masked_b_sub_negated_unfolded(i32 %val, i32 %a, i32 %b
ret i32 %shifted
}
define i64 @reg64_lshr_by_masked_b_sub_negated_unfolded(i64 %val, i64 %a, i64 %b) nounwind {
-; X32-LABEL: reg64_lshr_by_masked_b_sub_negated_unfolded:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: andl $63, %edx
-; X32-NEXT: subl %edx, %ecx
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB47_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB47_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_lshr_by_masked_b_sub_negated_unfolded:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: andl $63, %edx
+; X86-NEXT: subl %edx, %ecx
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB47_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB47_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_lshr_by_masked_b_sub_negated_unfolded:
; X64: # %bb.0:
@@ -1487,16 +1487,16 @@ define i64 @reg64_lshr_by_masked_b_sub_negated_unfolded(i64 %val, i64 %a, i64 %b
}
define i32 @reg32_lshr_by_masked_negated_unfolded_add_b(i32 %val, i32 %a, i32 %b) nounwind {
-; X32-LABEL: reg32_lshr_by_masked_negated_unfolded_add_b:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: andl $31, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-NEXT: shrl %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: reg32_lshr_by_masked_negated_unfolded_add_b:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $31, %ecx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-NEXT: shrl %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: reg32_lshr_by_masked_negated_unfolded_add_b:
; X64: # %bb.0:
@@ -1516,26 +1516,26 @@ define i32 @reg32_lshr_by_masked_negated_unfolded_add_b(i32 %val, i32 %a, i32 %b
ret i32 %shifted
}
define i64 @reg64_lshr_by_masked_negated_unfolded_add_b(i64 %val, i64 %a, i64 %b) nounwind {
-; X32-LABEL: reg64_lshr_by_masked_negated_unfolded_add_b:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: xorl %ecx, %ecx
-; X32-NEXT: subl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: andl $63, %ecx
-; X32-NEXT: addl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB49_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB49_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: reg64_lshr_by_masked_negated_unfolded_add_b:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: xorl %ecx, %ecx
+; X86-NEXT: subl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl $63, %ecx
+; X86-NEXT: addl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB49_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB49_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: reg64_lshr_by_masked_negated_unfolded_add_b:
; X64: # %bb.0:
@@ -1554,13 +1554,13 @@ define i64 @reg64_lshr_by_masked_negated_unfolded_add_b(i64 %val, i64 %a, i64 %b
}
define i16 @sh_trunc_sh(i64 %x) {
-; X32-LABEL: sh_trunc_sh:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shrl $4, %eax
-; X32-NEXT: andl $15, %eax
-; X32-NEXT: # kill: def $ax killed $ax killed $eax
-; X32-NEXT: retl
+; X86-LABEL: sh_trunc_sh:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrl $4, %eax
+; X86-NEXT: andl $15, %eax
+; X86-NEXT: # kill: def $ax killed $ax killed $eax
+; X86-NEXT: retl
;
; X64-LABEL: sh_trunc_sh:
; X64: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/shift-and.ll b/llvm/test/CodeGen/X86/shift-and.ll
index f6d73b1fbc6e7c..e61ba4923f7928 100644
--- a/llvm/test/CodeGen/X86/shift-and.ll
+++ b/llvm/test/CodeGen/X86/shift-and.ll
@@ -1,14 +1,14 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown-unknown | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=i386-unknown-unknown | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
define i32 @t1(i32 %t, i32 %val) nounwind {
-; X32-LABEL: t1:
-; X32: # %bb.0:
-; X32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shll %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: t1:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: t1:
; X64: # %bb.0:
@@ -23,12 +23,12 @@ define i32 @t1(i32 %t, i32 %val) nounwind {
}
define i32 @t2(i32 %t, i32 %val) nounwind {
-; X32-LABEL: t2:
-; X32: # %bb.0:
-; X32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shll %cl, %eax
-; X32-NEXT: retl
+; X86-LABEL: t2:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: retl
;
; X64-LABEL: t2:
; X64: # %bb.0:
@@ -45,11 +45,11 @@ define i32 @t2(i32 %t, i32 %val) nounwind {
@X = internal global i16 0
define void @t3(i16 %t) nounwind {
-; X32-LABEL: t3:
-; X32: # %bb.0:
-; X32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: sarw %cl, X
-; X32-NEXT: retl
+; X86-LABEL: t3:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: sarw %cl, X
+; X86-NEXT: retl
;
; X64-LABEL: t3:
; X64: # %bb.0:
@@ -65,23 +65,23 @@ define void @t3(i16 %t) nounwind {
}
define i64 @t4(i64 %t, i64 %val) nounwind {
-; X32-LABEL: t4:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB3_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB3_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: t4:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB3_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB3_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: t4:
; X64: # %bb.0:
@@ -96,23 +96,23 @@ define i64 @t4(i64 %t, i64 %val) nounwind {
}
define i64 @t5(i64 %t, i64 %val) nounwind {
-; X32-LABEL: t5:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: shrl %cl, %edx
-; X32-NEXT: shrdl %cl, %esi, %eax
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB4_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: .LBB4_2:
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: t5:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: shrl %cl, %edx
+; X86-NEXT: shrdl %cl, %esi, %eax
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB4_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: .LBB4_2:
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: t5:
; X64: # %bb.0:
@@ -127,28 +127,28 @@ define i64 @t5(i64 %t, i64 %val) nounwind {
}
define void @t5ptr(i64 %t, ptr %ptr) nounwind {
-; X32-LABEL: t5ptr:
-; X32: # %bb.0:
-; X32-NEXT: pushl %edi
-; X32-NEXT: pushl %esi
-; X32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl (%eax), %edx
-; X32-NEXT: movl 4(%eax), %edi
-; X32-NEXT: movl %edi, %esi
-; X32-NEXT: shrl %cl, %esi
-; X32-NEXT: shrdl %cl, %edi, %edx
-; X32-NEXT: testb $32, %cl
-; X32-NEXT: je .LBB5_2
-; X32-NEXT: # %bb.1:
-; X32-NEXT: movl %esi, %edx
-; X32-NEXT: xorl %esi, %esi
-; X32-NEXT: .LBB5_2:
-; X32-NEXT: movl %edx, (%eax)
-; X32-NEXT: movl %esi, 4(%eax)
-; X32-NEXT: popl %esi
-; X32-NEXT: popl %edi
-; X32-NEXT: retl
+; X86-LABEL: t5ptr:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: movl 4(%eax), %edi
+; X86-NEXT: movl %edi, %esi
+; X86-NEXT: shrl %cl, %esi
+; X86-NEXT: shrdl %cl, %edi, %edx
+; X86-NEXT: testb $32, %cl
+; X86-NEXT: je .LBB5_2
+; X86-NEXT: # %bb.1:
+; X86-NEXT: movl %esi, %edx
+; X86-NEXT: xorl %esi, %esi
+; X86-NEXT: .LBB5_2:
+; X86-NEXT: movl %edx, (%eax)
+; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl
;
; X64-LABEL: t5ptr:
; X64: # %bb.0:
@@ -166,23 +166,23 @@ define void @t5ptr(i64 %t, ptr %ptr) nounwind {
; rdar://11866926
define i64 @t6(i64 %key, ptr nocapture %val) nounwind {
-; X32-LABEL: t6:
-; X32: # %bb.0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shrdl $3, %eax, %ecx
-; X32-NEXT: movl %eax, %esi
-; X32-NEXT: shrl $3, %esi
-; X32-NEXT: movl (%edx), %eax
-; X32-NEXT: movl 4(%edx), %edx
-; X32-NEXT: addl $-1, %eax
-; X32-NEXT: adcl $-1, %edx
-; X32-NEXT: andl %ecx, %eax
-; X32-NEXT: andl %esi, %edx
-; X32-NEXT: popl %esi
-; X32-NEXT: retl
+; X86-LABEL: t6:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrdl $3, %eax, %ecx
+; X86-NEXT: movl %eax, %esi
+; X86-NEXT: shrl $3, %esi
+; X86-NEXT: movl (%edx), %eax
+; X86-NEXT: movl 4(%edx), %edx
+; X86-NEXT: addl $-1, %eax
+; X86-NEXT: adcl $-1, %edx
+; X86-NEXT: andl %ecx, %eax
+; X86-NEXT: andl %esi, %edx
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
;
; X64-LABEL: t6:
; X64: # %bb.0:
@@ -199,13 +199,13 @@ define i64 @t6(i64 %key, ptr nocapture %val) nounwind {
}
define i64 @big_mask_constant(i64 %x) nounwind {
-; X32-LABEL: big_mask_constant:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: andl $4, %eax
-; X32-NEXT: shll $25, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: retl
+; X86-LABEL: big_mask_constant:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $4, %eax
+; X86-NEXT: shll $25, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: retl
;
; X64-LABEL: big_mask_constant:
; X64: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/shift-combine.ll b/llvm/test/CodeGen/X86/shift-combine.ll
index cf45641fba6321..bb0fd9c68afbaf 100644
--- a/llvm/test/CodeGen/X86/shift-combine.ll
+++ b/llvm/test/CodeGen/X86/shift-combine.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=i686-unknown < %s | FileCheck %s --check-prefix=X32
+; RUN: llc -mtriple=i686-unknown < %s | FileCheck %s --check-prefix=X86
; RUN: llc -mtriple=x86_64-unknown < %s | FileCheck %s --check-prefix=X64
@array = weak dso_local global [4 x i32] zeroinitializer
define dso_local i32 @test_lshr_and(i32 %x) {
-; X32-LABEL: test_lshr_and:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: andl $12, %eax
-; X32-NEXT: movl array(%eax), %eax
-; X32-NEXT: retl
+; X86-LABEL: test_lshr_and:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl $12, %eax
+; X86-NEXT: movl array(%eax), %eax
+; X86-NEXT: retl
;
; X64-LABEL: test_lshr_and:
; X64: # %bb.0:
@@ -26,13 +26,13 @@ define dso_local i32 @test_lshr_and(i32 %x) {
}
define dso_local ptr @test_exact1(i32 %a, i32 %b, ptr %x) {
-; X32-LABEL: test_exact1:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: sarl %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
+; X86-LABEL: test_exact1:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: subl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: sarl %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
;
; X64-LABEL: test_exact1:
; X64: # %bb.0:
@@ -48,13 +48,13 @@ define dso_local ptr @test_exact1(i32 %a, i32 %b, ptr %x) {
}
define dso_local ptr @test_exact2(i32 %a, i32 %b, ptr %x) {
-; X32-LABEL: test_exact2:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: sarl %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
+; X86-LABEL: test_exact2:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: subl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: sarl %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
;
; X64-LABEL: test_exact2:
; X64: # %bb.0:
@@ -70,12 +70,12 @@ define dso_local ptr @test_exact2(i32 %a, i32 %b, ptr %x) {
}
define dso_local ptr @test_exact3(i32 %a, i32 %b, ptr %x) {
-; X32-LABEL: test_exact3:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
+; X86-LABEL: test_exact3:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: subl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
;
; X64-LABEL: test_exact3:
; X64: # %bb.0:
@@ -91,13 +91,13 @@ define dso_local ptr @test_exact3(i32 %a, i32 %b, ptr %x) {
}
define dso_local ptr @test_exact4(i32 %a, i32 %b, ptr %x) {
-; X32-LABEL: test_exact4:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shrl %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
+; X86-LABEL: test_exact4:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: subl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrl %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
;
; X64-LABEL: test_exact4:
; X64: # %bb.0:
@@ -113,13 +113,13 @@ define dso_local ptr @test_exact4(i32 %a, i32 %b, ptr %x) {
}
define dso_local ptr @test_exact5(i32 %a, i32 %b, ptr %x) {
-; X32-LABEL: test_exact5:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shrl %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
+; X86-LABEL: test_exact5:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: subl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrl %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
;
; X64-LABEL: test_exact5:
; X64: # %bb.0:
@@ -135,12 +135,12 @@ define dso_local ptr @test_exact5(i32 %a, i32 %b, ptr %x) {
}
define dso_local ptr @test_exact6(i32 %a, i32 %b, ptr %x) {
-; X32-LABEL: test_exact6:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
+; X86-LABEL: test_exact6:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: subl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
;
; X64-LABEL: test_exact6:
; X64: # %bb.0:
@@ -157,13 +157,13 @@ define dso_local ptr @test_exact6(i32 %a, i32 %b, ptr %x) {
; PR42644 - https://bugs.llvm.org/show_bug.cgi?id=42644
define i64 @ashr_add_shl_i32(i64 %r) nounwind {
-; X32-LABEL: ashr_add_shl_i32:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: incl %eax
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: sarl $31, %edx
-; X32-NEXT: retl
+; X86-LABEL: ashr_add_shl_i32:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: incl %eax
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: sarl $31, %edx
+; X86-NEXT: retl
;
; X64-LABEL: ashr_add_shl_i32:
; X64: # %bb.0:
@@ -177,14 +177,14 @@ define i64 @ashr_add_shl_i32(i64 %r) nounwind {
}
define i64 @ashr_add_shl_i8(i64 %r) nounwind {
-; X32-LABEL: ashr_add_shl_i8:
-; X32: # %bb.0:
-; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: addb $2, %al
-; X32-NEXT: movsbl %al, %eax
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: sarl $31, %edx
-; X32-NEXT: retl
+; X86-LABEL: ashr_add_shl_i8:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: addb $2, %al
+; X86-NEXT: movsbl %al, %eax
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: sarl $31, %edx
+; X86-NEXT: retl
;
; X64-LABEL: ashr_add_shl_i8:
; X64: # %bb.0:
@@ -198,30 +198,30 @@ define i64 @ashr_add_shl_i8(i64 %r) nounwind {
}
define <4 x i32> @ashr_add_shl_v4i8(<4 x i32> %r) nounwind {
-; X32-LABEL: ashr_add_shl_v4i8:
-; X32: # %bb.0:
-; X32-NEXT: pushl %edi
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movzbl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movb {{[0-9]+}}(%esp), %ch
-; X32-NEXT: movb {{[0-9]+}}(%esp), %dh
-; X32-NEXT: incb %dh
-; X32-NEXT: movsbl %dh, %esi
-; X32-NEXT: incb %ch
-; X32-NEXT: movsbl %ch, %edi
-; X32-NEXT: incb %dl
-; X32-NEXT: movsbl %dl, %edx
-; X32-NEXT: incb %cl
-; X32-NEXT: movsbl %cl, %ecx
-; X32-NEXT: movl %ecx, 12(%eax)
-; X32-NEXT: movl %edx, 8(%eax)
-; X32-NEXT: movl %edi, 4(%eax)
-; X32-NEXT: movl %esi, (%eax)
-; X32-NEXT: popl %esi
-; X32-NEXT: popl %edi
-; X32-NEXT: retl $4
+; X86-LABEL: ashr_add_shl_v4i8:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movb {{[0-9]+}}(%esp), %ch
+; X86-NEXT: movb {{[0-9]+}}(%esp), %dh
+; X86-NEXT: incb %dh
+; X86-NEXT: movsbl %dh, %esi
+; X86-NEXT: incb %ch
+; X86-NEXT: movsbl %ch, %edi
+; X86-NEXT: incb %dl
+; X86-NEXT: movsbl %dl, %edx
+; X86-NEXT: incb %cl
+; X86-NEXT: movsbl %cl, %ecx
+; X86-NEXT: movl %ecx, 12(%eax)
+; X86-NEXT: movl %edx, 8(%eax)
+; X86-NEXT: movl %edi, 4(%eax)
+; X86-NEXT: movl %esi, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
;
; X64-LABEL: ashr_add_shl_v4i8:
; X64: # %bb.0:
@@ -236,14 +236,14 @@ define <4 x i32> @ashr_add_shl_v4i8(<4 x i32> %r) nounwind {
}
define i64 @ashr_add_shl_i36(i64 %r) nounwind {
-; X32-LABEL: ashr_add_shl_i36:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: shll $4, %edx
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: sarl $4, %eax
-; X32-NEXT: sarl $31, %edx
-; X32-NEXT: retl
+; X86-LABEL: ashr_add_shl_i36:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shll $4, %edx
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: sarl $4, %eax
+; X86-NEXT: sarl $31, %edx
+; X86-NEXT: retl
;
; X64-LABEL: ashr_add_shl_i36:
; X64: # %bb.0:
@@ -258,13 +258,13 @@ define i64 @ashr_add_shl_i36(i64 %r) nounwind {
}
define i64 @ashr_add_shl_mismatch_shifts1(i64 %r) nounwind {
-; X32-LABEL: ashr_add_shl_mismatch_shifts1:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: incl %eax
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: sarl $31, %edx
-; X32-NEXT: retl
+; X86-LABEL: ashr_add_shl_mismatch_shifts1:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: incl %eax
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: sarl $31, %edx
+; X86-NEXT: retl
;
; X64-LABEL: ashr_add_shl_mismatch_shifts1:
; X64: # %bb.0:
@@ -280,15 +280,15 @@ define i64 @ashr_add_shl_mismatch_shifts1(i64 %r) nounwind {
}
define i64 @ashr_add_shl_mismatch_shifts2(i64 %r) nounwind {
-; X32-LABEL: ashr_add_shl_mismatch_shifts2:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: shrl $8, %edx
-; X32-NEXT: incl %edx
-; X32-NEXT: shrdl $8, %edx, %eax
-; X32-NEXT: shrl $8, %edx
-; X32-NEXT: retl
+; X86-LABEL: ashr_add_shl_mismatch_shifts2:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shrl $8, %edx
+; X86-NEXT: incl %edx
+; X86-NEXT: shrdl $8, %edx, %eax
+; X86-NEXT: shrl $8, %edx
+; X86-NEXT: retl
;
; X64-LABEL: ashr_add_shl_mismatch_shifts2:
; X64: # %bb.0:
@@ -304,15 +304,15 @@ define i64 @ashr_add_shl_mismatch_shifts2(i64 %r) nounwind {
}
define dso_local i32 @ashr_add_shl_i32_i8_extra_use1(i32 %r, ptr %p) nounwind {
-; X32-LABEL: ashr_add_shl_i32_i8_extra_use1:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shll $24, %eax
-; X32-NEXT: addl $33554432, %eax # imm = 0x2000000
-; X32-NEXT: movl %eax, (%ecx)
-; X32-NEXT: sarl $24, %eax
-; X32-NEXT: retl
+; X86-LABEL: ashr_add_shl_i32_i8_extra_use1:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $24, %eax
+; X86-NEXT: addl $33554432, %eax # imm = 0x2000000
+; X86-NEXT: movl %eax, (%ecx)
+; X86-NEXT: sarl $24, %eax
+; X86-NEXT: retl
;
; X64-LABEL: ashr_add_shl_i32_i8_extra_use1:
; X64: # %bb.0:
@@ -330,15 +330,15 @@ define dso_local i32 @ashr_add_shl_i32_i8_extra_use1(i32 %r, ptr %p) nounwind {
}
define dso_local i32 @ashr_add_shl_i32_i8_extra_use2(i32 %r, ptr %p) nounwind {
-; X32-LABEL: ashr_add_shl_i32_i8_extra_use2:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shll $24, %eax
-; X32-NEXT: movl %eax, (%ecx)
-; X32-NEXT: addl $33554432, %eax # imm = 0x2000000
-; X32-NEXT: sarl $24, %eax
-; X32-NEXT: retl
+; X86-LABEL: ashr_add_shl_i32_i8_extra_use2:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $24, %eax
+; X86-NEXT: movl %eax, (%ecx)
+; X86-NEXT: addl $33554432, %eax # imm = 0x2000000
+; X86-NEXT: sarl $24, %eax
+; X86-NEXT: retl
;
; X64-LABEL: ashr_add_shl_i32_i8_extra_use2:
; X64: # %bb.0:
@@ -356,17 +356,17 @@ define dso_local i32 @ashr_add_shl_i32_i8_extra_use2(i32 %r, ptr %p) nounwind {
}
define dso_local i32 @ashr_add_shl_i32_i8_extra_use3(i32 %r, ptr %p1, ptr %p2) nounwind {
-; X32-LABEL: ashr_add_shl_i32_i8_extra_use3:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shll $24, %eax
-; X32-NEXT: movl %eax, (%edx)
-; X32-NEXT: addl $33554432, %eax # imm = 0x2000000
-; X32-NEXT: movl %eax, (%ecx)
-; X32-NEXT: sarl $24, %eax
-; X32-NEXT: retl
+; X86-LABEL: ashr_add_shl_i32_i8_extra_use3:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $24, %eax
+; X86-NEXT: movl %eax, (%edx)
+; X86-NEXT: addl $33554432, %eax # imm = 0x2000000
+; X86-NEXT: movl %eax, (%ecx)
+; X86-NEXT: sarl $24, %eax
+; X86-NEXT: retl
;
; X64-LABEL: ashr_add_shl_i32_i8_extra_use3:
; X64: # %bb.0:
@@ -388,13 +388,13 @@ define dso_local i32 @ashr_add_shl_i32_i8_extra_use3(i32 %r, ptr %p1, ptr %p2) n
%"class.QPainterPath" = type { double, double, i32 }
define dso_local void @PR42880(i32 %t0) {
-; X32-LABEL: PR42880:
-; X32: # %bb.0:
-; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: testb %al, %al
-; X32-NEXT: je .LBB16_1
-; X32-NEXT: # %bb.2: # %if
-; X32-NEXT: .LBB16_1: # %then
+; X86-LABEL: PR42880:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: testb %al, %al
+; X86-NEXT: je .LBB16_1
+; X86-NEXT: # %bb.2: # %if
+; X86-NEXT: .LBB16_1: # %then
;
; X64-LABEL: PR42880:
; X64: # %bb.0:
@@ -420,13 +420,13 @@ if:
; The mul here is the equivalent of (neg (shl X, 32)).
define i64 @ashr_add_neg_shl_i32(i64 %r) nounwind {
-; X32-LABEL: ashr_add_neg_shl_i32:
-; X32: # %bb.0:
-; X32-NEXT: movl $1, %eax
-; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %eax, %edx
-; X32-NEXT: sarl $31, %edx
-; X32-NEXT: retl
+; X86-LABEL: ashr_add_neg_shl_i32:
+; X86: # %bb.0:
+; X86-NEXT: movl $1, %eax
+; X86-NEXT: subl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %edx
+; X86-NEXT: sarl $31, %edx
+; X86-NEXT: retl
;
; X64-LABEL: ashr_add_neg_shl_i32:
; X64: # %bb.0:
@@ -442,16 +442,16 @@ define i64 @ashr_add_neg_shl_i32(i64 %r) nounwind {
; The mul here is the equivalent of (neg (shl X, 56)).
define i64 @ashr_add_neg_shl_i8(i64 %r) nounwind {
-; X32-LABEL: ashr_add_neg_shl_i8:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shll $24, %eax
-; X32-NEXT: movl $33554432, %edx # imm = 0x2000000
-; X32-NEXT: subl %eax, %edx
-; X32-NEXT: movl %edx, %eax
-; X32-NEXT: sarl $24, %eax
-; X32-NEXT: sarl $31, %edx
-; X32-NEXT: retl
+; X86-LABEL: ashr_add_neg_shl_i8:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $24, %eax
+; X86-NEXT: movl $33554432, %edx # imm = 0x2000000
+; X86-NEXT: subl %eax, %edx
+; X86-NEXT: movl %edx, %eax
+; X86-NEXT: sarl $24, %eax
+; X86-NEXT: sarl $31, %edx
+; X86-NEXT: retl
;
; X64-LABEL: ashr_add_neg_shl_i8:
; X64: # %bb.0:
@@ -467,30 +467,30 @@ define i64 @ashr_add_neg_shl_i8(i64 %r) nounwind {
; The mul here is the equivalent of (neg (shl X, 24)).
define <4 x i32> @ashr_add_neg_shl_v4i8(<4 x i32> %r) nounwind {
-; X32-LABEL: ashr_add_neg_shl_v4i8:
-; X32: # %bb.0:
-; X32-NEXT: pushl %edi
-; X32-NEXT: pushl %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movb $1, %cl
-; X32-NEXT: movb $1, %dl
-; X32-NEXT: subb {{[0-9]+}}(%esp), %dl
-; X32-NEXT: movsbl %dl, %edx
-; X32-NEXT: movb $1, %ch
-; X32-NEXT: subb {{[0-9]+}}(%esp), %ch
-; X32-NEXT: movsbl %ch, %esi
-; X32-NEXT: movb $1, %ch
-; X32-NEXT: subb {{[0-9]+}}(%esp), %ch
-; X32-NEXT: movsbl %ch, %edi
-; X32-NEXT: subb {{[0-9]+}}(%esp), %cl
-; X32-NEXT: movsbl %cl, %ecx
-; X32-NEXT: movl %ecx, 12(%eax)
-; X32-NEXT: movl %edi, 8(%eax)
-; X32-NEXT: movl %esi, 4(%eax)
-; X32-NEXT: movl %edx, (%eax)
-; X32-NEXT: popl %esi
-; X32-NEXT: popl %edi
-; X32-NEXT: retl $4
+; X86-LABEL: ashr_add_neg_shl_v4i8:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movb $1, %cl
+; X86-NEXT: movb $1, %dl
+; X86-NEXT: subb {{[0-9]+}}(%esp), %dl
+; X86-NEXT: movsbl %dl, %edx
+; X86-NEXT: movb $1, %ch
+; X86-NEXT: subb {{[0-9]+}}(%esp), %ch
+; X86-NEXT: movsbl %ch, %esi
+; X86-NEXT: movb $1, %ch
+; X86-NEXT: subb {{[0-9]+}}(%esp), %ch
+; X86-NEXT: movsbl %ch, %edi
+; X86-NEXT: subb {{[0-9]+}}(%esp), %cl
+; X86-NEXT: movsbl %cl, %ecx
+; X86-NEXT: movl %ecx, 12(%eax)
+; X86-NEXT: movl %edi, 8(%eax)
+; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: movl %edx, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: retl $4
;
; X64-LABEL: ashr_add_neg_shl_v4i8:
; X64: # %bb.0:
@@ -507,14 +507,14 @@ define <4 x i32> @ashr_add_neg_shl_v4i8(<4 x i32> %r) nounwind {
}
define i32 @or_tree_with_shifts_i32(i32 %a, i32 %b, i32 %c, i32 %d) {
-; X32-LABEL: or_tree_with_shifts_i32:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shll $16, %eax
-; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
+; X86-LABEL: or_tree_with_shifts_i32:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $16, %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
;
; X64-LABEL: or_tree_with_shifts_i32:
; X64: # %bb.0:
@@ -533,14 +533,14 @@ define i32 @or_tree_with_shifts_i32(i32 %a, i32 %b, i32 %c, i32 %d) {
}
define i32 @xor_tree_with_shifts_i32(i32 %a, i32 %b, i32 %c, i32 %d) {
-; X32-LABEL: xor_tree_with_shifts_i32:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: xorl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shrl $16, %eax
-; X32-NEXT: xorl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: xorl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
+; X86-LABEL: xor_tree_with_shifts_i32:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shrl $16, %eax
+; X86-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: xorl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
;
; X64-LABEL: xor_tree_with_shifts_i32:
; X64: # %bb.0:
@@ -559,14 +559,14 @@ define i32 @xor_tree_with_shifts_i32(i32 %a, i32 %b, i32 %c, i32 %d) {
}
define i32 @and_tree_with_shifts_i32(i32 %a, i32 %b, i32 %c, i32 %d) {
-; X32-LABEL: and_tree_with_shifts_i32:
-; X32: # %bb.0:
-; X32-NEXT: movswl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movswl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: andl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: andl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: andl %ecx, %eax
-; X32-NEXT: retl
+; X86-LABEL: and_tree_with_shifts_i32:
+; X86: # %bb.0:
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: andl %ecx, %eax
+; X86-NEXT: retl
;
; X64-LABEL: and_tree_with_shifts_i32:
; X64: # %bb.0:
@@ -585,15 +585,15 @@ define i32 @and_tree_with_shifts_i32(i32 %a, i32 %b, i32 %c, i32 %d) {
}
define i32 @logic_tree_with_shifts_var_i32(i32 %a, i32 %b, i32 %c, i32 %d, i32 %s) {
-; X32-LABEL: logic_tree_with_shifts_var_i32:
-; X32: # %bb.0:
-; X32-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shll %cl, %eax
-; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: retl
+; X86-LABEL: logic_tree_with_shifts_var_i32:
+; X86: # %bb.0:
+; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll %cl, %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: retl
;
; X64-LABEL: logic_tree_with_shifts_var_i32:
; X64: # %bb.0:
@@ -613,16 +613,16 @@ define i32 @logic_tree_with_shifts_var_i32(i32 %a, i32 %b, i32 %c, i32 %d, i32 %
}
define i32 @logic_tree_with_mismatching_shifts_i32(i32 %a, i32 %b, i32 %c, i32 %d) {
-; X32-LABEL: logic_tree_with_mismatching_shifts_i32:
-; X32: # %bb.0:
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: shll $15, %ecx
-; X32-NEXT: shll $16, %eax
-; X32-NEXT: orl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: orl %ecx, %eax
-; X32-NEXT: retl
+; X86-LABEL: logic_tree_with_mismatching_shifts_i32:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shll $15, %ecx
+; X86-NEXT: shll $16, %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl %ecx, %eax
+; X86-NEXT: retl
;
; X64-LABEL: logic_tree_with_mismatching_shifts_i32:
; X64: # %bb.0:
@@ -642,15 +642,15 @@ define i32 @logic_tree_with_mismatching_shifts_i32(i32 %a, i32 %b, i32 %c, i32 %
}
define i32 @logic_tree_with_mismatching_shifts2_i32(i32 %a, i32 %b, i32 %c, i32 %d) {
-; X32-LABEL: logic_tree_with_mismatching_shifts2_i32:
-; X32: # %bb.0:
-; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: shll $16, %ecx
-; X32-NEXT: orl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: orl %ecx, %eax
-; X32-NEXT: retl
+; X86-LABEL: logic_tree_with_mismatching_shifts2_i32:
+; X86: # %bb.0:
+; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shll $16, %ecx
+; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl %ecx, %eax
+; X86-NEXT: retl
;
; X64-LABEL: logic_tree_with_mismatching_shifts2_i32:
; X64: # %bb.0:
@@ -670,44 +670,44 @@ define i32 @logic_tree_with_mismatching_shifts2_i32(i32 %a, i32 %b, i32 %c, i32
}
define <4 x i32> @or_tree_with_shifts_vec_i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
-; X32-LABEL: or_tree_with_shifts_vec_i32:
-; X32: # %bb.0:
-; X32-NEXT: pushl %edi
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: .cfi_offset %edi, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: orl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: shll $16, %ecx
-; X32-NEXT: orl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: orl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: orl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: shll $16, %edx
-; X32-NEXT: orl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: orl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: orl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: shll $16, %esi
-; X32-NEXT: orl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: orl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: orl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: shll $16, %edi
-; X32-NEXT: orl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: orl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl %edi, 12(%eax)
-; X32-NEXT: movl %esi, 8(%eax)
-; X32-NEXT: movl %edx, 4(%eax)
-; X32-NEXT: movl %ecx, (%eax)
-; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: popl %edi
-; X32-NEXT: .cfi_def_cfa_offset 4
-; X32-NEXT: retl $4
+; X86-LABEL: or_tree_with_shifts_vec_i32:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: pushl %esi
+; X86-NEXT: .cfi_def_cfa_offset 12
+; X86-NEXT: .cfi_offset %esi, -12
+; X86-NEXT: .cfi_offset %edi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: shll $16, %ecx
+; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: shll $16, %edx
+; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: orl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: shll $16, %esi
+; X86-NEXT: orl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: orl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: orl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: shll $16, %edi
+; X86-NEXT: orl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: orl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: movl %esi, 8(%eax)
+; X86-NEXT: movl %edx, 4(%eax)
+; X86-NEXT: movl %ecx, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: popl %edi
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: retl $4
;
; X64-LABEL: or_tree_with_shifts_vec_i32:
; X64: # %bb.0:
@@ -725,52 +725,52 @@ define <4 x i32> @or_tree_with_shifts_vec_i32(<4 x i32> %a, <4 x i32> %b, <4 x i
}
define <4 x i32> @or_tree_with_mismatching_shifts_vec_i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
-; X32-LABEL: or_tree_with_mismatching_shifts_vec_i32:
-; X32: # %bb.0:
-; X32-NEXT: pushl %edi
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: pushl %esi
-; X32-NEXT: .cfi_def_cfa_offset 12
-; X32-NEXT: .cfi_offset %esi, -12
-; X32-NEXT: .cfi_offset %edi, -8
-; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shll $16, %eax
-; X32-NEXT: shll $17, %ecx
-; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: orl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: orl %eax, %ecx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shll $16, %eax
-; X32-NEXT: shll $17, %edx
-; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: orl {{[0-9]+}}(%esp), %edx
-; X32-NEXT: orl %eax, %edx
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shll $16, %eax
-; X32-NEXT: shll $17, %esi
-; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: orl {{[0-9]+}}(%esp), %esi
-; X32-NEXT: orl %eax, %esi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: shll $16, %eax
-; X32-NEXT: shll $17, %edi
-; X32-NEXT: orl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: orl {{[0-9]+}}(%esp), %edi
-; X32-NEXT: orl %eax, %edi
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: movl %ecx, 12(%eax)
-; X32-NEXT: movl %edx, 8(%eax)
-; X32-NEXT: movl %esi, 4(%eax)
-; X32-NEXT: movl %edi, (%eax)
-; X32-NEXT: popl %esi
-; X32-NEXT: .cfi_def_cfa_offset 8
-; X32-NEXT: popl %edi
-; X32-NEXT: .cfi_def_cfa_offset 4
-; X32-NEXT: retl $4
+; X86-LABEL: or_tree_with_mismatching_shifts_vec_i32:
+; X86: # %bb.0:
+; X86-NEXT: pushl %edi
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: pushl %esi
+; X86-NEXT: .cfi_def_cfa_offset 12
+; X86-NEXT: .cfi_offset %esi, -12
+; X86-NEXT: .cfi_offset %edi, -8
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $16, %eax
+; X86-NEXT: shll $17, %ecx
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: orl %eax, %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $16, %eax
+; X86-NEXT: shll $17, %edx
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: orl %eax, %edx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $16, %eax
+; X86-NEXT: shll $17, %esi
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: orl %eax, %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: shll $16, %eax
+; X86-NEXT: shll $17, %edi
+; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: orl {{[0-9]+}}(%esp), %edi
+; X86-NEXT: orl %eax, %edi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %ecx, 12(%eax)
+; X86-NEXT: movl %edx, 8(%eax)
+; X86-NEXT: movl %esi, 4(%eax)
+; X86-NEXT: movl %edi, (%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: .cfi_def_cfa_offset 8
+; X86-NEXT: popl %edi
+; X86-NEXT: .cfi_def_cfa_offset 4
+; X86-NEXT: retl $4
;
; X64-LABEL: or_tree_with_mismatching_shifts_vec_i32:
; X64: # %bb.0: