[llvm] 55aecfb - [X86] Rename funnel-shift X32 check prefixes to X86
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 17 08:06:50 PST 2021
Author: Simon Pilgrim
Date: 2021-12-17T16:06:40Z
New Revision: 55aecfb936ccb05542af3f038a7f76fed2374c20
URL: https://github.com/llvm/llvm-project/commit/55aecfb936ccb05542af3f038a7f76fed2374c20
DIFF: https://github.com/llvm/llvm-project/commit/55aecfb936ccb05542af3f038a7f76fed2374c20.diff
LOG: [X86] Rename funnel-shift X32 check prefixes to X86
We try to use the X32 check prefix only for gnux32 triple checks; plain i686 (32-bit) runs should use X86.
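For context, a hypothetical pair of RUN lines (not part of this commit) sketching the intended convention — the i686 triple takes the generic X86 prefix, while X32 is reserved for the x32 ABI triple:

; RUN: llc < %s -mtriple=i686-- | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s --check-prefixes=CHECK,X32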
Added:
Modified:
llvm/test/CodeGen/X86/funnel-shift-rot.ll
llvm/test/CodeGen/X86/funnel-shift.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/funnel-shift-rot.ll b/llvm/test/CodeGen/X86/funnel-shift-rot.ll
index a73ef92f9ff6..c95df7bcd67b 100644
--- a/llvm/test/CodeGen/X86/funnel-shift-rot.ll
+++ b/llvm/test/CodeGen/X86/funnel-shift-rot.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-- -mattr=sse2 | FileCheck %s --check-prefixes=ANY,X32-SSE2
-; RUN: llc < %s -mtriple=x86_64-- -mattr=avx2 | FileCheck %s --check-prefixes=ANY,X64-AVX2
+; RUN: llc < %s -mtriple=i686-- -mattr=sse2 | FileCheck %s --check-prefixes=CHECK,X86-SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,X64-AVX2
declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i16 @llvm.fshl.i16(i16, i16, i16)
@@ -17,11 +17,11 @@ declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
; When first 2 operands match, it's a rotate.
define i8 @rotl_i8_const_shift(i8 %x) nounwind {
-; X32-SSE2-LABEL: rotl_i8_const_shift:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-SSE2-NEXT: rolb $3, %al
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotl_i8_const_shift:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-SSE2-NEXT: rolb $3, %al
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotl_i8_const_shift:
; X64-AVX2: # %bb.0:
@@ -34,11 +34,11 @@ define i8 @rotl_i8_const_shift(i8 %x) nounwind {
}
define i8 @rotl_i8_const_shift1(i8 %x) nounwind {
-; X32-SSE2-LABEL: rotl_i8_const_shift1:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-SSE2-NEXT: rolb %al
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotl_i8_const_shift1:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-SSE2-NEXT: rolb %al
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotl_i8_const_shift1:
; X64-AVX2: # %bb.0:
@@ -51,11 +51,11 @@ define i8 @rotl_i8_const_shift1(i8 %x) nounwind {
}
define i8 @rotl_i8_const_shift7(i8 %x) nounwind {
-; X32-SSE2-LABEL: rotl_i8_const_shift7:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-SSE2-NEXT: rorb %al
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotl_i8_const_shift7:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-SSE2-NEXT: rorb %al
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotl_i8_const_shift7:
; X64-AVX2: # %bb.0:
@@ -68,14 +68,14 @@ define i8 @rotl_i8_const_shift7(i8 %x) nounwind {
}
define i64 @rotl_i64_const_shift(i64 %x) nounwind {
-; X32-SSE2-LABEL: rotl_i64_const_shift:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-SSE2-NEXT: movl %ecx, %eax
-; X32-SSE2-NEXT: shldl $3, %edx, %eax
-; X32-SSE2-NEXT: shldl $3, %ecx, %edx
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotl_i64_const_shift:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: movl %ecx, %eax
+; X86-SSE2-NEXT: shldl $3, %edx, %eax
+; X86-SSE2-NEXT: shldl $3, %ecx, %edx
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotl_i64_const_shift:
; X64-AVX2: # %bb.0:
@@ -87,12 +87,12 @@ define i64 @rotl_i64_const_shift(i64 %x) nounwind {
}
define i16 @rotl_i16(i16 %x, i16 %z) nounwind {
-; X32-SSE2-LABEL: rotl_i16:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: rolw %cl, %ax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotl_i16:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: rolw %cl, %ax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotl_i16:
; X64-AVX2: # %bb.0:
@@ -107,12 +107,12 @@ define i16 @rotl_i16(i16 %x, i16 %z) nounwind {
}
define i32 @rotl_i32(i32 %x, i32 %z) nounwind {
-; X32-SSE2-LABEL: rotl_i32:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: roll %cl, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotl_i32:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: roll %cl, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotl_i32:
; X64-AVX2: # %bb.0:
@@ -128,24 +128,24 @@ define i32 @rotl_i32(i32 %x, i32 %z) nounwind {
; Vector rotate.
define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind {
-; X32-SSE2-LABEL: rotl_v4i32:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X32-SSE2-NEXT: pslld $23, %xmm1
-; X32-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X32-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X32-SSE2-NEXT: pmuludq %xmm1, %xmm0
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X32-SSE2-NEXT: pmuludq %xmm2, %xmm1
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE2-NEXT: por %xmm3, %xmm0
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotl_v4i32:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: pslld $23, %xmm1
+; X86-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
+; X86-SSE2-NEXT: cvttps2dq %xmm1, %xmm1
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X86-SSE2-NEXT: pmuludq %xmm2, %xmm1
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-SSE2-NEXT: por %xmm3, %xmm0
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotl_v4i32:
; X64-AVX2: # %bb.0:
@@ -164,13 +164,13 @@ define <4 x i32> @rotl_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind {
; Vector rotate by constant splat amount.
define <4 x i32> @rotl_v4i32_const_shift(<4 x i32> %x) nounwind {
-; X32-SSE2-LABEL: rotl_v4i32_const_shift:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE2-NEXT: psrld $29, %xmm1
-; X32-SSE2-NEXT: pslld $3, %xmm0
-; X32-SSE2-NEXT: por %xmm1, %xmm0
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotl_v4i32_const_shift:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X86-SSE2-NEXT: psrld $29, %xmm1
+; X86-SSE2-NEXT: pslld $3, %xmm0
+; X86-SSE2-NEXT: por %xmm1, %xmm0
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotl_v4i32_const_shift:
; X64-AVX2: # %bb.0:
@@ -185,11 +185,11 @@ define <4 x i32> @rotl_v4i32_const_shift(<4 x i32> %x) nounwind {
; Repeat everything for funnel shift right.
define i8 @rotr_i8_const_shift(i8 %x) nounwind {
-; X32-SSE2-LABEL: rotr_i8_const_shift:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-SSE2-NEXT: rorb $3, %al
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotr_i8_const_shift:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-SSE2-NEXT: rorb $3, %al
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotr_i8_const_shift:
; X64-AVX2: # %bb.0:
@@ -202,11 +202,11 @@ define i8 @rotr_i8_const_shift(i8 %x) nounwind {
}
define i8 @rotr_i8_const_shift1(i8 %x) nounwind {
-; X32-SSE2-LABEL: rotr_i8_const_shift1:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-SSE2-NEXT: rorb %al
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotr_i8_const_shift1:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-SSE2-NEXT: rorb %al
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotr_i8_const_shift1:
; X64-AVX2: # %bb.0:
@@ -219,11 +219,11 @@ define i8 @rotr_i8_const_shift1(i8 %x) nounwind {
}
define i8 @rotr_i8_const_shift7(i8 %x) nounwind {
-; X32-SSE2-LABEL: rotr_i8_const_shift7:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-SSE2-NEXT: rolb %al
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotr_i8_const_shift7:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-SSE2-NEXT: rolb %al
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotr_i8_const_shift7:
; X64-AVX2: # %bb.0:
@@ -236,11 +236,11 @@ define i8 @rotr_i8_const_shift7(i8 %x) nounwind {
}
define i32 @rotr_i32_const_shift(i32 %x) nounwind {
-; X32-SSE2-LABEL: rotr_i32_const_shift:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: rorl $3, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotr_i32_const_shift:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: rorl $3, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotr_i32_const_shift:
; X64-AVX2: # %bb.0:
@@ -254,12 +254,12 @@ define i32 @rotr_i32_const_shift(i32 %x) nounwind {
; When first 2 operands match, it's a rotate (by variable amount).
define i16 @rotr_i16(i16 %x, i16 %z) nounwind {
-; X32-SSE2-LABEL: rotr_i16:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: rorw %cl, %ax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotr_i16:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: movzwl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: rorw %cl, %ax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotr_i16:
; X64-AVX2: # %bb.0:
@@ -274,22 +274,22 @@ define i16 @rotr_i16(i16 %x, i16 %z) nounwind {
}
define i64 @rotr_i64(i64 %x, i64 %z) nounwind {
-; X32-SSE2-LABEL: rotr_i64:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pushl %esi
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: testb $32, %cl
-; X32-SSE2-NEXT: movl %eax, %edx
-; X32-SSE2-NEXT: cmovel %esi, %edx
-; X32-SSE2-NEXT: cmovel %eax, %esi
-; X32-SSE2-NEXT: movl %esi, %eax
-; X32-SSE2-NEXT: shrdl %cl, %edx, %eax
-; X32-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-SSE2-NEXT: shrdl %cl, %esi, %edx
-; X32-SSE2-NEXT: popl %esi
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotr_i64:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %esi
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: testb $32, %cl
+; X86-SSE2-NEXT: movl %eax, %edx
+; X86-SSE2-NEXT: cmovel %esi, %edx
+; X86-SSE2-NEXT: cmovel %eax, %esi
+; X86-SSE2-NEXT: movl %esi, %eax
+; X86-SSE2-NEXT: shrdl %cl, %edx, %eax
+; X86-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SSE2-NEXT: shrdl %cl, %esi, %edx
+; X86-SSE2-NEXT: popl %esi
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotr_i64:
; X64-AVX2: # %bb.0:
@@ -305,26 +305,26 @@ define i64 @rotr_i64(i64 %x, i64 %z) nounwind {
; Vector rotate.
define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind {
-; X32-SSE2-LABEL: rotr_v4i32:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pxor %xmm2, %xmm2
-; X32-SSE2-NEXT: psubd %xmm1, %xmm2
-; X32-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
-; X32-SSE2-NEXT: pslld $23, %xmm2
-; X32-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
-; X32-SSE2-NEXT: cvttps2dq %xmm2, %xmm1
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; X32-SSE2-NEXT: pmuludq %xmm1, %xmm0
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; X32-SSE2-NEXT: pmuludq %xmm2, %xmm1
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X32-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; X32-SSE2-NEXT: por %xmm3, %xmm0
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotr_v4i32:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pxor %xmm2, %xmm2
+; X86-SSE2-NEXT: psubd %xmm1, %xmm2
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
+; X86-SSE2-NEXT: pslld $23, %xmm2
+; X86-SSE2-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
+; X86-SSE2-NEXT: cvttps2dq %xmm2, %xmm1
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X86-SSE2-NEXT: pmuludq %xmm1, %xmm0
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X86-SSE2-NEXT: pmuludq %xmm2, %xmm1
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,3,2,3]
+; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; X86-SSE2-NEXT: por %xmm3, %xmm0
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotr_v4i32:
; X64-AVX2: # %bb.0:
@@ -345,13 +345,13 @@ define <4 x i32> @rotr_v4i32(<4 x i32> %x, <4 x i32> %z) nounwind {
; Vector rotate by constant splat amount.
define <4 x i32> @rotr_v4i32_const_shift(<4 x i32> %x) nounwind {
-; X32-SSE2-LABEL: rotr_v4i32_const_shift:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movdqa %xmm0, %xmm1
-; X32-SSE2-NEXT: psrld $3, %xmm1
-; X32-SSE2-NEXT: pslld $29, %xmm0
-; X32-SSE2-NEXT: por %xmm1, %xmm0
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotr_v4i32_const_shift:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X86-SSE2-NEXT: psrld $3, %xmm1
+; X86-SSE2-NEXT: pslld $29, %xmm0
+; X86-SSE2-NEXT: por %xmm1, %xmm0
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotr_v4i32_const_shift:
; X64-AVX2: # %bb.0:
@@ -364,10 +364,10 @@ define <4 x i32> @rotr_v4i32_const_shift(<4 x i32> %x) nounwind {
}
define i32 @rotl_i32_shift_by_bitwidth(i32 %x) nounwind {
-; X32-SSE2-LABEL: rotl_i32_shift_by_bitwidth:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotl_i32_shift_by_bitwidth:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotl_i32_shift_by_bitwidth:
; X64-AVX2: # %bb.0:
@@ -378,10 +378,10 @@ define i32 @rotl_i32_shift_by_bitwidth(i32 %x) nounwind {
}
define i32 @rotr_i32_shift_by_bitwidth(i32 %x) nounwind {
-; X32-SSE2-LABEL: rotr_i32_shift_by_bitwidth:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: rotr_i32_shift_by_bitwidth:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: rotr_i32_shift_by_bitwidth:
; X64-AVX2: # %bb.0:
@@ -392,17 +392,17 @@ define i32 @rotr_i32_shift_by_bitwidth(i32 %x) nounwind {
}
define <4 x i32> @rotl_v4i32_shift_by_bitwidth(<4 x i32> %x) nounwind {
-; ANY-LABEL: rotl_v4i32_shift_by_bitwidth:
-; ANY: # %bb.0:
-; ANY-NEXT: ret{{[l|q]}}
+; CHECK-LABEL: rotl_v4i32_shift_by_bitwidth:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ret{{[l|q]}}
%f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
ret <4 x i32> %f
}
define <4 x i32> @rotr_v4i32_shift_by_bitwidth(<4 x i32> %x) nounwind {
-; ANY-LABEL: rotr_v4i32_shift_by_bitwidth:
-; ANY: # %bb.0:
-; ANY-NEXT: ret{{[l|q]}}
+; CHECK-LABEL: rotr_v4i32_shift_by_bitwidth:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ret{{[l|q]}}
%f = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
ret <4 x i32> %f
}
@@ -416,10 +416,10 @@ declare i7 @llvm.fshr.i7(i7, i7, i7)
; Try an oversized shift to test modulo functionality.
define i7 @fshl_i7() {
-; ANY-LABEL: fshl_i7:
-; ANY: # %bb.0:
-; ANY-NEXT: movb $67, %al
-; ANY-NEXT: ret{{[l|q]}}
+; CHECK-LABEL: fshl_i7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movb $67, %al
+; CHECK-NEXT: ret{{[l|q]}}
%f = call i7 @llvm.fshl.i7(i7 112, i7 112, i7 9)
ret i7 %f
}
@@ -428,10 +428,10 @@ define i7 @fshl_i7() {
; Try an oversized shift to test modulo functionality.
define i7 @fshr_i7() {
-; ANY-LABEL: fshr_i7:
-; ANY: # %bb.0:
-; ANY-NEXT: movb $60, %al
-; ANY-NEXT: ret{{[l|q]}}
+; CHECK-LABEL: fshr_i7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movb $60, %al
+; CHECK-NEXT: ret{{[l|q]}}
%f = call i7 @llvm.fshr.i7(i7 113, i7 113, i7 16)
ret i7 %f
}
diff --git a/llvm/test/CodeGen/X86/funnel-shift.ll b/llvm/test/CodeGen/X86/funnel-shift.ll
index ef1761d39f9e..2577c333c928 100644
--- a/llvm/test/CodeGen/X86/funnel-shift.ll
+++ b/llvm/test/CodeGen/X86/funnel-shift.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-- -mattr=sse2 | FileCheck %s --check-prefixes=ANY,X32-SSE2
-; RUN: llc < %s -mtriple=x86_64-- -mattr=avx2 | FileCheck %s --check-prefixes=ANY,X64-AVX2
+; RUN: llc < %s -mtriple=i686-- -mattr=sse2 | FileCheck %s --check-prefixes=CHECK,X86-SSE2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,X64-AVX2
declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i16 @llvm.fshl.i16(i16, i16, i16)
@@ -18,13 +18,13 @@ declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
; General case - all operands can be variables
define i32 @fshl_i32(i32 %x, i32 %y, i32 %z) nounwind {
-; X32-SSE2-LABEL: fshl_i32:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shldl %cl, %edx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shldl %cl, %edx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32:
; X64-AVX2: # %bb.0:
@@ -38,25 +38,25 @@ define i32 @fshl_i32(i32 %x, i32 %y, i32 %z) nounwind {
}
define i64 @fshl_i64(i64 %x, i64 %y, i64 %z) nounwind {
-; X32-SSE2-LABEL: fshl_i64:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pushl %edi
-; X32-SSE2-NEXT: pushl %esi
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: testb $32, %cl
-; X32-SSE2-NEXT: movl %edx, %edi
-; X32-SSE2-NEXT: cmovnel %esi, %edi
-; X32-SSE2-NEXT: cmovel {{[0-9]+}}(%esp), %edx
-; X32-SSE2-NEXT: cmovnel {{[0-9]+}}(%esp), %esi
-; X32-SSE2-NEXT: movl %edi, %eax
-; X32-SSE2-NEXT: shldl %cl, %esi, %eax
-; X32-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-SSE2-NEXT: shldl %cl, %edi, %edx
-; X32-SSE2-NEXT: popl %esi
-; X32-SSE2-NEXT: popl %edi
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i64:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %edi
+; X86-SSE2-NEXT: pushl %esi
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: testb $32, %cl
+; X86-SSE2-NEXT: movl %edx, %edi
+; X86-SSE2-NEXT: cmovnel %esi, %edi
+; X86-SSE2-NEXT: cmovel {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: cmovnel {{[0-9]+}}(%esp), %esi
+; X86-SSE2-NEXT: movl %edi, %eax
+; X86-SSE2-NEXT: shldl %cl, %esi, %eax
+; X86-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SSE2-NEXT: shldl %cl, %edi, %edx
+; X86-SSE2-NEXT: popl %esi
+; X86-SSE2-NEXT: popl %edi
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i64:
; X64-AVX2: # %bb.0:
@@ -70,51 +70,51 @@ define i64 @fshl_i64(i64 %x, i64 %y, i64 %z) nounwind {
}
define i128 @fshl_i128(i128 %x, i128 %y, i128 %z) nounwind {
-; X32-SSE2-LABEL: fshl_i128:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pushl %ebp
-; X32-SSE2-NEXT: pushl %ebx
-; X32-SSE2-NEXT: pushl %edi
-; X32-SSE2-NEXT: pushl %esi
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: testb $64, %cl
-; X32-SSE2-NEXT: movl %esi, %eax
-; X32-SSE2-NEXT: cmovnel %ebx, %eax
-; X32-SSE2-NEXT: movl %edx, %ebp
-; X32-SSE2-NEXT: cmovnel %edi, %ebp
-; X32-SSE2-NEXT: cmovnel {{[0-9]+}}(%esp), %edi
-; X32-SSE2-NEXT: cmovnel {{[0-9]+}}(%esp), %ebx
-; X32-SSE2-NEXT: cmovel {{[0-9]+}}(%esp), %edx
-; X32-SSE2-NEXT: cmovel {{[0-9]+}}(%esp), %esi
-; X32-SSE2-NEXT: testb $32, %cl
-; X32-SSE2-NEXT: cmovnel %esi, %edx
-; X32-SSE2-NEXT: cmovnel %ebp, %esi
-; X32-SSE2-NEXT: cmovnel %eax, %ebp
-; X32-SSE2-NEXT: cmovel %edi, %ebx
-; X32-SSE2-NEXT: cmovel %eax, %edi
-; X32-SSE2-NEXT: movl %edi, %eax
-; X32-SSE2-NEXT: shldl %cl, %ebx, %eax
-; X32-SSE2-NEXT: movl %ebp, %ebx
-; X32-SSE2-NEXT: shldl %cl, %edi, %ebx
-; X32-SSE2-NEXT: movl %esi, %edi
-; X32-SSE2-NEXT: shldl %cl, %ebp, %edi
-; X32-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-SSE2-NEXT: shldl %cl, %esi, %edx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl %edx, 12(%ecx)
-; X32-SSE2-NEXT: movl %edi, 8(%ecx)
-; X32-SSE2-NEXT: movl %ebx, 4(%ecx)
-; X32-SSE2-NEXT: movl %eax, (%ecx)
-; X32-SSE2-NEXT: movl %ecx, %eax
-; X32-SSE2-NEXT: popl %esi
-; X32-SSE2-NEXT: popl %edi
-; X32-SSE2-NEXT: popl %ebx
-; X32-SSE2-NEXT: popl %ebp
-; X32-SSE2-NEXT: retl $4
+; X86-SSE2-LABEL: fshl_i128:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: pushl %ebx
+; X86-SSE2-NEXT: pushl %edi
+; X86-SSE2-NEXT: pushl %esi
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: testb $64, %cl
+; X86-SSE2-NEXT: movl %esi, %eax
+; X86-SSE2-NEXT: cmovnel %ebx, %eax
+; X86-SSE2-NEXT: movl %edx, %ebp
+; X86-SSE2-NEXT: cmovnel %edi, %ebp
+; X86-SSE2-NEXT: cmovnel {{[0-9]+}}(%esp), %edi
+; X86-SSE2-NEXT: cmovnel {{[0-9]+}}(%esp), %ebx
+; X86-SSE2-NEXT: cmovel {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: cmovel {{[0-9]+}}(%esp), %esi
+; X86-SSE2-NEXT: testb $32, %cl
+; X86-SSE2-NEXT: cmovnel %esi, %edx
+; X86-SSE2-NEXT: cmovnel %ebp, %esi
+; X86-SSE2-NEXT: cmovnel %eax, %ebp
+; X86-SSE2-NEXT: cmovel %edi, %ebx
+; X86-SSE2-NEXT: cmovel %eax, %edi
+; X86-SSE2-NEXT: movl %edi, %eax
+; X86-SSE2-NEXT: shldl %cl, %ebx, %eax
+; X86-SSE2-NEXT: movl %ebp, %ebx
+; X86-SSE2-NEXT: shldl %cl, %edi, %ebx
+; X86-SSE2-NEXT: movl %esi, %edi
+; X86-SSE2-NEXT: shldl %cl, %ebp, %edi
+; X86-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SSE2-NEXT: shldl %cl, %esi, %edx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl %edx, 12(%ecx)
+; X86-SSE2-NEXT: movl %edi, 8(%ecx)
+; X86-SSE2-NEXT: movl %ebx, 4(%ecx)
+; X86-SSE2-NEXT: movl %eax, (%ecx)
+; X86-SSE2-NEXT: movl %ecx, %eax
+; X86-SSE2-NEXT: popl %esi
+; X86-SSE2-NEXT: popl %edi
+; X86-SSE2-NEXT: popl %ebx
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl $4
;
; X64-AVX2-LABEL: fshl_i128:
; X64-AVX2: # %bb.0:
@@ -135,41 +135,41 @@ define i128 @fshl_i128(i128 %x, i128 %y, i128 %z) nounwind {
; Verify that weird types are minimally supported.
declare i37 @llvm.fshl.i37(i37, i37, i37)
define i37 @fshl_i37(i37 %x, i37 %y, i37 %z) nounwind {
-; X32-SSE2-LABEL: fshl_i37:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pushl %ebx
-; X32-SSE2-NEXT: pushl %edi
-; X32-SSE2-NEXT: pushl %esi
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-SSE2-NEXT: shldl $27, %ebx, %edi
-; X32-SSE2-NEXT: pushl $0
-; X32-SSE2-NEXT: pushl $37
-; X32-SSE2-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-SSE2-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-SSE2-NEXT: calll __umoddi3
-; X32-SSE2-NEXT: addl $16, %esp
-; X32-SSE2-NEXT: movl %eax, %ecx
-; X32-SSE2-NEXT: testb $32, %cl
-; X32-SSE2-NEXT: jne .LBB3_1
-; X32-SSE2-NEXT: # %bb.2:
-; X32-SSE2-NEXT: movl %edi, %ebx
-; X32-SSE2-NEXT: movl %esi, %edi
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-SSE2-NEXT: jmp .LBB3_3
-; X32-SSE2-NEXT: .LBB3_1:
-; X32-SSE2-NEXT: shll $27, %ebx
-; X32-SSE2-NEXT: .LBB3_3:
-; X32-SSE2-NEXT: movl %edi, %eax
-; X32-SSE2-NEXT: shldl %cl, %ebx, %eax
-; X32-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-SSE2-NEXT: shldl %cl, %edi, %esi
-; X32-SSE2-NEXT: movl %esi, %edx
-; X32-SSE2-NEXT: popl %esi
-; X32-SSE2-NEXT: popl %edi
-; X32-SSE2-NEXT: popl %ebx
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i37:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebx
+; X86-SSE2-NEXT: pushl %edi
+; X86-SSE2-NEXT: pushl %esi
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-SSE2-NEXT: shldl $27, %ebx, %edi
+; X86-SSE2-NEXT: pushl $0
+; X86-SSE2-NEXT: pushl $37
+; X86-SSE2-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: calll __umoddi3
+; X86-SSE2-NEXT: addl $16, %esp
+; X86-SSE2-NEXT: movl %eax, %ecx
+; X86-SSE2-NEXT: testb $32, %cl
+; X86-SSE2-NEXT: jne .LBB3_1
+; X86-SSE2-NEXT: # %bb.2:
+; X86-SSE2-NEXT: movl %edi, %ebx
+; X86-SSE2-NEXT: movl %esi, %edi
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-SSE2-NEXT: jmp .LBB3_3
+; X86-SSE2-NEXT: .LBB3_1:
+; X86-SSE2-NEXT: shll $27, %ebx
+; X86-SSE2-NEXT: .LBB3_3:
+; X86-SSE2-NEXT: movl %edi, %eax
+; X86-SSE2-NEXT: shldl %cl, %ebx, %eax
+; X86-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SSE2-NEXT: shldl %cl, %edi, %esi
+; X86-SSE2-NEXT: movl %esi, %edx
+; X86-SSE2-NEXT: popl %esi
+; X86-SSE2-NEXT: popl %edi
+; X86-SSE2-NEXT: popl %ebx
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i37:
; X64-AVX2: # %bb.0:
@@ -194,10 +194,10 @@ define i37 @fshl_i37(i37 %x, i37 %y, i37 %z) nounwind {
declare i7 @llvm.fshl.i7(i7, i7, i7)
define i7 @fshl_i7_const_fold() {
-; ANY-LABEL: fshl_i7_const_fold:
-; ANY: # %bb.0:
-; ANY-NEXT: movb $67, %al
-; ANY-NEXT: ret{{[l|q]}}
+; CHECK-LABEL: fshl_i7_const_fold:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movb $67, %al
+; CHECK-NEXT: ret{{[l|q]}}
%f = call i7 @llvm.fshl.i7(i7 112, i7 127, i7 2)
ret i7 %f
}
@@ -205,12 +205,12 @@ define i7 @fshl_i7_const_fold() {
; With constant shift amount, this is 'shld' with constant operand.
define i32 @fshl_i32_const_shift(i32 %x, i32 %y) nounwind {
-; X32-SSE2-LABEL: fshl_i32_const_shift:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shldl $9, %ecx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_const_shift:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shldl $9, %ecx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_const_shift:
; X64-AVX2: # %bb.0:
@@ -224,12 +224,12 @@ define i32 @fshl_i32_const_shift(i32 %x, i32 %y) nounwind {
; Check modulo math on shift amount.
define i32 @fshl_i32_const_overshift(i32 %x, i32 %y) nounwind {
-; X32-SSE2-LABEL: fshl_i32_const_overshift:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shldl $9, %ecx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_const_overshift:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shldl $9, %ecx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_const_overshift:
; X64-AVX2: # %bb.0:
@@ -243,14 +243,14 @@ define i32 @fshl_i32_const_overshift(i32 %x, i32 %y) nounwind {
; 64-bit should also work.
define i64 @fshl_i64_const_overshift(i64 %x, i64 %y) nounwind {
-; X32-SSE2-LABEL: fshl_i64_const_overshift:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-SSE2-NEXT: shldl $9, %ecx, %edx
-; X32-SSE2-NEXT: shrdl $23, %ecx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i64_const_overshift:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: shldl $9, %ecx, %edx
+; X86-SSE2-NEXT: shrdl $23, %ecx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i64_const_overshift:
; X64-AVX2: # %bb.0:
@@ -264,10 +264,10 @@ define i64 @fshl_i64_const_overshift(i64 %x, i64 %y) nounwind {
; This should work without any node-specific logic.
define i8 @fshl_i8_const_fold() nounwind {
-; ANY-LABEL: fshl_i8_const_fold:
-; ANY: # %bb.0:
-; ANY-NEXT: movb $-128, %al
-; ANY-NEXT: ret{{[l|q]}}
+; CHECK-LABEL: fshl_i8_const_fold:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movb $-128, %al
+; CHECK-NEXT: ret{{[l|q]}}
%f = call i8 @llvm.fshl.i8(i8 255, i8 0, i8 7)
ret i8 %f
}
@@ -277,13 +277,13 @@ define i8 @fshl_i8_const_fold() nounwind {
; General case - all operands can be variables
define i32 @fshr_i32(i32 %x, i32 %y, i32 %z) nounwind {
-; X32-SSE2-LABEL: fshr_i32:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shrdl %cl, %edx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shrdl %cl, %edx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32:
; X64-AVX2: # %bb.0:
@@ -299,42 +299,42 @@ define i32 @fshr_i32(i32 %x, i32 %y, i32 %z) nounwind {
; Verify that weird types are minimally supported.
declare i37 @llvm.fshr.i37(i37, i37, i37)
define i37 @fshr_i37(i37 %x, i37 %y, i37 %z) nounwind {
-; X32-SSE2-LABEL: fshr_i37:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pushl %ebx
-; X32-SSE2-NEXT: pushl %edi
-; X32-SSE2-NEXT: pushl %esi
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ebx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-SSE2-NEXT: shldl $27, %ebx, %esi
-; X32-SSE2-NEXT: pushl $0
-; X32-SSE2-NEXT: pushl $37
-; X32-SSE2-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-SSE2-NEXT: pushl {{[0-9]+}}(%esp)
-; X32-SSE2-NEXT: calll __umoddi3
-; X32-SSE2-NEXT: addl $16, %esp
-; X32-SSE2-NEXT: movl %eax, %ecx
-; X32-SSE2-NEXT: addl $27, %ecx
-; X32-SSE2-NEXT: testb $32, %cl
-; X32-SSE2-NEXT: je .LBB10_1
-; X32-SSE2-NEXT: # %bb.2:
-; X32-SSE2-NEXT: movl %edi, %edx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edi
-; X32-SSE2-NEXT: jmp .LBB10_3
-; X32-SSE2-NEXT: .LBB10_1:
-; X32-SSE2-NEXT: shll $27, %ebx
-; X32-SSE2-NEXT: movl %esi, %edx
-; X32-SSE2-NEXT: movl %ebx, %esi
-; X32-SSE2-NEXT: .LBB10_3:
-; X32-SSE2-NEXT: shrdl %cl, %edx, %esi
-; X32-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-SSE2-NEXT: shrdl %cl, %edi, %edx
-; X32-SSE2-NEXT: movl %esi, %eax
-; X32-SSE2-NEXT: popl %esi
-; X32-SSE2-NEXT: popl %edi
-; X32-SSE2-NEXT: popl %ebx
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i37:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebx
+; X86-SSE2-NEXT: pushl %edi
+; X86-SSE2-NEXT: pushl %esi
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ebx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-SSE2-NEXT: shldl $27, %ebx, %esi
+; X86-SSE2-NEXT: pushl $0
+; X86-SSE2-NEXT: pushl $37
+; X86-SSE2-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: calll __umoddi3
+; X86-SSE2-NEXT: addl $16, %esp
+; X86-SSE2-NEXT: movl %eax, %ecx
+; X86-SSE2-NEXT: addl $27, %ecx
+; X86-SSE2-NEXT: testb $32, %cl
+; X86-SSE2-NEXT: je .LBB10_1
+; X86-SSE2-NEXT: # %bb.2:
+; X86-SSE2-NEXT: movl %edi, %edx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edi
+; X86-SSE2-NEXT: jmp .LBB10_3
+; X86-SSE2-NEXT: .LBB10_1:
+; X86-SSE2-NEXT: shll $27, %ebx
+; X86-SSE2-NEXT: movl %esi, %edx
+; X86-SSE2-NEXT: movl %ebx, %esi
+; X86-SSE2-NEXT: .LBB10_3:
+; X86-SSE2-NEXT: shrdl %cl, %edx, %esi
+; X86-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SSE2-NEXT: shrdl %cl, %edi, %edx
+; X86-SSE2-NEXT: movl %esi, %eax
+; X86-SSE2-NEXT: popl %esi
+; X86-SSE2-NEXT: popl %edi
+; X86-SSE2-NEXT: popl %ebx
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i37:
; X64-AVX2: # %bb.0:
@@ -360,10 +360,10 @@ define i37 @fshr_i37(i37 %x, i37 %y, i37 %z) nounwind {
declare i7 @llvm.fshr.i7(i7, i7, i7)
define i7 @fshr_i7_const_fold() nounwind {
-; ANY-LABEL: fshr_i7_const_fold:
-; ANY: # %bb.0:
-; ANY-NEXT: movb $31, %al
-; ANY-NEXT: ret{{[l|q]}}
+; CHECK-LABEL: fshr_i7_const_fold:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movb $31, %al
+; CHECK-NEXT: ret{{[l|q]}}
%f = call i7 @llvm.fshr.i7(i7 112, i7 127, i7 2)
ret i7 %f
}
@@ -371,12 +371,12 @@ define i7 @fshr_i7_const_fold() nounwind {
; demanded bits tests
define i32 @fshl_i32_demandedbits(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshl_i32_demandedbits:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shldl $9, %ecx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_demandedbits:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shldl $9, %ecx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_demandedbits:
; X64-AVX2: # %bb.0:
@@ -390,12 +390,12 @@ define i32 @fshl_i32_demandedbits(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshr_i32_demandedbits(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshr_i32_demandedbits:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shrdl $9, %ecx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_demandedbits:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shrdl $9, %ecx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_demandedbits:
; X64-AVX2: # %bb.0:
@@ -411,12 +411,12 @@ define i32 @fshr_i32_demandedbits(i32 %a0, i32 %a1) nounwind {
; undef handling
define i32 @fshl_i32_undef0(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshl_i32_undef0:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shldl %cl, %eax, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_undef0:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shldl %cl, %eax, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_undef0:
; X64-AVX2: # %bb.0:
@@ -429,14 +429,14 @@ define i32 @fshl_i32_undef0(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshl_i32_undef0_msk(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshl_i32_undef0_msk:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: andl $7, %ecx
-; X32-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-SSE2-NEXT: shldl %cl, %eax, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_undef0_msk:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: andl $7, %ecx
+; X86-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SSE2-NEXT: shldl %cl, %eax, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_undef0_msk:
; X64-AVX2: # %bb.0:
@@ -451,11 +451,11 @@ define i32 @fshl_i32_undef0_msk(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshl_i32_undef0_cst(i32 %a0) nounwind {
-; X32-SSE2-LABEL: fshl_i32_undef0_cst:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shrl $23, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_undef0_cst:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shrl $23, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_undef0_cst:
; X64-AVX2: # %bb.0:
@@ -467,12 +467,12 @@ define i32 @fshl_i32_undef0_cst(i32 %a0) nounwind {
}
define i32 @fshl_i32_undef1(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshl_i32_undef1:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shldl %cl, %eax, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_undef1:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shldl %cl, %eax, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_undef1:
; X64-AVX2: # %bb.0:
@@ -486,13 +486,13 @@ define i32 @fshl_i32_undef1(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshl_i32_undef1_msk(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshl_i32_undef1_msk:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: andb $7, %cl
-; X32-SSE2-NEXT: shll %cl, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_undef1_msk:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: andb $7, %cl
+; X86-SSE2-NEXT: shll %cl, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_undef1_msk:
; X64-AVX2: # %bb.0:
@@ -508,11 +508,11 @@ define i32 @fshl_i32_undef1_msk(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshl_i32_undef1_cst(i32 %a0) nounwind {
-; X32-SSE2-LABEL: fshl_i32_undef1_cst:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shll $9, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_undef1_cst:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shll $9, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_undef1_cst:
; X64-AVX2: # %bb.0:
@@ -524,12 +524,12 @@ define i32 @fshl_i32_undef1_cst(i32 %a0) nounwind {
}
define i32 @fshl_i32_undef2(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshl_i32_undef2:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shldl %cl, %ecx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_undef2:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shldl %cl, %ecx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_undef2:
; X64-AVX2: # %bb.0:
@@ -541,12 +541,12 @@ define i32 @fshl_i32_undef2(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshr_i32_undef0(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshr_i32_undef0:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shrdl %cl, %eax, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_undef0:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shrdl %cl, %eax, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_undef0:
; X64-AVX2: # %bb.0:
@@ -560,13 +560,13 @@ define i32 @fshr_i32_undef0(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshr_i32_undef0_msk(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshr_i32_undef0_msk:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: andb $7, %cl
-; X32-SSE2-NEXT: shrl %cl, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_undef0_msk:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: andb $7, %cl
+; X86-SSE2-NEXT: shrl %cl, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_undef0_msk:
; X64-AVX2: # %bb.0:
@@ -582,11 +582,11 @@ define i32 @fshr_i32_undef0_msk(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshr_i32_undef0_cst(i32 %a0) nounwind {
-; X32-SSE2-LABEL: fshr_i32_undef0_cst:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shrl $9, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_undef0_cst:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shrl $9, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_undef0_cst:
; X64-AVX2: # %bb.0:
@@ -598,12 +598,12 @@ define i32 @fshr_i32_undef0_cst(i32 %a0) nounwind {
}
define i32 @fshr_i32_undef1(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshr_i32_undef1:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shrdl %cl, %eax, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_undef1:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shrdl %cl, %eax, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_undef1:
; X64-AVX2: # %bb.0:
@@ -616,14 +616,14 @@ define i32 @fshr_i32_undef1(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshr_i32_undef1_msk(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshr_i32_undef1_msk:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: andl $7, %ecx
-; X32-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
-; X32-SSE2-NEXT: shrdl %cl, %eax, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_undef1_msk:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: andl $7, %ecx
+; X86-SSE2-NEXT: # kill: def $cl killed $cl killed $ecx
+; X86-SSE2-NEXT: shrdl %cl, %eax, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_undef1_msk:
; X64-AVX2: # %bb.0:
@@ -638,11 +638,11 @@ define i32 @fshr_i32_undef1_msk(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshr_i32_undef1_cst(i32 %a0) nounwind {
-; X32-SSE2-LABEL: fshr_i32_undef1_cst:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shll $23, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_undef1_cst:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shll $23, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_undef1_cst:
; X64-AVX2: # %bb.0:
@@ -654,12 +654,12 @@ define i32 @fshr_i32_undef1_cst(i32 %a0) nounwind {
}
define i32 @fshr_i32_undef2(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshr_i32_undef2:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shrdl %cl, %ecx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_undef2:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shrdl %cl, %ecx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_undef2:
; X64-AVX2: # %bb.0:
@@ -673,13 +673,13 @@ define i32 @fshr_i32_undef2(i32 %a0, i32 %a1) nounwind {
; shift zero args
define i32 @fshl_i32_zero0(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshl_i32_zero0:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-SSE2-NEXT: xorl %eax, %eax
-; X32-SSE2-NEXT: shldl %cl, %edx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_zero0:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: xorl %eax, %eax
+; X86-SSE2-NEXT: shldl %cl, %edx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_zero0:
; X64-AVX2: # %bb.0:
@@ -693,11 +693,11 @@ define i32 @fshl_i32_zero0(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshl_i32_zero0_cst(i32 %a0) nounwind {
-; X32-SSE2-LABEL: fshl_i32_zero0_cst:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shrl $23, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_zero0_cst:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shrl $23, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_zero0_cst:
; X64-AVX2: # %bb.0:
@@ -709,13 +709,13 @@ define i32 @fshl_i32_zero0_cst(i32 %a0) nounwind {
}
define i32 @fshl_i32_zero1(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshl_i32_zero1:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: xorl %edx, %edx
-; X32-SSE2-NEXT: shldl %cl, %edx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_zero1:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: xorl %edx, %edx
+; X86-SSE2-NEXT: shldl %cl, %edx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_zero1:
; X64-AVX2: # %bb.0:
@@ -730,11 +730,11 @@ define i32 @fshl_i32_zero1(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshl_i32_zero1_cst(i32 %a0) nounwind {
-; X32-SSE2-LABEL: fshl_i32_zero1_cst:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shll $9, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_zero1_cst:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shll $9, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_zero1_cst:
; X64-AVX2: # %bb.0:
@@ -746,13 +746,13 @@ define i32 @fshl_i32_zero1_cst(i32 %a0) nounwind {
}
define i32 @fshr_i32_zero0(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshr_i32_zero0:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: xorl %edx, %edx
-; X32-SSE2-NEXT: shrdl %cl, %edx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_zero0:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: xorl %edx, %edx
+; X86-SSE2-NEXT: shrdl %cl, %edx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_zero0:
; X64-AVX2: # %bb.0:
@@ -767,11 +767,11 @@ define i32 @fshr_i32_zero0(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshr_i32_zero0_cst(i32 %a0) nounwind {
-; X32-SSE2-LABEL: fshr_i32_zero0_cst:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shrl $9, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_zero0_cst:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shrl $9, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_zero0_cst:
; X64-AVX2: # %bb.0:
@@ -783,13 +783,13 @@ define i32 @fshr_i32_zero0_cst(i32 %a0) nounwind {
}
define i32 @fshr_i32_zero1(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshr_i32_zero1:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-SSE2-NEXT: xorl %eax, %eax
-; X32-SSE2-NEXT: shrdl %cl, %edx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_zero1:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movb {{[0-9]+}}(%esp), %cl
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: xorl %eax, %eax
+; X86-SSE2-NEXT: shrdl %cl, %edx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_zero1:
; X64-AVX2: # %bb.0:
@@ -803,11 +803,11 @@ define i32 @fshr_i32_zero1(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshr_i32_zero1_cst(i32 %a0) nounwind {
-; X32-SSE2-LABEL: fshr_i32_zero1_cst:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shll $23, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_zero1_cst:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shll $23, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_zero1_cst:
; X64-AVX2: # %bb.0:
@@ -821,10 +821,10 @@ define i32 @fshr_i32_zero1_cst(i32 %a0) nounwind {
; shift by zero
define i32 @fshl_i32_zero2(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshl_i32_zero2:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_zero2:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_zero2:
; X64-AVX2: # %bb.0:
@@ -835,10 +835,10 @@ define i32 @fshl_i32_zero2(i32 %a0, i32 %a1) nounwind {
}
define i32 @fshr_i32_zero2(i32 %a0, i32 %a1) nounwind {
-; X32-SSE2-LABEL: fshr_i32_zero2:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_zero2:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_zero2:
; X64-AVX2: # %bb.0:
@@ -851,12 +851,12 @@ define i32 @fshr_i32_zero2(i32 %a0, i32 %a1) nounwind {
; With constant shift amount, this is 'shrd' or 'shld'.
define i32 @fshr_i32_const_shift(i32 %x, i32 %y) nounwind {
-; X32-SSE2-LABEL: fshr_i32_const_shift:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shrdl $9, %ecx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_const_shift:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shrdl $9, %ecx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_const_shift:
; X64-AVX2: # %bb.0:
@@ -870,12 +870,12 @@ define i32 @fshr_i32_const_shift(i32 %x, i32 %y) nounwind {
; Check modulo math on shift amount. 41-32=9, but right-shift may became left, so 32-9=23.
define i32 @fshr_i32_const_overshift(i32 %x, i32 %y) nounwind {
-; X32-SSE2-LABEL: fshr_i32_const_overshift:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shrdl $9, %ecx, %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_const_overshift:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shrdl $9, %ecx, %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_const_overshift:
; X64-AVX2: # %bb.0:
@@ -889,14 +889,14 @@ define i32 @fshr_i32_const_overshift(i32 %x, i32 %y) nounwind {
; 64-bit should also work. 105-64 = 41, but right-shift became left, so 64-41=23.
define i64 @fshr_i64_const_overshift(i64 %x, i64 %y) nounwind {
-; X32-SSE2-LABEL: fshr_i64_const_overshift:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: shrdl $9, %ecx, %eax
-; X32-SSE2-NEXT: shldl $23, %ecx, %edx
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i64_const_overshift:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: shrdl $9, %ecx, %eax
+; X86-SSE2-NEXT: shldl $23, %ecx, %edx
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i64_const_overshift:
; X64-AVX2: # %bb.0:
@@ -910,19 +910,19 @@ define i64 @fshr_i64_const_overshift(i64 %x, i64 %y) nounwind {
; This should work without any node-specific logic.
define i8 @fshr_i8_const_fold() nounwind {
-; ANY-LABEL: fshr_i8_const_fold:
-; ANY: # %bb.0:
-; ANY-NEXT: movb $-2, %al
-; ANY-NEXT: ret{{[l|q]}}
+; CHECK-LABEL: fshr_i8_const_fold:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movb $-2, %al
+; CHECK-NEXT: ret{{[l|q]}}
%f = call i8 @llvm.fshr.i8(i8 255, i8 0, i8 7)
ret i8 %f
}
define i32 @fshl_i32_shift_by_bitwidth(i32 %x, i32 %y) nounwind {
-; X32-SSE2-LABEL: fshl_i32_shift_by_bitwidth:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshl_i32_shift_by_bitwidth:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshl_i32_shift_by_bitwidth:
; X64-AVX2: # %bb.0:
@@ -933,10 +933,10 @@ define i32 @fshl_i32_shift_by_bitwidth(i32 %x, i32 %y) nounwind {
}
define i32 @fshr_i32_shift_by_bitwidth(i32 %x, i32 %y) nounwind {
-; X32-SSE2-LABEL: fshr_i32_shift_by_bitwidth:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_i32_shift_by_bitwidth:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_i32_shift_by_bitwidth:
; X64-AVX2: # %bb.0:
@@ -947,18 +947,18 @@ define i32 @fshr_i32_shift_by_bitwidth(i32 %x, i32 %y) nounwind {
}
define <4 x i32> @fshl_v4i32_shift_by_bitwidth(<4 x i32> %x, <4 x i32> %y) nounwind {
-; ANY-LABEL: fshl_v4i32_shift_by_bitwidth:
-; ANY: # %bb.0:
-; ANY-NEXT: ret{{[l|q]}}
+; CHECK-LABEL: fshl_v4i32_shift_by_bitwidth:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ret{{[l|q]}}
%f = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> <i32 32, i32 32, i32 32, i32 32>)
ret <4 x i32> %f
}
define <4 x i32> @fshr_v4i32_shift_by_bitwidth(<4 x i32> %x, <4 x i32> %y) nounwind {
-; X32-SSE2-LABEL: fshr_v4i32_shift_by_bitwidth:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: movaps %xmm1, %xmm0
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: fshr_v4i32_shift_by_bitwidth:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movaps %xmm1, %xmm0
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: fshr_v4i32_shift_by_bitwidth:
; X64-AVX2: # %bb.0:
@@ -970,38 +970,38 @@ define <4 x i32> @fshr_v4i32_shift_by_bitwidth(<4 x i32> %x, <4 x i32> %y) nounw
%struct.S = type { [11 x i8], i8 }
define void @PR45265(i32 %0, %struct.S* nocapture readonly %1) nounwind {
-; X32-SSE2-LABEL: PR45265:
-; X32-SSE2: # %bb.0:
-; X32-SSE2-NEXT: pushl %ebx
-; X32-SSE2-NEXT: pushl %edi
-; X32-SSE2-NEXT: pushl %esi
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
-; X32-SSE2-NEXT: leal (%eax,%eax,2), %edi
-; X32-SSE2-NEXT: movzwl 8(%esi,%edi,4), %ebx
-; X32-SSE2-NEXT: movsbl 10(%esi,%edi,4), %ecx
-; X32-SSE2-NEXT: movl %ecx, %edx
-; X32-SSE2-NEXT: shll $16, %edx
-; X32-SSE2-NEXT: orl %ebx, %edx
-; X32-SSE2-NEXT: movl 4(%esi,%edi,4), %esi
-; X32-SSE2-NEXT: shrdl $8, %edx, %esi
-; X32-SSE2-NEXT: xorl %eax, %esi
-; X32-SSE2-NEXT: sarl $31, %eax
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: shldl $24, %edx, %ecx
-; X32-SSE2-NEXT: xorl %eax, %ecx
-; X32-SSE2-NEXT: orl %ecx, %esi
-; X32-SSE2-NEXT: jne .LBB46_1
-; X32-SSE2-NEXT: # %bb.2:
-; X32-SSE2-NEXT: popl %esi
-; X32-SSE2-NEXT: popl %edi
-; X32-SSE2-NEXT: popl %ebx
-; X32-SSE2-NEXT: jmp _Z3foov # TAILCALL
-; X32-SSE2-NEXT: .LBB46_1:
-; X32-SSE2-NEXT: popl %esi
-; X32-SSE2-NEXT: popl %edi
-; X32-SSE2-NEXT: popl %ebx
-; X32-SSE2-NEXT: retl
+; X86-SSE2-LABEL: PR45265:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebx
+; X86-SSE2-NEXT: pushl %edi
+; X86-SSE2-NEXT: pushl %esi
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-SSE2-NEXT: leal (%eax,%eax,2), %edi
+; X86-SSE2-NEXT: movzwl 8(%esi,%edi,4), %ebx
+; X86-SSE2-NEXT: movsbl 10(%esi,%edi,4), %ecx
+; X86-SSE2-NEXT: movl %ecx, %edx
+; X86-SSE2-NEXT: shll $16, %edx
+; X86-SSE2-NEXT: orl %ebx, %edx
+; X86-SSE2-NEXT: movl 4(%esi,%edi,4), %esi
+; X86-SSE2-NEXT: shrdl $8, %edx, %esi
+; X86-SSE2-NEXT: xorl %eax, %esi
+; X86-SSE2-NEXT: sarl $31, %eax
+; X86-SSE2-NEXT: sarl $31, %ecx
+; X86-SSE2-NEXT: shldl $24, %edx, %ecx
+; X86-SSE2-NEXT: xorl %eax, %ecx
+; X86-SSE2-NEXT: orl %ecx, %esi
+; X86-SSE2-NEXT: jne .LBB46_1
+; X86-SSE2-NEXT: # %bb.2:
+; X86-SSE2-NEXT: popl %esi
+; X86-SSE2-NEXT: popl %edi
+; X86-SSE2-NEXT: popl %ebx
+; X86-SSE2-NEXT: jmp _Z3foov # TAILCALL
+; X86-SSE2-NEXT: .LBB46_1:
+; X86-SSE2-NEXT: popl %esi
+; X86-SSE2-NEXT: popl %edi
+; X86-SSE2-NEXT: popl %ebx
+; X86-SSE2-NEXT: retl
;
; X64-AVX2-LABEL: PR45265:
; X64-AVX2: # %bb.0: