[llvm] r276566 - [X86][SSE] Regenerate shifts tests

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sun Jul 24 08:25:37 PDT 2016


Author: rksimon
Date: Sun Jul 24 10:25:36 2016
New Revision: 276566

URL: http://llvm.org/viewvc/llvm-project?rev=276566&view=rev
Log:
[X86][SSE] Regenerate shifts tests

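The CHECK lines below are machine-generated rather than hand-written. As a
minimal sketch of the regeneration step (assuming a built llc is on PATH,
or passed via the script's --llc option):

  $ cd llvm
  $ python utils/update_llc_test_checks.py test/CodeGen/X86/x86-shifts.ll

The script executes each RUN line, captures the assembly llc produces, and
rewrites the assertions in place, emitting one block of checks per
--check-prefix configuration.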

Modified:
    llvm/trunk/test/CodeGen/X86/x86-shifts.ll

Modified: llvm/trunk/test/CodeGen/X86/x86-shifts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/x86-shifts.ll?rev=276566&r1=276565&r2=276566&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/x86-shifts.ll (original)
+++ llvm/trunk/test/CodeGen/X86/x86-shifts.ll Sun Jul 24 10:25:36 2016
@@ -1,14 +1,26 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
 
 ; Splat patterns below
 
-
 define <4 x i32> @shl4(<4 x i32> %A) nounwind {
+; X32-LABEL: shl4:
+; X32:       # BB#0: # %entry
+; X32-NEXT:    movdqa %xmm0, %xmm1
+; X32-NEXT:    pslld $2, %xmm1
+; X32-NEXT:    paddd %xmm0, %xmm0
+; X32-NEXT:    pxor %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: shl4:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    pslld $2, %xmm1
+; X64-NEXT:    paddd %xmm0, %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm0
+; X64-NEXT:    retq
 entry:
-; CHECK:      shl4
-; CHECK:      pslld
-; CHECK:      padd
-; CHECK:      ret
   %B = shl <4 x i32> %A,  < i32 2, i32 2, i32 2, i32 2>
   %C = shl <4 x i32> %A,  < i32 1, i32 1, i32 1, i32 1>
   %K = xor <4 x i32> %B, %C
@@ -16,11 +28,22 @@ entry:
 }
 
 define <4 x i32> @shr4(<4 x i32> %A) nounwind {
+; X32-LABEL: shr4:
+; X32:       # BB#0: # %entry
+; X32-NEXT:    movdqa %xmm0, %xmm1
+; X32-NEXT:    psrld $2, %xmm1
+; X32-NEXT:    psrld $1, %xmm0
+; X32-NEXT:    pxor %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: shr4:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    psrld $2, %xmm1
+; X64-NEXT:    psrld $1, %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm0
+; X64-NEXT:    retq
 entry:
-; CHECK:      shr4
-; CHECK:      psrld
-; CHECK-NEXT: psrld
-; CHECK:      ret
   %B = lshr <4 x i32> %A,  < i32 2, i32 2, i32 2, i32 2>
   %C = lshr <4 x i32> %A,  < i32 1, i32 1, i32 1, i32 1>
   %K = xor <4 x i32> %B, %C
@@ -28,11 +51,22 @@ entry:
 }
 
 define <4 x i32> @sra4(<4 x i32> %A) nounwind {
+; X32-LABEL: sra4:
+; X32:       # BB#0: # %entry
+; X32-NEXT:    movdqa %xmm0, %xmm1
+; X32-NEXT:    psrad $2, %xmm1
+; X32-NEXT:    psrad $1, %xmm0
+; X32-NEXT:    pxor %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: sra4:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    psrad $2, %xmm1
+; X64-NEXT:    psrad $1, %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm0
+; X64-NEXT:    retq
 entry:
-; CHECK:      sra4
-; CHECK:      psrad
-; CHECK-NEXT: psrad
-; CHECK:      ret
   %B = ashr <4 x i32> %A,  < i32 2, i32 2, i32 2, i32 2>
   %C = ashr <4 x i32> %A,  < i32 1, i32 1, i32 1, i32 1>
   %K = xor <4 x i32> %B, %C
@@ -40,11 +74,22 @@ entry:
 }
 
 define <2 x i64> @shl2(<2 x i64> %A) nounwind {
+; X32-LABEL: shl2:
+; X32:       # BB#0: # %entry
+; X32-NEXT:    movdqa %xmm0, %xmm1
+; X32-NEXT:    psllq $2, %xmm1
+; X32-NEXT:    psllq $9, %xmm0
+; X32-NEXT:    pxor %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: shl2:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    psllq $2, %xmm1
+; X64-NEXT:    psllq $9, %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm0
+; X64-NEXT:    retq
 entry:
-; CHECK:      shl2
-; CHECK:      psllq
-; CHECK-NEXT: psllq
-; CHECK:      ret
   %B = shl <2 x i64> %A,  < i64 2, i64 2>
   %C = shl <2 x i64> %A,  < i64 9, i64 9>
   %K = xor <2 x i64> %B, %C
@@ -52,11 +97,22 @@ entry:
 }
 
 define <2 x i64> @shr2(<2 x i64> %A) nounwind {
+; X32-LABEL: shr2:
+; X32:       # BB#0: # %entry
+; X32-NEXT:    movdqa %xmm0, %xmm1
+; X32-NEXT:    psrlq $8, %xmm1
+; X32-NEXT:    psrlq $1, %xmm0
+; X32-NEXT:    pxor %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: shr2:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    psrlq $8, %xmm1
+; X64-NEXT:    psrlq $1, %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm0
+; X64-NEXT:    retq
 entry:
-; CHECK:      shr2
-; CHECK:      psrlq
-; CHECK-NEXT: psrlq
-; CHECK:      ret
   %B = lshr <2 x i64> %A,  < i64 8, i64 8>
   %C = lshr <2 x i64> %A,  < i64 1, i64 1>
   %K = xor <2 x i64> %B, %C
@@ -65,11 +121,22 @@ entry:
 
 
 define <8 x i16> @shl8(<8 x i16> %A) nounwind {
+; X32-LABEL: shl8:
+; X32:       # BB#0: # %entry
+; X32-NEXT:    movdqa %xmm0, %xmm1
+; X32-NEXT:    psllw $2, %xmm1
+; X32-NEXT:    paddw %xmm0, %xmm0
+; X32-NEXT:    pxor %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: shl8:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    psllw $2, %xmm1
+; X64-NEXT:    paddw %xmm0, %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm0
+; X64-NEXT:    retq
 entry:
-; CHECK:      shl8
-; CHECK:      psllw
-; CHECK:      padd
-; CHECK:      ret
   %B = shl <8 x i16> %A,  < i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
   %C = shl <8 x i16> %A,  < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %K = xor <8 x i16> %B, %C
@@ -77,11 +144,22 @@ entry:
 }
 
 define <8 x i16> @shr8(<8 x i16> %A) nounwind {
+; X32-LABEL: shr8:
+; X32:       # BB#0: # %entry
+; X32-NEXT:    movdqa %xmm0, %xmm1
+; X32-NEXT:    psrlw $2, %xmm1
+; X32-NEXT:    psrlw $1, %xmm0
+; X32-NEXT:    pxor %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: shr8:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    psrlw $2, %xmm1
+; X64-NEXT:    psrlw $1, %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm0
+; X64-NEXT:    retq
 entry:
-; CHECK:      shr8
-; CHECK:      psrlw
-; CHECK-NEXT: psrlw
-; CHECK:      ret
   %B = lshr <8 x i16> %A,  < i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
   %C = lshr <8 x i16> %A,  < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %K = xor <8 x i16> %B, %C
@@ -89,11 +167,22 @@ entry:
 }
 
 define <8 x i16> @sra8(<8 x i16> %A) nounwind {
+; X32-LABEL: sra8:
+; X32:       # BB#0: # %entry
+; X32-NEXT:    movdqa %xmm0, %xmm1
+; X32-NEXT:    psraw $2, %xmm1
+; X32-NEXT:    psraw $1, %xmm0
+; X32-NEXT:    pxor %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: sra8:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    psraw $2, %xmm1
+; X64-NEXT:    psraw $1, %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm0
+; X64-NEXT:    retq
 entry:
-; CHECK:      sra8
-; CHECK:      psraw
-; CHECK-NEXT: psraw
-; CHECK:      ret
   %B = ashr <8 x i16> %A,  < i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
   %C = ashr <8 x i16> %A,  < i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
   %K = xor <8 x i16> %B, %C
@@ -104,11 +193,22 @@ entry:
 
 
 define <8 x i16> @sll8_nosplat(<8 x i16> %A) nounwind {
+; X32-LABEL: sll8_nosplat:
+; X32:       # BB#0: # %entry
+; X32-NEXT:    movdqa {{.*#+}} xmm1 = [2,4,8,64,4,4,4,4]
+; X32-NEXT:    pmullw %xmm0, %xmm1
+; X32-NEXT:    pmullw {{\.LCPI.*}}, %xmm0
+; X32-NEXT:    pxor %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: sll8_nosplat:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    movdqa {{.*#+}} xmm1 = [2,4,8,64,4,4,4,4]
+; X64-NEXT:    pmullw %xmm0, %xmm1
+; X64-NEXT:    pmullw {{.*}}(%rip), %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm0
+; X64-NEXT:    retq
 entry:
-; CHECK: sll8_nosplat
-; CHECK-NOT: psll
-; CHECK-NOT: psll
-; CHECK:      ret
   %B = shl <8 x i16> %A,  < i16 1, i16 2, i16 3, i16 6, i16 2, i16 2, i16 2, i16 2>
   %C = shl <8 x i16> %A,  < i16 9, i16 7, i16 5, i16 1, i16 4, i16 1, i16 1, i16 1>
   %K = xor <8 x i16> %B, %C
@@ -117,17 +217,30 @@ entry:
 
 
 define <2 x i64> @shr2_nosplat(<2 x i64> %A) nounwind {
+; X32-LABEL: shr2_nosplat:
+; X32:       # BB#0: # %entry
+; X32-NEXT:    movdqa %xmm0, %xmm2
+; X32-NEXT:    psrlq $8, %xmm2
+; X32-NEXT:    movdqa %xmm0, %xmm1
+; X32-NEXT:    psrlq $1, %xmm1
+; X32-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X32-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; X32-NEXT:    xorpd %xmm0, %xmm1
+; X32-NEXT:    movapd %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: shr2_nosplat:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    psrlq $1, %xmm1
+; X64-NEXT:    movdqa %xmm0, %xmm2
+; X64-NEXT:    psrlq $8, %xmm2
+; X64-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X64-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; X64-NEXT:    xorpd %xmm0, %xmm1
+; X64-NEXT:    movapd %xmm1, %xmm0
+; X64-NEXT:    retq
 entry:
-; CHECK-LABEL: shr2_nosplat
-; CHECK:       movdqa %xmm0, %xmm1
-; CHECK-NEXT:  psrlq  $1, %xmm1
-; CHECK-NEXT:  movdqa %xmm0, %xmm2
-; CHECK-NEXT:  psrlq  $8, %xmm2
-; CHECK-NEXT:  movsd  {{.*#+}} xmm0 = xmm1[0],xmm0[1]
-; CHECK-NEXT:  movsd  {{.*#+}} xmm1 = xmm2[0],xmm1[1]
-; CHECK-NEXT:  xorpd  %xmm0, %xmm1
-; CHECK-NEXT:  movapd %xmm1, %xmm0
-; CHECK-NEXT:  ret
   %B = lshr <2 x i64> %A,  < i64 8, i64 1>
   %C = lshr <2 x i64> %A,  < i64 1, i64 0>
   %K = xor <2 x i64> %B, %C
@@ -138,10 +251,22 @@ entry:
 ; Other shifts
 
 define <2 x i32> @shl2_other(<2 x i32> %A) nounwind {
+; X32-LABEL: shl2_other:
+; X32:       # BB#0: # %entry
+; X32-NEXT:    movdqa %xmm0, %xmm1
+; X32-NEXT:    psllq $2, %xmm1
+; X32-NEXT:    psllq $9, %xmm0
+; X32-NEXT:    pxor %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: shl2_other:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    psllq $2, %xmm1
+; X64-NEXT:    psllq $9, %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm0
+; X64-NEXT:    retq
 entry:
-; CHECK: shl2_other
-; CHECK: psllq
-; CHECK: ret
   %B = shl <2 x i32> %A,  < i32 2, i32 2>
   %C = shl <2 x i32> %A,  < i32 9, i32 9>
   %K = xor <2 x i32> %B, %C
@@ -149,10 +274,24 @@ entry:
 }
 
 define <2 x i32> @shr2_other(<2 x i32> %A) nounwind {
+; X32-LABEL: shr2_other:
+; X32:       # BB#0: # %entry
+; X32-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; X32-NEXT:    movdqa %xmm0, %xmm1
+; X32-NEXT:    psrlq $8, %xmm1
+; X32-NEXT:    psrlq $1, %xmm0
+; X32-NEXT:    pxor %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: shr2_other:
+; X64:       # BB#0: # %entry
+; X64-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-NEXT:    movdqa %xmm0, %xmm1
+; X64-NEXT:    psrlq $8, %xmm1
+; X64-NEXT:    psrlq $1, %xmm0
+; X64-NEXT:    pxor %xmm1, %xmm0
+; X64-NEXT:    retq
 entry:
-; CHECK: shr2_other
-; CHECK: psrlq
-; CHECK: ret
   %B = lshr <2 x i32> %A,  < i32 8, i32 8>
   %C = lshr <2 x i32> %A,  < i32 1, i32 1>
   %K = xor <2 x i32> %B, %C
@@ -160,39 +299,73 @@ entry:
 }
 
 define <16 x i8> @shl9(<16 x i8> %A) nounwind {
+; X32-LABEL: shl9:
+; X32:       # BB#0:
+; X32-NEXT:    psllw $3, %xmm0
+; X32-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: shl9:
+; X64:       # BB#0:
+; X64-NEXT:    psllw $3, %xmm0
+; X64-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-NEXT:    retq
   %B = shl <16 x i8> %A, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   ret <16 x i8> %B
-; CHECK-LABEL: shl9:
-; CHECK: psllw $3
-; CHECK: pand
-; CHECK: ret
 }
 
 define <16 x i8> @shr9(<16 x i8> %A) nounwind {
+; X32-LABEL: shr9:
+; X32:       # BB#0:
+; X32-NEXT:    psrlw $3, %xmm0
+; X32-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: shr9:
+; X64:       # BB#0:
+; X64-NEXT:    psrlw $3, %xmm0
+; X64-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-NEXT:    retq
   %B = lshr <16 x i8> %A, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   ret <16 x i8> %B
-; CHECK-LABEL: shr9:
-; CHECK: psrlw $3
-; CHECK: pand
-; CHECK: ret
 }
 
 define <16 x i8> @sra_v16i8_7(<16 x i8> %A) nounwind {
+; X32-LABEL: sra_v16i8_7:
+; X32:       # BB#0:
+; X32-NEXT:    pxor %xmm1, %xmm1
+; X32-NEXT:    pcmpgtb %xmm0, %xmm1
+; X32-NEXT:    movdqa %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: sra_v16i8_7:
+; X64:       # BB#0:
+; X64-NEXT:    pxor %xmm1, %xmm1
+; X64-NEXT:    pcmpgtb %xmm0, %xmm1
+; X64-NEXT:    movdqa %xmm1, %xmm0
+; X64-NEXT:    retq
   %B = ashr <16 x i8> %A, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
   ret <16 x i8> %B
-; CHECK-LABEL: sra_v16i8_7:
-; CHECK: pxor
-; CHECK: pcmpgtb
-; CHECK: ret
 }
 
 define <16 x i8> @sra_v16i8(<16 x i8> %A) nounwind {
+; X32-LABEL: sra_v16i8:
+; X32:       # BB#0:
+; X32-NEXT:    psrlw $3, %xmm0
+; X32-NEXT:    pand {{\.LCPI.*}}, %xmm0
+; X32-NEXT:    movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; X32-NEXT:    pxor %xmm1, %xmm0
+; X32-NEXT:    psubb %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: sra_v16i8:
+; X64:       # BB#0:
+; X64-NEXT:    psrlw $3, %xmm0
+; X64-NEXT:    pand {{.*}}(%rip), %xmm0
+; X64-NEXT:    movdqa {{.*#+}} xmm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; X64-NEXT:    pxor %xmm1, %xmm0
+; X64-NEXT:    psubb %xmm1, %xmm0
+; X64-NEXT:    retq
   %B = ashr <16 x i8> %A, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
   ret <16 x i8> %B
-; CHECK-LABEL: sra_v16i8:
-; CHECK: psrlw $3
-; CHECK: pand
-; CHECK: pxor
-; CHECK: psubb
-; CHECK: ret
 }
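For reference, when lit executes the updated test it substitutes %s with the
path to the .ll file, so the two new RUN lines expand to pipelines of roughly
this shape (the test path shown is illustrative):

  $ llc < test/CodeGen/X86/x86-shifts.ll -mtriple=i686-unknown-unknown \
      -mattr=+sse2 | FileCheck test/CodeGen/X86/x86-shifts.ll --check-prefix=X32
  $ llc < test/CodeGen/X86/x86-shifts.ll -mtriple=x86_64-unknown-unknown \
      -mattr=+sse2 | FileCheck test/CodeGen/X86/x86-shifts.ll --check-prefix=X64

FileCheck then matches the X32 and X64 blocks above against the assembly
emitted for each triple, which is why every function now carries two sets of
autogenerated checks.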



