[llvm] r278232 - [X86][SSE] Regenerate vector shift lowering tests
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Wed Aug 10 08:13:50 PDT 2016
Author: rksimon
Date: Wed Aug 10 10:13:49 2016
New Revision: 278232
URL: http://llvm.org/viewvc/llvm-project?rev=278232&view=rev
Log:
[X86][SSE] Regenerate vector shift lowering tests
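For reference, assertions like the ones added below are produced by running
utils/update_llc_test_checks.py over the test file; a regeneration command
along the following lines should reproduce them (the --llc-binary flag name
and the llc path are assumptions here, not part of this commit):

  utils/update_llc_test_checks.py --llc-binary=<path-to-llc> test/CodeGen/X86/lower-vec-shift.ll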
Modified:
llvm/trunk/test/CodeGen/X86/lower-vec-shift.ll
Modified: llvm/trunk/test/CodeGen/X86/lower-vec-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lower-vec-shift.ll?rev=278232&r1=278231&r2=278232&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lower-vec-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/lower-vec-shift.ll Wed Aug 10 10:13:49 2016
@@ -1,6 +1,7 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
; Verify that the following shifts are lowered into a sequence of two shifts plus
@@ -9,117 +10,181 @@
; emit a simpler sequence of two shifts + blend when possible.
define <8 x i16> @test1(<8 x i16> %a) {
+; SSE-LABEL: test1:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrlw $2, %xmm1
+; SSE-NEXT: psrlw $3, %xmm0
+; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test1:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlw $2, %xmm0, %xmm1
+; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX-NEXT: retq
%lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
ret <8 x i16> %lshr
}
-; CHECK-LABEL: test1
-; SSE: psrlw
-; SSE-NEXT: psrlw
-; SSE-NEXT: movss
-; AVX: vpsrlw
-; AVX-NEXT: vpsrlw
-; AVX-NEXT: vmovss
-; AVX2: vpsrlw
-; AVX2-NEXT: vpsrlw
-; AVX2-NEXT: vmovss
-; CHECK: ret
-
define <8 x i16> @test2(<8 x i16> %a) {
+; SSE-LABEL: test2:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrlw $2, %xmm1
+; SSE-NEXT: psrlw $3, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE-NEXT: movapd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test2:
+; AVX: # BB#0:
+; AVX-NEXT: vpsrlw $2, %xmm0, %xmm1
+; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; AVX-NEXT: retq
%lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2>
ret <8 x i16> %lshr
}
-; CHECK-LABEL: test2
-; SSE: psrlw
-; SSE-NEXT: psrlw
-; SSE-NEXT: movsd
-; AVX: vpsrlw
-; AVX-NEXT: vpsrlw
-; AVX-NEXT: vmovsd
-; AVX2: vpsrlw
-; AVX2-NEXT: vpsrlw
-; AVX2-NEXT: vmovsd
-; CHECK: ret
-
define <4 x i32> @test3(<4 x i32> %a) {
+; SSE-LABEL: test3:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrld $2, %xmm1
+; SSE-NEXT: psrld $3, %xmm0
+; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test3:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrld $2, %xmm0, %xmm1
+; AVX1-NEXT: vpsrld $3, %xmm0, %xmm0
+; AVX1-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test3:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
%lshr = lshr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
ret <4 x i32> %lshr
}
-; CHECK-LABEL: test3
-; SSE: psrld
-; SSE-NEXT: psrld
-; SSE-NEXT: movss
-; AVX: vpsrld
-; AVX-NEXT: vpsrld
-; AVX-NEXT: vmovss
-; AVX2: vpsrlvd
-; CHECK: ret
-
define <4 x i32> @test4(<4 x i32> %a) {
+; SSE-LABEL: test4:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrld $2, %xmm1
+; SSE-NEXT: psrld $3, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE-NEXT: movapd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test4:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrld $2, %xmm0, %xmm1
+; AVX1-NEXT: vpsrld $3, %xmm0, %xmm0
+; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test4:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
%lshr = lshr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2>
ret <4 x i32> %lshr
}
-; CHECK-LABEL: test4
-; SSE: psrld
-; SSE-NEXT: psrld
-; SSE-NEXT: movsd
-; AVX: vpsrld
-; AVX-NEXT: vpsrld
-; AVX-NEXT: vmovsd
-; AVX2: vpsrlvd
-; CHECK: ret
-
define <8 x i16> @test5(<8 x i16> %a) {
+; SSE-LABEL: test5:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psraw $2, %xmm1
+; SSE-NEXT: psraw $3, %xmm0
+; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test5:
+; AVX: # BB#0:
+; AVX-NEXT: vpsraw $2, %xmm0, %xmm1
+; AVX-NEXT: vpsraw $3, %xmm0, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX-NEXT: retq
%lshr = ashr <8 x i16> %a, <i16 3, i16 3, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
ret <8 x i16> %lshr
}
define <8 x i16> @test6(<8 x i16> %a) {
+; SSE-LABEL: test6:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psraw $2, %xmm1
+; SSE-NEXT: psraw $3, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE-NEXT: movapd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test6:
+; AVX: # BB#0:
+; AVX-NEXT: vpsraw $2, %xmm0, %xmm1
+; AVX-NEXT: vpsraw $3, %xmm0, %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; AVX-NEXT: retq
%lshr = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2>
ret <8 x i16> %lshr
}
-; CHECK-LABEL: test6
-; SSE: psraw
-; SSE-NEXT: psraw
-; SSE-NEXT: movsd
-; AVX: vpsraw
-; AVX-NEXT: vpsraw
-; AVX-NEXT: vmovsd
-; AVX2: vpsraw
-; AVX2-NEXT: vpsraw
-; AVX2-NEXT: vmovsd
-; CHECK: ret
-
define <4 x i32> @test7(<4 x i32> %a) {
+; SSE-LABEL: test7:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrad $2, %xmm1
+; SSE-NEXT: psrad $3, %xmm0
+; SSE-NEXT: movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test7:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrad $2, %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $3, %xmm0, %xmm0
+; AVX1-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test7:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
%lshr = ashr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>
ret <4 x i32> %lshr
}
-; CHECK-LABEL: test7
-; SSE: psrad
-; SSE-NEXT: psrad
-; SSE-NEXT: movss
-; AVX: vpsrad
-; AVX-NEXT: vpsrad
-; AVX-NEXT: vmovss
-; AVX2: vpsravd
-; CHECK: ret
-
define <4 x i32> @test8(<4 x i32> %a) {
+; SSE-LABEL: test8:
+; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrad $2, %xmm1
+; SSE-NEXT: psrad $3, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE-NEXT: movapd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpsrad $2, %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $3, %xmm0, %xmm0
+; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: retq
%lshr = ashr <4 x i32> %a, <i32 3, i32 3, i32 2, i32 2>
ret <4 x i32> %lshr
}
-; CHECK-LABEL: test8
-; SSE: psrad
-; SSE-NEXT: psrad
-; SSE-NEXT: movsd
-; AVX: vpsrad
-; AVX-NEXT: vpsrad
-; AVX-NEXT: vmovsd
-; AVX2: vpsravd
-; CHECK: ret
-
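The comment at the top of the test describes the strategy these checks
verify: a vector shift by non-uniform immediate amounts is lowered as two
uniform shifts plus a blend (movss/movsd on SSE, vmovss/vmovsd on AVX1),
while on AVX2 the <4 x i32> cases instead select a single per-lane variable
shift (vpsrlvd/vpsravd) whose shift amounts are loaded RIP-relative from the
constant pool. As a rough C-intrinsics sketch of the two-shifts-plus-blend
pattern for test3's "lshr <4 x i32> %a, <i32 3, i32 2, i32 2, i32 2>"
(function name hypothetical, purely illustrative):

  #include <emmintrin.h>  /* SSE/SSE2 intrinsics: _mm_srli_epi32, _mm_move_ss, casts */

  /* Illustrative only: the two-uniform-shifts + movss blend that the
     SSE check lines for test3 expect. */
  static __m128i lshr_3_2_2_2(__m128i a) {
      __m128i by2 = _mm_srli_epi32(a, 2);   /* all lanes >> 2  (psrld $2) */
      __m128i by3 = _mm_srli_epi32(a, 3);   /* all lanes >> 3  (psrld $3) */
      /* movss-style blend: lane 0 from by3, lanes 1..3 from by2 */
      return _mm_castps_si128(
          _mm_move_ss(_mm_castsi128_ps(by2), _mm_castsi128_ps(by3)));
  }

Note that only lane 0 differs in shift amount, which is why a single
movss-class blend suffices; the same structure applies to the movsd cases,
where the low two lanes come from the larger-shift result.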