[llvm] r279811 - [X86][SSE4A] The EXTRQ/INSERTQ bit extraction/insertion ops should be in the integer domain

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 26 02:55:42 PDT 2016


Author: rksimon
Date: Fri Aug 26 04:55:41 2016
New Revision: 279811

URL: http://llvm.org/viewvc/llvm-project?rev=279811&view=rev
Log:
[X86][SSE4A] The EXTRQ/INSERTQ bit extraction/insertion ops should be in the integer domain
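
EXTRQ/EXTRQI/INSERTQ/INSERTQI operate on integer bit fields, but their
definitions carried no ExeDomain, so LLVM's execution-domain fixup was free
to leave the register moves around them in the floating-point domain
(movaps/vmovaps), which can cost a bypass delay on targets that split the
FP and integer vector units. Tagging the definitions with SSEPackedInt keeps
those moves in the integer domain (movdqa/vmovdqa). A minimal before/after
sketch of the copy feeding EXTRQ, taken from the BTVER1 checks updated in
vector-shuffle-sse4a.ll below:

  before:  movaps %xmm0, %xmm1    # FP-domain copy feeding extrq
  after:   movdqa %xmm0, %xmm1    # integer-domain copy, no domain crossing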

Modified:
    llvm/trunk/lib/Target/X86/X86InstrSSE.td
    llvm/trunk/test/CodeGen/X86/sse4a.ll
    llvm/trunk/test/CodeGen/X86/vector-shuffle-sse4a.ll

Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=279811&r1=279810&r2=279811&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Fri Aug 26 04:55:41 2016
@@ -7788,6 +7788,7 @@ defm : pclmul_alias<"lqlq", 0x00>;
 
 let Predicates = [HasSSE4A] in {
 
+let ExeDomain = SSEPackedInt in {
 let Constraints = "$src = $dst" in {
 def EXTRQI : Ii8<0x78, MRMXr, (outs VR128:$dst),
                  (ins VR128:$src, u8imm:$len, u8imm:$idx),
@@ -7811,6 +7812,7 @@ def INSERTQ  : I<0x79, MRMSrcReg, (outs
                  [(set VR128:$dst, (int_x86_sse4a_insertq VR128:$src,
                                     VR128:$mask))]>, XD;
 }
+} // ExeDomain = SSEPackedInt
 
 // Non-temporal (unaligned) scalar stores.
 let AddedComplexity = 400 in { // Prefer non-temporal versions
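
The new *_domain tests below load one operand from memory, so codegen has to
materialize it with a domain-replaceable move (movaps vs. movdqa, or their
VEX forms); with the instructions tagged SSEPackedInt the integer forms are
selected. A condensed view of the pattern (a sketch mirroring the checks
added to sse4a.ll, not an additional test):

  define <2 x i64> @test_extrqi_domain(<2 x i64> *%p) nounwind {
    %1 = load <2 x i64>, <2 x i64> *%p
    %2 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %1, i8 3, i8 2)
    ret <2 x i64> %2
  }
  ; X64-SSE now emits:
  ;   movdqa (%rdi), %xmm0    # integer-domain load
  ;   extrq  $2, $3, %xmm0
  ;   retq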

Modified: llvm/trunk/test/CodeGen/X86/sse4a.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse4a.ll?rev=279811&r1=279810&r2=279811&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse4a.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse4a.ll Fri Aug 26 04:55:41 2016
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X32
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X32 --check-prefix=X32-SSE
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X32 --check-prefix=X32-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a,+avx | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX
 
 define <2 x i64> @test_extrqi(<2 x i64> %x) nounwind uwtable ssp {
 ; X32-LABEL: test_extrqi:
@@ -18,6 +18,37 @@ define <2 x i64> @test_extrqi(<2 x i64>
   ret <2 x i64> %1
 }
 
+define <2 x i64> @test_extrqi_domain(<2 x i64> *%p) nounwind uwtable ssp {
+; X32-SSE-LABEL: test_extrqi_domain:
+; X32-SSE:       # BB#0:
+; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-SSE-NEXT:    movdqa (%eax), %xmm0
+; X32-SSE-NEXT:    extrq $2, $3, %xmm0
+; X32-SSE-NEXT:    retl
+;
+; X32-AVX-LABEL: test_extrqi_domain:
+; X32-AVX:       # BB#0:
+; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-AVX-NEXT:    vmovdqa (%eax), %xmm0
+; X32-AVX-NEXT:    extrq $2, $3, %xmm0
+; X32-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_extrqi_domain:
+; X64-SSE:       # BB#0:
+; X64-SSE-NEXT:    movdqa (%rdi), %xmm0
+; X64-SSE-NEXT:    extrq $2, $3, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_extrqi_domain:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm0
+; X64-AVX-NEXT:    extrq $2, $3, %xmm0
+; X64-AVX-NEXT:    retq
+  %1 = load <2 x i64>, <2 x i64> *%p
+  %2 = tail call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %1, i8 3, i8 2)
+  ret <2 x i64> %2
+}
+
 declare <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64>, i8, i8) nounwind
 
 define <2 x i64> @test_extrq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
@@ -35,6 +66,42 @@ define <2 x i64> @test_extrq(<2 x i64> %
   ret <2 x i64> %2
 }
 
+define <2 x i64> @test_extrq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
+; X32-SSE-LABEL: test_extrq_domain:
+; X32-SSE:       # BB#0:
+; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-SSE-NEXT:    movdqa (%eax), %xmm1
+; X32-SSE-NEXT:    extrq %xmm0, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+;
+; X32-AVX-LABEL: test_extrq_domain:
+; X32-AVX:       # BB#0:
+; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-AVX-NEXT:    vmovdqa (%eax), %xmm1
+; X32-AVX-NEXT:    extrq %xmm0, %xmm1
+; X32-AVX-NEXT:    vmovdqa %xmm1, %xmm0
+; X32-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_extrq_domain:
+; X64-SSE:       # BB#0:
+; X64-SSE-NEXT:    movdqa (%rdi), %xmm1
+; X64-SSE-NEXT:    extrq %xmm0, %xmm1
+; X64-SSE-NEXT:    movdqa %xmm1, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_extrq_domain:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm1
+; X64-AVX-NEXT:    extrq %xmm0, %xmm1
+; X64-AVX-NEXT:    vmovdqa %xmm1, %xmm0
+; X64-AVX-NEXT:    retq
+  %1 = load <2 x i64>, <2 x i64> *%p
+  %2 = bitcast <2 x i64> %y to <16 x i8>
+  %3 = tail call <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64> %1, <16 x i8> %2) nounwind
+  ret <2 x i64> %3
+}
+
 declare <2 x i64> @llvm.x86.sse4a.extrq(<2 x i64>, <16 x i8>) nounwind
 
 define <2 x i64> @test_insertqi(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
@@ -51,6 +118,41 @@ define <2 x i64> @test_insertqi(<2 x i64
   ret <2 x i64> %1
 }
 
+define <2 x i64> @test_insertqi_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
+; X32-SSE-LABEL: test_insertqi_domain:
+; X32-SSE:       # BB#0:
+; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-SSE-NEXT:    movdqa (%eax), %xmm1
+; X32-SSE-NEXT:    insertq $6, $5, %xmm0, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+;
+; X32-AVX-LABEL: test_insertqi_domain:
+; X32-AVX:       # BB#0:
+; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-AVX-NEXT:    vmovdqa (%eax), %xmm1
+; X32-AVX-NEXT:    insertq $6, $5, %xmm0, %xmm1
+; X32-AVX-NEXT:    vmovdqa %xmm1, %xmm0
+; X32-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_insertqi_domain:
+; X64-SSE:       # BB#0:
+; X64-SSE-NEXT:    movdqa (%rdi), %xmm1
+; X64-SSE-NEXT:    insertq $6, $5, %xmm0, %xmm1
+; X64-SSE-NEXT:    movdqa %xmm1, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_insertqi_domain:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm1
+; X64-AVX-NEXT:    insertq $6, $5, %xmm0, %xmm1
+; X64-AVX-NEXT:    vmovdqa %xmm1, %xmm0
+; X64-AVX-NEXT:    retq
+  %1 = load <2 x i64>, <2 x i64> *%p
+  %2 = tail call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %1, <2 x i64> %y, i8 5, i8 6)
+  ret <2 x i64> %2
+}
+
 declare <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64>, <2 x i64>, i8, i8) nounwind
 
 define <2 x i64> @test_insertq(<2 x i64> %x, <2 x i64> %y) nounwind uwtable ssp {
@@ -67,4 +169,39 @@ define <2 x i64> @test_insertq(<2 x i64>
   ret <2 x i64> %1
 }
 
+define <2 x i64> @test_insertq_domain(<2 x i64> *%p, <2 x i64> %y) nounwind uwtable ssp {
+; X32-SSE-LABEL: test_insertq_domain:
+; X32-SSE:       # BB#0:
+; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-SSE-NEXT:    movdqa (%eax), %xmm1
+; X32-SSE-NEXT:    insertq %xmm0, %xmm1
+; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
+; X32-SSE-NEXT:    retl
+;
+; X32-AVX-LABEL: test_insertq_domain:
+; X32-AVX:       # BB#0:
+; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-AVX-NEXT:    vmovdqa (%eax), %xmm1
+; X32-AVX-NEXT:    insertq %xmm0, %xmm1
+; X32-AVX-NEXT:    vmovdqa %xmm1, %xmm0
+; X32-AVX-NEXT:    retl
+;
+; X64-SSE-LABEL: test_insertq_domain:
+; X64-SSE:       # BB#0:
+; X64-SSE-NEXT:    movdqa (%rdi), %xmm1
+; X64-SSE-NEXT:    insertq %xmm0, %xmm1
+; X64-SSE-NEXT:    movdqa %xmm1, %xmm0
+; X64-SSE-NEXT:    retq
+;
+; X64-AVX-LABEL: test_insertq_domain:
+; X64-AVX:       # BB#0:
+; X64-AVX-NEXT:    vmovdqa (%rdi), %xmm1
+; X64-AVX-NEXT:    insertq %xmm0, %xmm1
+; X64-AVX-NEXT:    vmovdqa %xmm1, %xmm0
+; X64-AVX-NEXT:    retq
+  %1 = load <2 x i64>, <2 x i64> *%p
+  %2 = tail call <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64> %1, <2 x i64> %y) nounwind
+  ret <2 x i64> %2
+}
+
 declare <2 x i64> @llvm.x86.sse4a.insertq(<2 x i64>, <2 x i64>) nounwind

Modified: llvm/trunk/test/CodeGen/X86/vector-shuffle-sse4a.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-shuffle-sse4a.ll?rev=279811&r1=279810&r2=279811&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-shuffle-sse4a.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-shuffle-sse4a.ll Fri Aug 26 04:55:41 2016
@@ -52,7 +52,7 @@ define <16 x i8> @shuf_0zzzuuuuuuuuuuuu(
 define <16 x i8> @shuf_0zzzzzzz1zzzzzzz(<16 x i8> %a0) {
 ; BTVER1-LABEL: shuf_0zzzzzzz1zzzzzzz:
 ; BTVER1:       # BB#0:
-; BTVER1-NEXT:    movaps %xmm0, %xmm1
+; BTVER1-NEXT:    movdqa %xmm0, %xmm1
 ; BTVER1-NEXT:    extrq {{.*#+}} xmm1 = xmm1[1],zero,zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
 ; BTVER1-NEXT:    extrq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; BTVER1-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -69,7 +69,7 @@ define <16 x i8> @shuf_0zzzzzzz1zzzzzzz(
 define <16 x i8> @shuf_2zzzzzzz3zzzzzzz(<16 x i8> %a0) {
 ; BTVER1-LABEL: shuf_2zzzzzzz3zzzzzzz:
 ; BTVER1:       # BB#0:
-; BTVER1-NEXT:    movaps %xmm0, %xmm1
+; BTVER1-NEXT:    movdqa %xmm0, %xmm1
 ; BTVER1-NEXT:    extrq {{.*#+}} xmm1 = xmm1[3],zero,zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
 ; BTVER1-NEXT:    extrq {{.*#+}} xmm0 = xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; BTVER1-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -101,7 +101,7 @@ define <16 x i8> @shuf_01zzuuuuuuuuuuuu(
 define <16 x i8> @shuf_01zzzzzz23zzzzzz(<16 x i8> %a0) {
 ; BTVER1-LABEL: shuf_01zzzzzz23zzzzzz:
 ; BTVER1:       # BB#0:
-; BTVER1-NEXT:    movaps %xmm0, %xmm1
+; BTVER1-NEXT:    movdqa %xmm0, %xmm1
 ; BTVER1-NEXT:    extrq {{.*#+}} xmm1 = xmm1[2,3],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
 ; BTVER1-NEXT:    extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; BTVER1-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
@@ -154,7 +154,7 @@ define <8 x i16> @shuf_012zuuuu(<8 x i16
 define <8 x i16> @shuf_0zzz1zzz(<8 x i16> %a0) {
 ; BTVER1-LABEL: shuf_0zzz1zzz:
 ; BTVER1:       # BB#0:
-; BTVER1-NEXT:    movaps %xmm0, %xmm1
+; BTVER1-NEXT:    movdqa %xmm0, %xmm1
 ; BTVER1-NEXT:    extrq {{.*#+}} xmm1 = xmm1[2,3],zero,zero,zero,zero,zero,zero,xmm1[u,u,u,u,u,u,u,u]
 ; BTVER1-NEXT:    extrq {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[u,u,u,u,u,u,u,u]
 ; BTVER1-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]



