[llvm] 70ab0a9 - [X86] Add vector shift by scalar test with bitcasted scalar shift amount

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 1 07:44:15 PST 2022


Author: Simon Pilgrim
Date: 2022-03-01T15:40:40Z
New Revision: 70ab0a9b62206add0738833cb40ea47fe5f52700

URL: https://github.com/llvm/llvm-project/commit/70ab0a9b62206add0738833cb40ea47fe5f52700
DIFF: https://github.com/llvm/llvm-project/commit/70ab0a9b62206add0738833cb40ea47fe5f52700.diff

LOG: [X86] Add vector shift by scalar test with bitcasted scalar shift amount

As noted on D120553, we didn't have any tests that explicitly showed the bitcast of the scalar shift amount; we were relying on i64 -> i32 legalization on 32-bit targets
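
For reference, the pattern the new test exercises looks roughly like the following IR sketch (condensed from the shiftInput___2x32bitcast function added in the diff below; the function name here is illustrative, not part of the commit). The scalar i32 shift amount is splatted into the even lanes of an <8 x i32> vector, which is then bitcast to <4 x i64> and used as the shift amount of the lshr; on a little-endian target each i64 element therefore equals zext(%shiftval), i.e. a uniform shift by a scalar amount:

    define <4 x i64> @example_bitcast_shift(<4 x i64> %input, i32 %shiftval) {
      ; Splat %shiftval into the even i32 lanes (the low half of each i64),
      ; leaving the odd lanes (the high halves) zero.
      %smear.0 = insertelement <8 x i32> zeroinitializer, i32 %shiftval, i32 0
      %smear.1 = insertelement <8 x i32> %smear.0, i32 %shiftval, i32 2
      %smear.2 = insertelement <8 x i32> %smear.1, i32 %shiftval, i32 4
      %smear.3 = insertelement <8 x i32> %smear.2, i32 %shiftval, i32 6
      ; Reinterpret the splat as <4 x i64>; each element is zext(%shiftval),
      ; so the lshr is really a vector shift by a single scalar amount.
      %amt = bitcast <8 x i32> %smear.3 to <4 x i64>
      %res = lshr <4 x i64> %input, %amt
      ret <4 x i64> %res
    }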

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/pr15296.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/pr15296.ll b/llvm/test/CodeGen/X86/pr15296.ll
index 8476b765dbe26..79a2072178972 100644
--- a/llvm/test/CodeGen/X86/pr15296.ll
+++ b/llvm/test/CodeGen/X86/pr15296.ll
@@ -1,15 +1,25 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-pc-linux -mcpu=corei7-avx | FileCheck %s
+; RUN: llc < %s -mtriple=i686-pc-linux -mcpu=corei7-avx | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=corei7-avx | FileCheck %s --check-prefixes=X64
 
 define <8 x i32> @shiftInput___vyuunu(<8 x i32> %input, i32 %shiftval, <8 x i32> %__mask) nounwind {
-; CHECK-LABEL: shiftInput___vyuunu:
-; CHECK:       # %bb.0: # %allocas
-; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; CHECK-NEXT:    vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vpsrld %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpsrld %xmm2, %xmm0, %xmm0
-; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; CHECK-NEXT:    retl
+; X86-LABEL: shiftInput___vyuunu:
+; X86:       # %bb.0: # %allocas
+; X86-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT:    vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-NEXT:    vpsrld %xmm2, %xmm1, %xmm1
+; X86-NEXT:    vpsrld %xmm2, %xmm0, %xmm0
+; X86-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: shiftInput___vyuunu:
+; X64:       # %bb.0: # %allocas
+; X64-NEXT:    vmovd %edi, %xmm1
+; X64-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; X64-NEXT:    vpsrld %xmm1, %xmm2, %xmm2
+; X64-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-NEXT:    retq
 allocas:
   %smear.0 = insertelement <8 x i32> undef, i32 %shiftval, i32 0
   %smear.1 = insertelement <8 x i32> %smear.0, i32 %shiftval, i32 1
@@ -24,15 +34,24 @@ allocas:
 }
 
 define <8 x i32> @shiftInput___canonical(<8 x i32> %input, i32 %shiftval, <8 x i32> %__mask) nounwind {
-; CHECK-LABEL: shiftInput___canonical:
-; CHECK:       # %bb.0: # %allocas
-; CHECK-NEXT:    vbroadcastss {{[0-9]+}}(%esp), %xmm1
-; CHECK-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; CHECK-NEXT:    vpsrld %xmm1, %xmm2, %xmm2
-; CHECK-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
-; CHECK-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; CHECK-NEXT:    retl
+; X86-LABEL: shiftInput___canonical:
+; X86:       # %bb.0: # %allocas
+; X86-NEXT:    vbroadcastss {{[0-9]+}}(%esp), %xmm1
+; X86-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; X86-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; X86-NEXT:    vpsrld %xmm1, %xmm2, %xmm2
+; X86-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
+; X86-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: shiftInput___canonical:
+; X64:       # %bb.0: # %allocas
+; X64-NEXT:    vmovd %edi, %xmm1
+; X64-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; X64-NEXT:    vpsrld %xmm1, %xmm2, %xmm2
+; X64-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-NEXT:    retq
 allocas:
   %smear.0 = insertelement <8 x i32> undef, i32 %shiftval, i32 0
   %smear.7 = shufflevector <8 x i32> %smear.0, <8 x i32> undef, <8 x i32> zeroinitializer
@@ -40,18 +59,61 @@ allocas:
   ret <8 x i32> %bitop
 }
 
-define <4 x i64> @shiftInput___64in32bitmode(<4 x i64> %input, i64 %shiftval, <4 x i64> %__mask) nounwind {
-; CHECK-LABEL: shiftInput___64in32bitmode:
-; CHECK:       # %bb.0: # %allocas
-; CHECK-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; CHECK-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
-; CHECK-NEXT:    vpsrlq %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vpsrlq %xmm2, %xmm0, %xmm0
-; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; CHECK-NEXT:    retl
+define <4 x i64> @shiftInput___64in32bitmode(<4 x i64> %input, i64 %shiftval) nounwind {
+; X86-LABEL: shiftInput___64in32bitmode:
+; X86:       # %bb.0: # %allocas
+; X86-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; X86-NEXT:    vpsrlq %xmm2, %xmm1, %xmm1
+; X86-NEXT:    vpsrlq %xmm2, %xmm0, %xmm0
+; X86-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: shiftInput___64in32bitmode:
+; X64:       # %bb.0: # %allocas
+; X64-NEXT:    vmovq %rdi, %xmm1
+; X64-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; X64-NEXT:    vpsrlq %xmm1, %xmm2, %xmm2
+; X64-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X64-NEXT:    retq
 allocas:
   %smear.0 = insertelement <4 x i64> undef, i64 %shiftval, i32 0
   %smear.7 = shufflevector <4 x i64> %smear.0, <4 x i64> undef, <4 x i32> zeroinitializer
   %bitop = lshr <4 x i64> %input, %smear.7
   ret <4 x i64> %bitop
 }
+
+define <4 x i64> @shiftInput___2x32bitcast(<4 x i64> %input, i32 %shiftval) nounwind {
+; X86-LABEL: shiftInput___2x32bitcast:
+; X86:       # %bb.0: # %allocas
+; X86-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X86-NEXT:    vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X86-NEXT:    vpsrlq %xmm2, %xmm1, %xmm1
+; X86-NEXT:    vpsrlq %xmm2, %xmm0, %xmm0
+; X86-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: shiftInput___2x32bitcast:
+; X64:       # %bb.0: # %allocas
+; X64-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT:    vmovd %edi, %xmm2
+; X64-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],zero,xmm2[0],zero
+; X64-NEXT:    vpsrlq %xmm2, %xmm1, %xmm3
+; X64-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm2[1,1]
+; X64-NEXT:    vpsrlq %xmm4, %xmm1, %xmm1
+; X64-NEXT:    vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
+; X64-NEXT:    vpsrlq %xmm2, %xmm0, %xmm2
+; X64-NEXT:    vpsrlq %xmm4, %xmm0, %xmm0
+; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X64-NEXT:    retq
+allocas:
+  %smear.0 = insertelement <8 x i32> zeroinitializer, i32 %shiftval, i32 0
+  %smear.1 = insertelement <8 x i32> %smear.0, i32 %shiftval, i32 2
+  %smear.2 = insertelement <8 x i32> %smear.1, i32 %shiftval, i32 4
+  %smear.3 = insertelement <8 x i32> %smear.2, i32 %shiftval, i32 6
+  %smear.4 = bitcast <8 x i32> %smear.3 to <4 x i64>
+  %bitop = lshr <4 x i64> %input, %smear.4
+  ret <4 x i64> %bitop
+}

More information about the llvm-commits mailing list