[llvm] r284308 - [X86][SSE] Added some basic examples of knownbits failing for vector types

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Sat Oct 15 12:29:26 PDT 2016


Author: rksimon
Date: Sat Oct 15 14:29:26 2016
New Revision: 284308

URL: http://llvm.org/viewvc/llvm-project?rev=284308&view=rev
Log:
[X86][SSE] Added some basic examples of knownbits failing for vector types

computeKnownBits only returns the bits that are common to every vector element, instead of computing the known bits of just the elements that are actually used (demanded).
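
For example, in the first test below only element 0 of the vector is
demanded, and the mask leaves just its low 4 bits:

  %1 = and <8 x i16> %a0, <i16 15, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
  %2 = extractelement <8 x i16> %1, i32 0
  %3 = sext i16 %2 to i32

The extracted value is known to lie in [0,15], so the sext is redundant,
but computeKnownBits intersects the known bits of all eight elements
(elements 1-7 contribute nothing) and the fold is missed, so a cwtl
survives in the generated code.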

Added:
    llvm/trunk/test/CodeGen/X86/known-bits-vector.ll

Added: llvm/trunk/test/CodeGen/X86/known-bits-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/known-bits-vector.ll?rev=284308&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/known-bits-vector.ll (added)
+++ llvm/trunk/test/CodeGen/X86/known-bits-vector.ll Sat Oct 15 14:29:26 2016
@@ -0,0 +1,119 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
+
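+; Only element 0 is demanded, and it is masked down to 4 bits, so the
+; extracted value is known non-negative and the sext should fold away.
+; knownbits merges in the unknown bits of elements 1-7, so a cwtl survives.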
+define i32 @knownbits_mask_extract_sext(<8 x i16> %a0) nounwind {
+; X32-LABEL: knownbits_mask_extract_sext:
+; X32:       # BB#0:
+; X32-NEXT:    vandps {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT:    vmovd %xmm0, %eax
+; X32-NEXT:    cwtl
+; X32-NEXT:    retl
+;
+; X64-LABEL: knownbits_mask_extract_sext:
+; X64:       # BB#0:
+; X64-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vmovd %xmm0, %eax
+; X64-NEXT:    cwtl
+; X64-NEXT:    retq
+  %1 = and <8 x i16> %a0, <i16 15, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+  %2 = extractelement <8 x i16> %1, i32 0
+  %3 = sext i16 %2 to i32
+  ret i32 %3
+}
+
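+; Only element 0 is demanded, and it is masked to 16 bits, so the extracted
+; i64 is known to lie in [0,65535] and a plain signed cvtsi2ss would do;
+; instead the generic unsigned-to-float expansion (sign test plus fixup)
+; is emitted.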
+define float @knownbits_mask_extract_uitofp(<2 x i64> %a0) nounwind {
+; X32-LABEL: knownbits_mask_extract_uitofp:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %ebp
+; X32-NEXT:    movl %esp, %ebp
+; X32-NEXT:    andl $-8, %esp
+; X32-NEXT:    subl $16, %esp
+; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7]
+; X32-NEXT:    vpextrd $1, %xmm0, %eax
+; X32-NEXT:    vmovq %xmm0, {{[0-9]+}}(%esp)
+; X32-NEXT:    xorl %ecx, %ecx
+; X32-NEXT:    testl %eax, %eax
+; X32-NEXT:    setns %cl
+; X32-NEXT:    fildll {{[0-9]+}}(%esp)
+; X32-NEXT:    fadds {{\.LCPI.*}}(,%ecx,4)
+; X32-NEXT:    fstps {{[0-9]+}}(%esp)
+; X32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT:    vmovss %xmm0, (%esp)
+; X32-NEXT:    flds (%esp)
+; X32-NEXT:    movl %ebp, %esp
+; X32-NEXT:    popl %ebp
+; X32-NEXT:    retl
+;
+; X64-LABEL: knownbits_mask_extract_uitofp:
+; X64:       # BB#0:
+; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7]
+; X64-NEXT:    vmovq %xmm0, %rax
+; X64-NEXT:    testq %rax, %rax
+; X64-NEXT:    js .LBB1_1
+; X64-NEXT:  # BB#2:
+; X64-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; X64-NEXT:    retq
+; X64-NEXT:  .LBB1_1:
+; X64-NEXT:    movq %rax, %rcx
+; X64-NEXT:    shrq %rcx
+; X64-NEXT:    andl $1, %eax
+; X64-NEXT:    orq %rcx, %rax
+; X64-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
+; X64-NEXT:    vaddss %xmm0, %xmm0, %xmm0
+; X64-NEXT:    retq
+  %1 = and <2 x i64> %a0, <i64 65535, i64 -1>
+  %2 = extractelement <2 x i64> %1, i32 0
+  %3 = uitofp i64 %2 to float
+  ret float %3
+}
+
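+; The shuffle only demands elements 4-7, each masked to 4 bits, so the
+; sign bit of every demanded element is known zero and the sext is
+; equivalent to a zero-extension; knownbits sees the unmasked elements
+; 0-3 and learns nothing.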
+define <4 x i32> @knownbits_mask_shuffle_sext(<8 x i16> %a0) nounwind {
+; X32-LABEL: knownbits_mask_shuffle_sext:
+; X32:       # BB#0:
+; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X32-NEXT:    vpmovsxwd %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: knownbits_mask_shuffle_sext:
+; X64:       # BB#0:
+; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-NEXT:    vpmovsxwd %xmm0, %xmm0
+; X64-NEXT:    retq
+  %1 = and <8 x i16> %a0, <i16 -1, i16 -1, i16 -1, i16 -1, i16 15, i16 15, i16 15, i16 15>
+  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %3 = sext <4 x i16> %2 to <4 x i32>
+  ret <4 x i32> %3
+}
+
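+; The shuffle only demands elements 2 and 3, both masked to values below
+; 2^16, so every lane is known non-negative and a single vcvtdq2ps would
+; suffice; the generic unsigned-to-float expansion is emitted instead.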
+define <4 x float> @knownbits_mask_shuffle_uitofp(<4 x i32> %a0) nounwind {
+; X32-LABEL: knownbits_mask_shuffle_uitofp:
+; X32:       # BB#0:
+; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; X32-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
+; X32-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
+; X32-NEXT:    vaddps {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: knownbits_mask_shuffle_uitofp:
+; X64:       # BB#0:
+; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; X64-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
+; X64-NEXT:    vpsrld $16, %xmm0, %xmm0
+; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
+; X64-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vaddps %xmm0, %xmm1, %xmm0
+; X64-NEXT:    retq
+  %1 = and <4 x i32> %a0, <i32 -1, i32 -1, i32 255, i32 4085>
+  %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
+  %3 = uitofp <4 x i32> %2 to <4 x float>
+  ret <4 x float> %3
+}

More information about the llvm-commits mailing list