[llvm] 7f07195 - [X86] Add test cases for v2i64->v2f32 strict_sint_to_fp/strict_uint_to_fp.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 26 08:58:53 PST 2019


Author: Craig Topper
Date: 2019-12-26T08:58:34-08:00
New Revision: 7f071958cde4f168823b4d9cfb9d507e69de88cb

URL: https://github.com/llvm/llvm-project/commit/7f071958cde4f168823b4d9cfb9d507e69de88cb
DIFF: https://github.com/llvm/llvm-project/commit/7f071958cde4f168823b4d9cfb9d507e69de88cb.diff

LOG: [X86] Add test cases for v2i64->v2f32 strict_sint_to_fp/strict_uint_to_fp.
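
The two tests added below exercise the constrained (strict) <2 x i64> -> <2 x float> conversion intrinsics. For readers who want to reproduce the lowering outside the test harness, here is a minimal standalone sketch of the same IR shape; the file name example.ll, the @sitofp_example/@uitofp_example names, and the exact llc invocation are placeholders modeled on the RUN lines already in vec-strict-inttofp-128.ll, and the CHECK lines in the committed test are typically autogenerated with utils/update_llc_test_checks.py rather than written by hand.

; Standalone sketch (illustrative only, not part of the committed test).
; An llc invocation along the lines of the file's RUN lines shows the lowering:
;   llc example.ll -mtriple=x86_64-unknown-unknown -O3 -disable-strictnode-mutation -o -

declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)

; Strict signed conversion: rounding follows the dynamic (runtime) rounding
; mode and FP exception behavior is strict, so the backend may not speculate
; the conversions or reorder them past exception-state accesses.
define <2 x float> @sitofp_example(<2 x i64> %x) #0 {
  %r = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64> %x,
                                                     metadata !"round.dynamic",
                                                     metadata !"fpexcept.strict") #0
  ret <2 x float> %r
}

; Strict unsigned conversion of the same vector type.
define <2 x float> @uitofp_example(<2 x i64> %x) #0 {
  %r = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64> %x,
                                                     metadata !"round.dynamic",
                                                     metadata !"fpexcept.strict") #0
  ret <2 x float> %r
}

attributes #0 = { strictfp }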

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll b/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
index a173e5cb4154..4b4415212d7c 100644
--- a/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
+++ b/llvm/test/CodeGen/X86/vec-strict-inttofp-128.ll
@@ -12,6 +12,8 @@
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=avx512dq,avx512vl -O3 -disable-strictnode-mutation | FileCheck %s --check-prefixes=AVX,AVX512DQVL,AVX512DQVL-32
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512dq,avx512vl -O3 -disable-strictnode-mutation | FileCheck %s --check-prefixes=AVX,AVX512DQVL,AVX512DQVL-64
 
+declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
+declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
@@ -31,6 +33,315 @@ declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
 
+define <2 x float> @sitofp_v2i64_v2f32(<2 x i64> %x) #0 {
+; SSE-32-LABEL: sitofp_v2i64_v2f32:
+; SSE-32:       # %bb.0:
+; SSE-32-NEXT:    pushl %ebp
+; SSE-32-NEXT:    .cfi_def_cfa_offset 8
+; SSE-32-NEXT:    .cfi_offset %ebp, -8
+; SSE-32-NEXT:    movl %esp, %ebp
+; SSE-32-NEXT:    .cfi_def_cfa_register %ebp
+; SSE-32-NEXT:    andl $-8, %esp
+; SSE-32-NEXT:    subl $24, %esp
+; SSE-32-NEXT:    movq %xmm0, {{[0-9]+}}(%esp)
+; SSE-32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE-32-NEXT:    movq %xmm0, {{[0-9]+}}(%esp)
+; SSE-32-NEXT:    fildll {{[0-9]+}}(%esp)
+; SSE-32-NEXT:    fstps (%esp)
+; SSE-32-NEXT:    fildll {{[0-9]+}}(%esp)
+; SSE-32-NEXT:    fstps {{[0-9]+}}(%esp)
+; SSE-32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-32-NEXT:    movl %ebp, %esp
+; SSE-32-NEXT:    popl %ebp
+; SSE-32-NEXT:    .cfi_def_cfa %esp, 4
+; SSE-32-NEXT:    retl
+;
+; SSE-64-LABEL: sitofp_v2i64_v2f32:
+; SSE-64:       # %bb.0:
+; SSE-64-NEXT:    movq %xmm0, %rax
+; SSE-64-NEXT:    cvtsi2ss %rax, %xmm1
+; SSE-64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE-64-NEXT:    movq %xmm0, %rax
+; SSE-64-NEXT:    xorps %xmm0, %xmm0
+; SSE-64-NEXT:    cvtsi2ss %rax, %xmm0
+; SSE-64-NEXT:    unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-64-NEXT:    movaps %xmm1, %xmm0
+; SSE-64-NEXT:    retq
+;
+; AVX-32-LABEL: sitofp_v2i64_v2f32:
+; AVX-32:       # %bb.0:
+; AVX-32-NEXT:    pushl %ebp
+; AVX-32-NEXT:    .cfi_def_cfa_offset 8
+; AVX-32-NEXT:    .cfi_offset %ebp, -8
+; AVX-32-NEXT:    movl %esp, %ebp
+; AVX-32-NEXT:    .cfi_def_cfa_register %ebp
+; AVX-32-NEXT:    andl $-8, %esp
+; AVX-32-NEXT:    subl $24, %esp
+; AVX-32-NEXT:    vmovlps %xmm0, {{[0-9]+}}(%esp)
+; AVX-32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX-32-NEXT:    vmovlps %xmm0, {{[0-9]+}}(%esp)
+; AVX-32-NEXT:    fildll {{[0-9]+}}(%esp)
+; AVX-32-NEXT:    fstps (%esp)
+; AVX-32-NEXT:    fildll {{[0-9]+}}(%esp)
+; AVX-32-NEXT:    fstps {{[0-9]+}}(%esp)
+; AVX-32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-32-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX-32-NEXT:    movl %ebp, %esp
+; AVX-32-NEXT:    popl %ebp
+; AVX-32-NEXT:    .cfi_def_cfa %esp, 4
+; AVX-32-NEXT:    retl
+;
+; AVX-64-LABEL: sitofp_v2i64_v2f32:
+; AVX-64:       # %bb.0:
+; AVX-64-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX-64-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
+; AVX-64-NEXT:    vmovq %xmm0, %rax
+; AVX-64-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
+; AVX-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX-64-NEXT:    retq
+;
+; AVX512DQ-32-LABEL: sitofp_v2i64_v2f32:
+; AVX512DQ-32:       # %bb.0:
+; AVX512DQ-32-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512DQ-32-NEXT:    vcvtqq2ps %zmm0, %ymm1
+; AVX512DQ-32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512DQ-32-NEXT:    vcvtqq2ps %zmm0, %ymm0
+; AVX512DQ-32-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
+; AVX512DQ-32-NEXT:    vzeroupper
+; AVX512DQ-32-NEXT:    retl
+;
+; AVX512DQ-64-LABEL: sitofp_v2i64_v2f32:
+; AVX512DQ-64:       # %bb.0:
+; AVX512DQ-64-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512DQ-64-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
+; AVX512DQ-64-NEXT:    vmovq %xmm0, %rax
+; AVX512DQ-64-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
+; AVX512DQ-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX512DQ-64-NEXT:    retq
+;
+; AVX512DQVL-32-LABEL: sitofp_v2i64_v2f32:
+; AVX512DQVL-32:       # %bb.0:
+; AVX512DQVL-32-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512DQVL-32-NEXT:    vcvtqq2ps %ymm0, %xmm1
+; AVX512DQVL-32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512DQVL-32-NEXT:    vcvtqq2ps %ymm0, %xmm0
+; AVX512DQVL-32-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
+; AVX512DQVL-32-NEXT:    vzeroupper
+; AVX512DQVL-32-NEXT:    retl
+;
+; AVX512DQVL-64-LABEL: sitofp_v2i64_v2f32:
+; AVX512DQVL-64:       # %bb.0:
+; AVX512DQVL-64-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512DQVL-64-NEXT:    vcvtsi2ss %rax, %xmm1, %xmm1
+; AVX512DQVL-64-NEXT:    vmovq %xmm0, %rax
+; AVX512DQVL-64-NEXT:    vcvtsi2ss %rax, %xmm2, %xmm0
+; AVX512DQVL-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX512DQVL-64-NEXT:    retq
+ %result = call <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64> %x,
+                                                              metadata !"round.dynamic",
+                                                              metadata !"fpexcept.strict") #0
+  ret <2 x float> %result
+}
+
+define <2 x float> @uitofp_v2i64_v2f32(<2 x i64> %x) #0 {
+; SSE-32-LABEL: uitofp_v2i64_v2f32:
+; SSE-32:       # %bb.0:
+; SSE-32-NEXT:    pushl %ebp
+; SSE-32-NEXT:    .cfi_def_cfa_offset 8
+; SSE-32-NEXT:    .cfi_offset %ebp, -8
+; SSE-32-NEXT:    movl %esp, %ebp
+; SSE-32-NEXT:    .cfi_def_cfa_register %ebp
+; SSE-32-NEXT:    andl $-8, %esp
+; SSE-32-NEXT:    subl $24, %esp
+; SSE-32-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE-32-NEXT:    movq %xmm1, {{[0-9]+}}(%esp)
+; SSE-32-NEXT:    movq %xmm0, {{[0-9]+}}(%esp)
+; SSE-32-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE-32-NEXT:    movd %xmm1, %eax
+; SSE-32-NEXT:    xorl %ecx, %ecx
+; SSE-32-NEXT:    testl %eax, %eax
+; SSE-32-NEXT:    setns %cl
+; SSE-32-NEXT:    fildll {{[0-9]+}}(%esp)
+; SSE-32-NEXT:    fadds {{\.LCPI.*}}(,%ecx,4)
+; SSE-32-NEXT:    fstps (%esp)
+; SSE-32-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE-32-NEXT:    movd %xmm0, %eax
+; SSE-32-NEXT:    xorl %ecx, %ecx
+; SSE-32-NEXT:    testl %eax, %eax
+; SSE-32-NEXT:    setns %cl
+; SSE-32-NEXT:    fildll {{[0-9]+}}(%esp)
+; SSE-32-NEXT:    fadds {{\.LCPI.*}}(,%ecx,4)
+; SSE-32-NEXT:    fstps {{[0-9]+}}(%esp)
+; SSE-32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-32-NEXT:    movl %ebp, %esp
+; SSE-32-NEXT:    popl %ebp
+; SSE-32-NEXT:    .cfi_def_cfa %esp, 4
+; SSE-32-NEXT:    retl
+;
+; SSE-64-LABEL: uitofp_v2i64_v2f32:
+; SSE-64:       # %bb.0:
+; SSE-64-NEXT:    movdqa %xmm0, %xmm1
+; SSE-64-NEXT:    movq %xmm0, %rax
+; SSE-64-NEXT:    movq %rax, %rcx
+; SSE-64-NEXT:    shrq %rcx
+; SSE-64-NEXT:    movl %eax, %edx
+; SSE-64-NEXT:    andl $1, %edx
+; SSE-64-NEXT:    orq %rcx, %rdx
+; SSE-64-NEXT:    testq %rax, %rax
+; SSE-64-NEXT:    cmovnsq %rax, %rdx
+; SSE-64-NEXT:    xorps %xmm0, %xmm0
+; SSE-64-NEXT:    cvtsi2ss %rdx, %xmm0
+; SSE-64-NEXT:    jns .LBB1_2
+; SSE-64-NEXT:  # %bb.1:
+; SSE-64-NEXT:    addss %xmm0, %xmm0
+; SSE-64-NEXT:  .LBB1_2:
+; SSE-64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSE-64-NEXT:    movq %xmm1, %rax
+; SSE-64-NEXT:    movq %rax, %rcx
+; SSE-64-NEXT:    shrq %rcx
+; SSE-64-NEXT:    movl %eax, %edx
+; SSE-64-NEXT:    andl $1, %edx
+; SSE-64-NEXT:    orq %rcx, %rdx
+; SSE-64-NEXT:    testq %rax, %rax
+; SSE-64-NEXT:    cmovnsq %rax, %rdx
+; SSE-64-NEXT:    xorps %xmm1, %xmm1
+; SSE-64-NEXT:    cvtsi2ss %rdx, %xmm1
+; SSE-64-NEXT:    jns .LBB1_4
+; SSE-64-NEXT:  # %bb.3:
+; SSE-64-NEXT:    addss %xmm1, %xmm1
+; SSE-64-NEXT:  .LBB1_4:
+; SSE-64-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE-64-NEXT:    retq
+;
+; AVX-32-LABEL: uitofp_v2i64_v2f32:
+; AVX-32:       # %bb.0:
+; AVX-32-NEXT:    pushl %ebp
+; AVX-32-NEXT:    .cfi_def_cfa_offset 8
+; AVX-32-NEXT:    .cfi_offset %ebp, -8
+; AVX-32-NEXT:    movl %esp, %ebp
+; AVX-32-NEXT:    .cfi_def_cfa_register %ebp
+; AVX-32-NEXT:    andl $-8, %esp
+; AVX-32-NEXT:    subl $24, %esp
+; AVX-32-NEXT:    vmovlps %xmm0, {{[0-9]+}}(%esp)
+; AVX-32-NEXT:    vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX-32-NEXT:    vmovlps %xmm1, {{[0-9]+}}(%esp)
+; AVX-32-NEXT:    vextractps $1, %xmm0, %eax
+; AVX-32-NEXT:    xorl %ecx, %ecx
+; AVX-32-NEXT:    testl %eax, %eax
+; AVX-32-NEXT:    setns %cl
+; AVX-32-NEXT:    fildll {{[0-9]+}}(%esp)
+; AVX-32-NEXT:    fadds {{\.LCPI.*}}(,%ecx,4)
+; AVX-32-NEXT:    fstps {{[0-9]+}}(%esp)
+; AVX-32-NEXT:    vextractps $3, %xmm0, %eax
+; AVX-32-NEXT:    xorl %ecx, %ecx
+; AVX-32-NEXT:    testl %eax, %eax
+; AVX-32-NEXT:    setns %cl
+; AVX-32-NEXT:    fildll {{[0-9]+}}(%esp)
+; AVX-32-NEXT:    fadds {{\.LCPI.*}}(,%ecx,4)
+; AVX-32-NEXT:    fstps (%esp)
+; AVX-32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-32-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
+; AVX-32-NEXT:    movl %ebp, %esp
+; AVX-32-NEXT:    popl %ebp
+; AVX-32-NEXT:    .cfi_def_cfa %esp, 4
+; AVX-32-NEXT:    retl
+;
+; AVX1-64-LABEL: uitofp_v2i64_v2f32:
+; AVX1-64:       # %bb.0:
+; AVX1-64-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX1-64-NEXT:    movq %rax, %rcx
+; AVX1-64-NEXT:    shrq %rcx
+; AVX1-64-NEXT:    movl %eax, %edx
+; AVX1-64-NEXT:    andl $1, %edx
+; AVX1-64-NEXT:    orq %rcx, %rdx
+; AVX1-64-NEXT:    testq %rax, %rax
+; AVX1-64-NEXT:    cmovnsq %rax, %rdx
+; AVX1-64-NEXT:    vcvtsi2ss %rdx, %xmm1, %xmm1
+; AVX1-64-NEXT:    jns .LBB1_2
+; AVX1-64-NEXT:  # %bb.1:
+; AVX1-64-NEXT:    vaddss %xmm1, %xmm1, %xmm1
+; AVX1-64-NEXT:  .LBB1_2:
+; AVX1-64-NEXT:    vmovq %xmm0, %rax
+; AVX1-64-NEXT:    movq %rax, %rcx
+; AVX1-64-NEXT:    shrq %rcx
+; AVX1-64-NEXT:    movl %eax, %edx
+; AVX1-64-NEXT:    andl $1, %edx
+; AVX1-64-NEXT:    orq %rcx, %rdx
+; AVX1-64-NEXT:    testq %rax, %rax
+; AVX1-64-NEXT:    cmovnsq %rax, %rdx
+; AVX1-64-NEXT:    vcvtsi2ss %rdx, %xmm2, %xmm0
+; AVX1-64-NEXT:    jns .LBB1_4
+; AVX1-64-NEXT:  # %bb.3:
+; AVX1-64-NEXT:    vaddss %xmm0, %xmm0, %xmm0
+; AVX1-64-NEXT:  .LBB1_4:
+; AVX1-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX1-64-NEXT:    retq
+;
+; AVX512F-64-LABEL: uitofp_v2i64_v2f32:
+; AVX512F-64:       # %bb.0:
+; AVX512F-64-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512F-64-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
+; AVX512F-64-NEXT:    vmovq %xmm0, %rax
+; AVX512F-64-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
+; AVX512F-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX512F-64-NEXT:    retq
+;
+; AVX512VL-64-LABEL: uitofp_v2i64_v2f32:
+; AVX512VL-64:       # %bb.0:
+; AVX512VL-64-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512VL-64-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
+; AVX512VL-64-NEXT:    vmovq %xmm0, %rax
+; AVX512VL-64-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
+; AVX512VL-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX512VL-64-NEXT:    retq
+;
+; AVX512DQ-32-LABEL: uitofp_v2i64_v2f32:
+; AVX512DQ-32:       # %bb.0:
+; AVX512DQ-32-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
+; AVX512DQ-32-NEXT:    vcvtuqq2ps %zmm0, %ymm1
+; AVX512DQ-32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512DQ-32-NEXT:    vcvtuqq2ps %zmm0, %ymm0
+; AVX512DQ-32-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
+; AVX512DQ-32-NEXT:    vzeroupper
+; AVX512DQ-32-NEXT:    retl
+;
+; AVX512DQ-64-LABEL: uitofp_v2i64_v2f32:
+; AVX512DQ-64:       # %bb.0:
+; AVX512DQ-64-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512DQ-64-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
+; AVX512DQ-64-NEXT:    vmovq %xmm0, %rax
+; AVX512DQ-64-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
+; AVX512DQ-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX512DQ-64-NEXT:    retq
+;
+; AVX512DQVL-32-LABEL: uitofp_v2i64_v2f32:
+; AVX512DQVL-32:       # %bb.0:
+; AVX512DQVL-32-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
+; AVX512DQVL-32-NEXT:    vcvtuqq2ps %ymm0, %xmm1
+; AVX512DQVL-32-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX512DQVL-32-NEXT:    vcvtuqq2ps %ymm0, %xmm0
+; AVX512DQVL-32-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
+; AVX512DQVL-32-NEXT:    vzeroupper
+; AVX512DQVL-32-NEXT:    retl
+;
+; AVX512DQVL-64-LABEL: uitofp_v2i64_v2f32:
+; AVX512DQVL-64:       # %bb.0:
+; AVX512DQVL-64-NEXT:    vpextrq $1, %xmm0, %rax
+; AVX512DQVL-64-NEXT:    vcvtusi2ss %rax, %xmm1, %xmm1
+; AVX512DQVL-64-NEXT:    vmovq %xmm0, %rax
+; AVX512DQVL-64-NEXT:    vcvtusi2ss %rax, %xmm2, %xmm0
+; AVX512DQVL-64-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
+; AVX512DQVL-64-NEXT:    retq
+ %result = call <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64> %x,
+                                                              metadata !"round.dynamic",
+                                                              metadata !"fpexcept.strict") #0
+  ret <2 x float> %result
+}
+
 define <4 x float> @sitofp_v4i1_v4f32(<4 x i1> %x) #0 {
 ; SSE-LABEL: sitofp_v4i1_v4f32:
 ; SSE:       # %bb.0:


More information about the llvm-commits mailing list