[llvm] be851c6 - [X86] Add SSE/AVX1/AVX2 + f16/f32 test coverage to splat(fpext) tests

Simon Pilgrim via llvm-commits llvm-commits@lists.llvm.org
Sat Jan 14 06:02:33 PST 2023


Author: Simon Pilgrim
Date: 2023-01-14T14:02:17Z
New Revision: be851c674f369a4316c8aeff90d9cb99c7d5df57

URL: https://github.com/llvm/llvm-project/commit/be851c674f369a4316c8aeff90d9cb99c7d5df57
DIFF: https://github.com/llvm/llvm-project/commit/be851c674f369a4316c8aeff90d9cb99c7d5df57.diff

LOG: [X86] Add SSE/AVX1/AVX2 + f16/f32 test coverage to splat(fpext) tests

As discussed on D141657

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/prefer-fpext-splat.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/X86/prefer-fpext-splat.ll b/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
index d5bfb0c1a0f8c..0cd4472f58b29 100644
--- a/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
+++ b/llvm/test/CodeGen/X86/prefer-fpext-splat.ll
@@ -1,17 +1,249 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown                           | FileCheck %s --check-prefixes=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx               | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2              | FileCheck %s --check-prefixes=AVX,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512
 
-define <4 x double> @prefer(float* %p) {
-; CHECK-LABEL: prefer:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT:    vbroadcastsd %xmm0, %ymm0
-; CHECK-NEXT:    retq
+define <2 x double> @prefer_f32_v2f64(ptr %p) nounwind {
+; SSE-LABEL: prefer_f32_v2f64:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
+; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: prefer_f32_v2f64:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX-NEXT:    retq
 entry:
-  %0 = load float, float* %p, align 4
+  %0 = load float, ptr %p, align 4
+  %vecinit.i = insertelement <2 x float> undef, float %0, i64 0
+  %vecinit3.i = shufflevector <2 x float> %vecinit.i, <2 x float> poison, <2 x i32> zeroinitializer
+  %conv.i = fpext <2 x float> %vecinit3.i to <2 x double>
+  ret <2 x double> %conv.i
+}
+
+define <4 x double> @prefer_f32_v4f64(ptr %p) nounwind {
+; SSE-LABEL: prefer_f32_v4f64:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
+; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; SSE-NEXT:    movaps %xmm0, %xmm1
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: prefer_f32_v4f64:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: prefer_f32_v4f64:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX2-NEXT:    vbroadcastsd %xmm0, %ymm0
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: prefer_f32_v4f64:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vbroadcastsd %xmm0, %ymm0
+; AVX512-NEXT:    retq
+entry:
+  %0 = load float, ptr %p, align 4
   %vecinit.i = insertelement <4 x float> undef, float %0, i64 0
   %vecinit3.i = shufflevector <4 x float> %vecinit.i, <4 x float> poison, <4 x i32> zeroinitializer
   %conv.i = fpext <4 x float> %vecinit3.i to <4 x double>
   ret <4 x double> %conv.i
 }
+
+define <4 x float> @prefer_f16_v4f32(ptr %p) nounwind {
+; SSE-LABEL: prefer_f16_v4f32:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    pushq %rax
+; SSE-NEXT:    pinsrw $0, (%rdi), %xmm0
+; SSE-NEXT:    callq __extendhfsf2@PLT
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT:    popq %rax
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: prefer_f16_v4f32:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    pushq %rax
+; AVX1-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX1-NEXT:    callq __extendhfsf2@PLT
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT:    popq %rax
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: prefer_f16_v4f32:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    pushq %rax
+; AVX2-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX2-NEXT:    callq __extendhfsf2@PLT
+; AVX2-NEXT:    vpbroadcastd %xmm0, %xmm0
+; AVX2-NEXT:    popq %rax
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: prefer_f16_v4f32:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    movzwl (%rdi), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT:    vbroadcastss %xmm0, %xmm0
+; AVX512-NEXT:    retq
+entry:
+  %0 = load half, ptr %p, align 4
+  %vecinit.i = insertelement <4 x half> undef, half %0, i64 0
+  %vecinit3.i = shufflevector <4 x half> %vecinit.i, <4 x half> poison, <4 x i32> zeroinitializer
+  %conv.i = fpext <4 x half> %vecinit3.i to <4 x float>
+  ret <4 x float> %conv.i
+}
+
+define <8 x float> @prefer_f16_v8f32(ptr %p) nounwind {
+; SSE-LABEL: prefer_f16_v8f32:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    pushq %rax
+; SSE-NEXT:    pinsrw $0, (%rdi), %xmm0
+; SSE-NEXT:    callq __extendhfsf2@PLT
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; SSE-NEXT:    movaps %xmm0, %xmm1
+; SSE-NEXT:    popq %rax
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: prefer_f16_v8f32:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    pushq %rax
+; AVX1-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX1-NEXT:    callq __extendhfsf2@PLT
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    popq %rax
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: prefer_f16_v8f32:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    pushq %rax
+; AVX2-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX2-NEXT:    callq __extendhfsf2@PLT
+; AVX2-NEXT:    vpbroadcastd %xmm0, %ymm0
+; AVX2-NEXT:    popq %rax
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: prefer_f16_v8f32:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    movzwl (%rdi), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT:    vbroadcastss %xmm0, %ymm0
+; AVX512-NEXT:    retq
+entry:
+  %0 = load half, ptr %p, align 4
+  %vecinit.i = insertelement <8 x half> undef, half %0, i64 0
+  %vecinit3.i = shufflevector <8 x half> %vecinit.i, <8 x half> poison, <8 x i32> zeroinitializer
+  %conv.i = fpext <8 x half> %vecinit3.i to <8 x float>
+  ret <8 x float> %conv.i
+}
+
+define <2 x double> @prefer_f16_v2f64(ptr %p) nounwind {
+; SSE-LABEL: prefer_f16_v2f64:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    pushq %rax
+; SSE-NEXT:    pinsrw $0, (%rdi), %xmm0
+; SSE-NEXT:    callq __extendhfsf2@PLT
+; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
+; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; SSE-NEXT:    popq %rax
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: prefer_f16_v2f64:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    pushq %rax
+; AVX1-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX1-NEXT:    callq __extendhfsf2@PLT
+; AVX1-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-NEXT:    popq %rax
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: prefer_f16_v2f64:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    pushq %rax
+; AVX2-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX2-NEXT:    callq __extendhfsf2@PLT
+; AVX2-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX2-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX2-NEXT:    popq %rax
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: prefer_f16_v2f64:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    movzwl (%rdi), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX512-NEXT:    retq
+entry:
+  %0 = load half, ptr %p, align 4
+  %vecinit.i = insertelement <2 x half> undef, half %0, i64 0
+  %vecinit3.i = shufflevector <2 x half> %vecinit.i, <2 x half> poison, <2 x i32> zeroinitializer
+  %conv.i = fpext <2 x half> %vecinit3.i to <2 x double>
+  ret <2 x double> %conv.i
+}
+
+define <4 x double> @prefer_f16_v4f64(ptr %p) nounwind {
+; SSE-LABEL: prefer_f16_v4f64:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    pushq %rax
+; SSE-NEXT:    pinsrw $0, (%rdi), %xmm0
+; SSE-NEXT:    callq __extendhfsf2@PLT
+; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
+; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
+; SSE-NEXT:    movaps %xmm0, %xmm1
+; SSE-NEXT:    popq %rax
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: prefer_f16_v4f64:
+; AVX1:       # %bb.0: # %entry
+; AVX1-NEXT:    pushq %rax
+; AVX1-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX1-NEXT:    callq __extendhfsf2@PLT
+; AVX1-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX1-NEXT:    popq %rax
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: prefer_f16_v4f64:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    pushq %rax
+; AVX2-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
+; AVX2-NEXT:    callq __extendhfsf2@PLT
+; AVX2-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX2-NEXT:    vbroadcastsd %xmm0, %ymm0
+; AVX2-NEXT:    popq %rax
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: prefer_f16_v4f64:
+; AVX512:       # %bb.0: # %entry
+; AVX512-NEXT:    movzwl (%rdi), %eax
+; AVX512-NEXT:    vmovd %eax, %xmm0
+; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm0
+; AVX512-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT:    vbroadcastsd %xmm0, %ymm0
+; AVX512-NEXT:    retq
+entry:
+  %0 = load half, ptr %p, align 4
+  %vecinit.i = insertelement <4 x half> undef, half %0, i64 0
+  %vecinit3.i = shufflevector <4 x half> %vecinit.i, <4 x half> poison, <4 x i32> zeroinitializer
+  %conv.i = fpext <4 x half> %vecinit3.i to <4 x double>
+  ret <4 x double> %conv.i
+}


        


More information about the llvm-commits mailing list