[llvm] [X86][FP16] Fix crash issue when AVX512VL is not set (PR #119309)

Phoebe Wang via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 10 02:10:41 PST 2024


https://github.com/phoebewang updated https://github.com/llvm/llvm-project/pull/119309

From 8690acd52e7c37eca9f6b30d1eb0d497de286aa8 Mon Sep 17 00:00:00 2001
From: "Wang, Phoebe" <phoebe.wang at intel.com>
Date: Tue, 10 Dec 2024 10:19:17 +0800
Subject: [PATCH 1/2] [X86][FP16] Fix crash issue when AVX512VL is not set

Fixes the problem reported in #116153.
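
When the f16 element type is soft-promoted and the target has AVX512F but
not AVX512VL, the soft-f16 path in LowerVSETCC produced operations that
are only legal with VLX, leading to a crash. Bailing out with SDValue() in
that case lets the generic legalizer scalarize the compare instead; the
updated CHECK lines below reflect this (packed vcmpltps/vcmpneqps
sequences become per-element vucomiss compares).

A minimal sketch of the failing pattern, reduced from the pr116153.ll test
added below (the feature set here is illustrative, not the exact list from
the report):

  ; llc -mtriple=x86_64-- crashed on input like this before the fix
  define <16 x i1> @repro(<16 x half> %x) "target-features"="+avx512f,+f16c,+evex512" {
    %c = fcmp ord <16 x half> %x, zeroinitializer
    ret <16 x i1> %c
  }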
---
 llvm/lib/Target/X86/X86ISelLowering.cpp       |   3 +
 .../test/CodeGen/X86/avx512-insert-extract.ll |  29 ++-
 llvm/test/CodeGen/X86/avx512-vec-cmp.ll       |  54 +++--
 llvm/test/CodeGen/X86/fminimum-fmaximum.ll    | 202 ++++++++++++++++--
 llvm/test/CodeGen/X86/pr116153.ll             |  50 +++++
 .../CodeGen/X86/vector-reduce-fmax-nnan.ll    |   9 +-
 .../CodeGen/X86/vector-reduce-fmin-nnan.ll    |   9 +-
 7 files changed, 311 insertions(+), 45 deletions(-)
 create mode 100644 llvm/test/CodeGen/X86/pr116153.ll

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 3824d8c6c9c601..871de16d66b6c5 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -23566,6 +23566,9 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
 
     SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
     if (isSoftF16(EltVT, Subtarget)) {
+      if (Subtarget.hasAVX512() && !Subtarget.hasVLX())
+        return SDValue();
+
       // Break 256-bit FP vector compare into smaller ones.
       if (OpVT.is256BitVector() && !Subtarget.useAVX512Regs())
         return splitVSETCC(VT, Op0, Op1, Cond, DAG, dl);
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index 085581d27c62fb..7ce37c637a79ca 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -2159,11 +2159,30 @@ define i128 @test_insertelement_variable_v128i1(<128 x i8> %a, i8 %b, i32 %index
 define void @test_concat_v2i1(ptr %arg, ptr %arg1, ptr %arg2) nounwind {
 ; KNL-LABEL: test_concat_v2i1:
 ; KNL:       ## %bb.0:
-; KNL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; KNL-NEXT:    vcvtph2ps %xmm0, %ymm0
-; KNL-NEXT:    vcmpltps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %k0
-; KNL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; KNL-NEXT:    vcmpltps %zmm0, %zmm1, %k1
+; KNL-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
+; KNL-NEXT:    vcvtph2ps %xmm0, %xmm1
+; KNL-NEXT:    vmovss {{.*#+}} xmm2 = [6.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; KNL-NEXT:    vucomiss %xmm2, %xmm1
+; KNL-NEXT:    setb %al
+; KNL-NEXT:    andl $1, %eax
+; KNL-NEXT:    kmovw %eax, %k0
+; KNL-NEXT:    vpshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
+; KNL-NEXT:    vcvtph2ps %xmm0, %xmm0
+; KNL-NEXT:    vucomiss %xmm2, %xmm0
+; KNL-NEXT:    setb %al
+; KNL-NEXT:    kmovw %eax, %k1
+; KNL-NEXT:    kshiftlw $1, %k1, %k1
+; KNL-NEXT:    korw %k1, %k0, %k0
+; KNL-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; KNL-NEXT:    vucomiss %xmm2, %xmm1
+; KNL-NEXT:    seta %al
+; KNL-NEXT:    andl $1, %eax
+; KNL-NEXT:    kmovw %eax, %k1
+; KNL-NEXT:    vucomiss %xmm2, %xmm0
+; KNL-NEXT:    seta %al
+; KNL-NEXT:    kmovw %eax, %k2
+; KNL-NEXT:    kshiftlw $1, %k2, %k2
+; KNL-NEXT:    korw %k2, %k1, %k1
 ; KNL-NEXT:    kandw %k1, %k0, %k1
 ; KNL-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
 ; KNL-NEXT:    vpternlogd {{.*#+}} zmm1 {%k1} {z} = -1
diff --git a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
index 24eb9b3715ed63..5ce2b56cbd43a0 100644
--- a/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
+++ b/llvm/test/CodeGen/X86/avx512-vec-cmp.ll
@@ -1441,30 +1441,56 @@ define <4 x i32> @zext_bool_logic(<4 x i64> %cond1, <4 x i64> %cond2, <4 x i32>
 define void @half_vec_compare(ptr %x, ptr %y) {
 ; KNL-LABEL: half_vec_compare:
 ; KNL:       ## %bb.0: ## %entry
-; KNL-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; KNL-NEXT:    ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; KNL-NEXT:    vcvtph2ps %xmm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
-; KNL-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
-; KNL-NEXT:    vcmpneqps %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfc,0xc2,0xc1,0x04]
-; KNL-NEXT:    vpmovdb %zmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x48,0x31,0xc0]
+; KNL-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; KNL-NEXT:    ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x07]
+; KNL-NEXT:    vpshuflw $85, %xmm0, %xmm1 ## encoding: [0xc5,0xfb,0x70,0xc8,0x55]
+; KNL-NEXT:    ## xmm1 = xmm0[1,1,1,1,4,5,6,7]
+; KNL-NEXT:    vcvtph2ps %xmm1, %xmm1 ## encoding: [0xc4,0xe2,0x79,0x13,0xc9]
+; KNL-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; KNL-NEXT:    vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
+; KNL-NEXT:    vucomiss %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xca]
+; KNL-NEXT:    movl $65535, %ecx ## encoding: [0xb9,0xff,0xff,0x00,0x00]
+; KNL-NEXT:    ## imm = 0xFFFF
+; KNL-NEXT:    movl $0, %edx ## encoding: [0xba,0x00,0x00,0x00,0x00]
+; KNL-NEXT:    cmovnel %ecx, %edx ## encoding: [0x0f,0x45,0xd1]
+; KNL-NEXT:    cmovpl %ecx, %edx ## encoding: [0x0f,0x4a,0xd1]
+; KNL-NEXT:    vcvtph2ps %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x13,0xc0]
+; KNL-NEXT:    vucomiss %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc2]
+; KNL-NEXT:    cmovnel %ecx, %eax ## encoding: [0x0f,0x45,0xc1]
+; KNL-NEXT:    cmovpl %ecx, %eax ## encoding: [0x0f,0x4a,0xc1]
+; KNL-NEXT:    vmovd %eax, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
+; KNL-NEXT:    vpinsrw $1, %edx, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xc4,0xc2,0x01]
+; KNL-NEXT:    vpacksswb %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x63,0xc0]
 ; KNL-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdb,0x05,A,A,A,A]
 ; KNL-NEXT:    ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; KNL-NEXT:    vpextrw $0, %xmm0, (%rsi) ## encoding: [0xc4,0xe3,0x79,0x15,0x06,0x00]
-; KNL-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
 ; KNL-NEXT:    retq ## encoding: [0xc3]
 ;
 ; AVX512BW-LABEL: half_vec_compare:
 ; AVX512BW:       ## %bb.0: ## %entry
-; AVX512BW-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX512BW-NEXT:    ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x07]
-; AVX512BW-NEXT:    vcvtph2ps %xmm0, %ymm0 ## encoding: [0xc4,0xe2,0x7d,0x13,0xc0]
-; AVX512BW-NEXT:    vxorps %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf0,0x57,0xc9]
-; AVX512BW-NEXT:    vcmpneqps %ymm1, %ymm0, %ymm0 ## encoding: [0xc5,0xfc,0xc2,0xc1,0x04]
-; AVX512BW-NEXT:    vpmovdb %zmm0, %xmm0 ## encoding: [0x62,0xf2,0x7e,0x48,0x31,0xc0]
+; AVX512BW-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512BW-NEXT:    ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0x07]
+; AVX512BW-NEXT:    vpshuflw $85, %xmm0, %xmm1 ## encoding: [0xc5,0xfb,0x70,0xc8,0x55]
+; AVX512BW-NEXT:    ## xmm1 = xmm0[1,1,1,1,4,5,6,7]
+; AVX512BW-NEXT:    vcvtph2ps %xmm1, %xmm1 ## encoding: [0xc4,0xe2,0x79,0x13,0xc9]
+; AVX512BW-NEXT:    xorl %eax, %eax ## encoding: [0x31,0xc0]
+; AVX512BW-NEXT:    vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
+; AVX512BW-NEXT:    vucomiss %xmm2, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xca]
+; AVX512BW-NEXT:    movl $65535, %ecx ## encoding: [0xb9,0xff,0xff,0x00,0x00]
+; AVX512BW-NEXT:    ## imm = 0xFFFF
+; AVX512BW-NEXT:    movl $0, %edx ## encoding: [0xba,0x00,0x00,0x00,0x00]
+; AVX512BW-NEXT:    cmovnel %ecx, %edx ## encoding: [0x0f,0x45,0xd1]
+; AVX512BW-NEXT:    cmovpl %ecx, %edx ## encoding: [0x0f,0x4a,0xd1]
+; AVX512BW-NEXT:    vcvtph2ps %xmm0, %xmm0 ## encoding: [0xc4,0xe2,0x79,0x13,0xc0]
+; AVX512BW-NEXT:    vucomiss %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x2e,0xc2]
+; AVX512BW-NEXT:    cmovnel %ecx, %eax ## encoding: [0x0f,0x45,0xc1]
+; AVX512BW-NEXT:    cmovpl %ecx, %eax ## encoding: [0x0f,0x4a,0xc1]
+; AVX512BW-NEXT:    vmovd %eax, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6e,0xc0]
+; AVX512BW-NEXT:    vpinsrw $1, %edx, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xc4,0xc2,0x01]
+; AVX512BW-NEXT:    vpacksswb %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x63,0xc0]
 ; AVX512BW-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdb,0x05,A,A,A,A]
 ; AVX512BW-NEXT:    ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
 ; AVX512BW-NEXT:    vpextrw $0, %xmm0, (%rsi) ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x79,0x15,0x06,0x00]
-; AVX512BW-NEXT:    vzeroupper ## encoding: [0xc5,0xf8,0x77]
 ; AVX512BW-NEXT:    retq ## encoding: [0xc3]
 ;
 ; SKX-LABEL: half_vec_compare:
diff --git a/llvm/test/CodeGen/X86/fminimum-fmaximum.ll b/llvm/test/CodeGen/X86/fminimum-fmaximum.ll
index 77d7cdb3ac879f..f576edcb82a51d 100644
--- a/llvm/test/CodeGen/X86/fminimum-fmaximum.ll
+++ b/llvm/test/CodeGen/X86/fminimum-fmaximum.ll
@@ -1641,26 +1641,188 @@ define <4 x half> @test_fmaximum_v4f16(<4 x half> %x, <4 x half> %y) {
 ;
 ; AVX512-LABEL: test_fmaximum_v4f16:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vcvtph2ps %xmm0, %ymm2
-; AVX512-NEXT:    vcvtph2ps %xmm1, %ymm3
-; AVX512-NEXT:    vcmpltps %ymm2, %ymm3, %ymm4
-; AVX512-NEXT:    vpmovdw %zmm4, %ymm4
-; AVX512-NEXT:    vpblendvb %xmm4, %xmm0, %xmm1, %xmm4
-; AVX512-NEXT:    vcmpunordps %ymm3, %ymm2, %ymm2
-; AVX512-NEXT:    vpmovdw %zmm2, %ymm2
-; AVX512-NEXT:    vpbroadcastw {{.*#+}} xmm3 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
-; AVX512-NEXT:    vpblendvb %xmm2, %xmm3, %xmm4, %xmm2
-; AVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX512-NEXT:    vpcmpeqw %xmm3, %xmm0, %xmm4
-; AVX512-NEXT:    vpblendvb %xmm4, %xmm0, %xmm2, %xmm0
-; AVX512-NEXT:    vpcmpeqw %xmm3, %xmm1, %xmm3
-; AVX512-NEXT:    vpblendvb %xmm3, %xmm1, %xmm0, %xmm0
-; AVX512-NEXT:    vcvtph2ps %xmm2, %ymm1
-; AVX512-NEXT:    vpxor %xmm3, %xmm3, %xmm3
-; AVX512-NEXT:    vcmpeqps %ymm3, %ymm1, %ymm1
-; AVX512-NEXT:    vpmovdw %zmm1, %ymm1
-; AVX512-NEXT:    vpblendvb %xmm1, %xmm0, %xmm2, %xmm0
-; AVX512-NEXT:    vzeroupper
+; AVX512-NEXT:    pushq %rbp
+; AVX512-NEXT:    .cfi_def_cfa_offset 16
+; AVX512-NEXT:    pushq %r15
+; AVX512-NEXT:    .cfi_def_cfa_offset 24
+; AVX512-NEXT:    pushq %r14
+; AVX512-NEXT:    .cfi_def_cfa_offset 32
+; AVX512-NEXT:    pushq %r13
+; AVX512-NEXT:    .cfi_def_cfa_offset 40
+; AVX512-NEXT:    pushq %r12
+; AVX512-NEXT:    .cfi_def_cfa_offset 48
+; AVX512-NEXT:    pushq %rbx
+; AVX512-NEXT:    .cfi_def_cfa_offset 56
+; AVX512-NEXT:    .cfi_offset %rbx, -56
+; AVX512-NEXT:    .cfi_offset %r12, -48
+; AVX512-NEXT:    .cfi_offset %r13, -40
+; AVX512-NEXT:    .cfi_offset %r14, -32
+; AVX512-NEXT:    .cfi_offset %r15, -24
+; AVX512-NEXT:    .cfi_offset %rbp, -16
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
+; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
+; AVX512-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT:    xorl %eax, %eax
+; AVX512-NEXT:    vucomiss %xmm2, %xmm3
+; AVX512-NEXT:    movl $65535, %ecx # imm = 0xFFFF
+; AVX512-NEXT:    movl $0, %edx
+; AVX512-NEXT:    cmovpl %ecx, %edx
+; AVX512-NEXT:    movl $0, %edi
+; AVX512-NEXT:    cmoval %ecx, %edi
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm2 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT:    vucomiss %xmm2, %xmm3
+; AVX512-NEXT:    movl $0, %esi
+; AVX512-NEXT:    cmovpl %ecx, %esi
+; AVX512-NEXT:    movl $0, %r9d
+; AVX512-NEXT:    cmoval %ecx, %r9d
+; AVX512-NEXT:    vshufpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT:    vshufpd {{.*#+}} xmm3 = xmm0[1,0]
+; AVX512-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT:    vucomiss %xmm2, %xmm3
+; AVX512-NEXT:    movl $0, %r8d
+; AVX512-NEXT:    cmovpl %ecx, %r8d
+; AVX512-NEXT:    movl $0, %r11d
+; AVX512-NEXT:    cmoval %ecx, %r11d
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm1[3,3,3,3,4,5,6,7]
+; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[3,3,3,3,4,5,6,7]
+; AVX512-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT:    vucomiss %xmm2, %xmm3
+; AVX512-NEXT:    movl $0, %r10d
+; AVX512-NEXT:    cmovpl %ecx, %r10d
+; AVX512-NEXT:    movl $0, %ebp
+; AVX512-NEXT:    cmoval %ecx, %ebp
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX512-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT:    vucomiss %xmm2, %xmm3
+; AVX512-NEXT:    movl $0, %ebx
+; AVX512-NEXT:    cmovpl %ecx, %ebx
+; AVX512-NEXT:    movl $0, %r14d
+; AVX512-NEXT:    cmoval %ecx, %r14d
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm2 = xmm1[1,1,1,1,4,5,6,7]
+; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm0[1,1,1,1,4,5,6,7]
+; AVX512-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT:    vucomiss %xmm2, %xmm3
+; AVX512-NEXT:    movl $0, %r15d
+; AVX512-NEXT:    cmovpl %ecx, %r15d
+; AVX512-NEXT:    movl $0, %r12d
+; AVX512-NEXT:    cmoval %ecx, %r12d
+; AVX512-NEXT:    vcvtph2ps %xmm1, %xmm2
+; AVX512-NEXT:    vcvtph2ps %xmm0, %xmm3
+; AVX512-NEXT:    vucomiss %xmm2, %xmm3
+; AVX512-NEXT:    movl $0, %r13d
+; AVX512-NEXT:    cmoval %ecx, %r13d
+; AVX512-NEXT:    vmovd %r13d, %xmm2
+; AVX512-NEXT:    vpinsrw $1, %r12d, %xmm2, %xmm2
+; AVX512-NEXT:    vpinsrw $2, %r14d, %xmm2, %xmm2
+; AVX512-NEXT:    vpinsrw $3, %ebp, %xmm2, %xmm2
+; AVX512-NEXT:    vpinsrw $4, %r11d, %xmm2, %xmm2
+; AVX512-NEXT:    vpinsrw $5, %r9d, %xmm2, %xmm2
+; AVX512-NEXT:    vpinsrw $6, %edi, %xmm2, %xmm2
+; AVX512-NEXT:    movl $0, %edi
+; AVX512-NEXT:    cmovpl %ecx, %edi
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm3 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm4 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vcvtph2ps %xmm4, %xmm4
+; AVX512-NEXT:    vucomiss %xmm3, %xmm4
+; AVX512-NEXT:    movl $0, %r9d
+; AVX512-NEXT:    cmoval %ecx, %r9d
+; AVX512-NEXT:    vpinsrw $7, %r9d, %xmm2, %xmm2
+; AVX512-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm2
+; AVX512-NEXT:    vmovd %edi, %xmm3
+; AVX512-NEXT:    vpinsrw $1, %r15d, %xmm3, %xmm3
+; AVX512-NEXT:    vpinsrw $2, %ebx, %xmm3, %xmm3
+; AVX512-NEXT:    vpinsrw $3, %r10d, %xmm3, %xmm3
+; AVX512-NEXT:    vpinsrw $4, %r8d, %xmm3, %xmm3
+; AVX512-NEXT:    vpinsrw $5, %esi, %xmm3, %xmm3
+; AVX512-NEXT:    vpinsrw $6, %edx, %xmm3, %xmm3
+; AVX512-NEXT:    movl $0, %edx
+; AVX512-NEXT:    cmovpl %ecx, %edx
+; AVX512-NEXT:    vpinsrw $7, %edx, %xmm3, %xmm3
+; AVX512-NEXT:    vpbroadcastw {{.*#+}} xmm4 = [NaN,NaN,NaN,NaN,NaN,NaN,NaN,NaN]
+; AVX512-NEXT:    vpblendvb %xmm3, %xmm4, %xmm2, %xmm2
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm2[1,1,1,1,4,5,6,7]
+; AVX512-NEXT:    vcvtph2ps %xmm3, %xmm3
+; AVX512-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512-NEXT:    vucomiss %xmm4, %xmm3
+; AVX512-NEXT:    movl $65535, %edx # imm = 0xFFFF
+; AVX512-NEXT:    cmovnel %eax, %edx
+; AVX512-NEXT:    cmovpl %eax, %edx
+; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm3
+; AVX512-NEXT:    vucomiss %xmm4, %xmm3
+; AVX512-NEXT:    movl $65535, %esi # imm = 0xFFFF
+; AVX512-NEXT:    cmovnel %eax, %esi
+; AVX512-NEXT:    cmovpl %eax, %esi
+; AVX512-NEXT:    vmovd %esi, %xmm3
+; AVX512-NEXT:    vpinsrw $1, %edx, %xmm3, %xmm3
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[1,1,1,1]
+; AVX512-NEXT:    vcvtph2ps %xmm5, %xmm5
+; AVX512-NEXT:    vucomiss %xmm4, %xmm5
+; AVX512-NEXT:    movl $65535, %edx # imm = 0xFFFF
+; AVX512-NEXT:    cmovnel %eax, %edx
+; AVX512-NEXT:    cmovpl %eax, %edx
+; AVX512-NEXT:    vpinsrw $2, %edx, %xmm3, %xmm3
+; AVX512-NEXT:    vpshuflw {{.*#+}} xmm5 = xmm2[3,3,3,3,4,5,6,7]
+; AVX512-NEXT:    vcvtph2ps %xmm5, %xmm5
+; AVX512-NEXT:    vucomiss %xmm4, %xmm5
+; AVX512-NEXT:    movl $65535, %edx # imm = 0xFFFF
+; AVX512-NEXT:    cmovnel %eax, %edx
+; AVX512-NEXT:    cmovpl %eax, %edx
+; AVX512-NEXT:    vpinsrw $3, %edx, %xmm3, %xmm3
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
+; AVX512-NEXT:    vcvtph2ps %xmm5, %xmm5
+; AVX512-NEXT:    vucomiss %xmm4, %xmm5
+; AVX512-NEXT:    movl $65535, %edx # imm = 0xFFFF
+; AVX512-NEXT:    cmovnel %eax, %edx
+; AVX512-NEXT:    cmovpl %eax, %edx
+; AVX512-NEXT:    vpinsrw $4, %edx, %xmm3, %xmm3
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm2[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vcvtph2ps %xmm5, %xmm5
+; AVX512-NEXT:    vucomiss %xmm4, %xmm5
+; AVX512-NEXT:    movl $65535, %edx # imm = 0xFFFF
+; AVX512-NEXT:    cmovnel %eax, %edx
+; AVX512-NEXT:    cmovpl %eax, %edx
+; AVX512-NEXT:    vpinsrw $5, %edx, %xmm3, %xmm3
+; AVX512-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[3,3,3,3]
+; AVX512-NEXT:    vcvtph2ps %xmm5, %xmm5
+; AVX512-NEXT:    vucomiss %xmm4, %xmm5
+; AVX512-NEXT:    movl $65535, %edx # imm = 0xFFFF
+; AVX512-NEXT:    cmovnel %eax, %edx
+; AVX512-NEXT:    cmovpl %eax, %edx
+; AVX512-NEXT:    vpinsrw $6, %edx, %xmm3, %xmm3
+; AVX512-NEXT:    vpsrldq {{.*#+}} xmm5 = xmm2[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512-NEXT:    vcvtph2ps %xmm5, %xmm5
+; AVX512-NEXT:    vucomiss %xmm4, %xmm5
+; AVX512-NEXT:    cmovnel %eax, %ecx
+; AVX512-NEXT:    cmovpl %eax, %ecx
+; AVX512-NEXT:    vpinsrw $7, %ecx, %xmm3, %xmm3
+; AVX512-NEXT:    vpxor %xmm4, %xmm4, %xmm4
+; AVX512-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm5
+; AVX512-NEXT:    vpblendvb %xmm5, %xmm0, %xmm2, %xmm0
+; AVX512-NEXT:    vpcmpeqw %xmm4, %xmm1, %xmm4
+; AVX512-NEXT:    vpblendvb %xmm4, %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpblendvb %xmm3, %xmm0, %xmm2, %xmm0
+; AVX512-NEXT:    popq %rbx
+; AVX512-NEXT:    .cfi_def_cfa_offset 48
+; AVX512-NEXT:    popq %r12
+; AVX512-NEXT:    .cfi_def_cfa_offset 40
+; AVX512-NEXT:    popq %r13
+; AVX512-NEXT:    .cfi_def_cfa_offset 32
+; AVX512-NEXT:    popq %r14
+; AVX512-NEXT:    .cfi_def_cfa_offset 24
+; AVX512-NEXT:    popq %r15
+; AVX512-NEXT:    .cfi_def_cfa_offset 16
+; AVX512-NEXT:    popq %rbp
+; AVX512-NEXT:    .cfi_def_cfa_offset 8
 ; AVX512-NEXT:    retq
 ;
 ; X86-LABEL: test_fmaximum_v4f16:
diff --git a/llvm/test/CodeGen/X86/pr116153.ll b/llvm/test/CodeGen/X86/pr116153.ll
new file mode 100644
index 00000000000000..5c9c2c76131d5a
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr116153.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s
+
+define void @_test_func(<16 x half> %0) #0 {
+; CHECK-LABEL: _test_func:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[3,3,3,3,4,5,6,7]
+; CHECK-NEXT:    vcvtph2ps %xmm1, %xmm1
+; CHECK-NEXT:    xorl %eax, %eax
+; CHECK-NEXT:    vucomiss %xmm1, %xmm1
+; CHECK-NEXT:    movl $65535, %ecx # imm = 0xFFFF
+; CHECK-NEXT:    movl $0, %edx
+; CHECK-NEXT:    cmovnpl %ecx, %edx
+; CHECK-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; CHECK-NEXT:    vcvtph2ps %xmm1, %xmm1
+; CHECK-NEXT:    vucomiss %xmm1, %xmm1
+; CHECK-NEXT:    movl $0, %esi
+; CHECK-NEXT:    cmovnpl %ecx, %esi
+; CHECK-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm0[1,1,1,1,4,5,6,7]
+; CHECK-NEXT:    vcvtph2ps %xmm1, %xmm1
+; CHECK-NEXT:    vucomiss %xmm1, %xmm1
+; CHECK-NEXT:    movl $0, %edi
+; CHECK-NEXT:    cmovnpl %ecx, %edi
+; CHECK-NEXT:    vcvtph2ps %xmm0, %xmm0
+; CHECK-NEXT:    vucomiss %xmm0, %xmm0
+; CHECK-NEXT:    cmovnpl %ecx, %eax
+; CHECK-NEXT:    vmovd %eax, %xmm0
+; CHECK-NEXT:    vpinsrw $1, %edi, %xmm0, %xmm0
+; CHECK-NEXT:    vpinsrw $2, %esi, %xmm0, %xmm0
+; CHECK-NEXT:    vpinsrw $3, %edx, %xmm0, %xmm0
+; CHECK-NEXT:    vpunpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; CHECK-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vmovdqu %xmm1, 16
+; CHECK-NEXT:    vmovdqu %xmm0, 0
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %2 = fcmp ord <16 x half> %0, zeroinitializer
+  %3 = sext <16 x i1> %2 to <16 x i32>
+  %4 = shufflevector <16 x i32> %3, <16 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %5 = tail call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %4, <4 x i32> zeroinitializer)
+  %6 = shufflevector <8 x i16> %5, <8 x i16> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %7 = bitcast <16 x i16> %6 to <32 x i8>
+  store <32 x i8> %7, ptr null, align 1
+  ret void
+}
+
+declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>)
+
+attributes #0 = { "target-features"="+aes,+avx,+avx2,+avx512f,+avx512vnni,+cmov,+crc32,+cx16,+cx8,+evex512,+f16c,+fma,+fxsr,+mmx,+pclmul,+popcnt,+prfchw,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave" }
diff --git a/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll b/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll
index b214bf082f2357..eafee9e65345f3 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-fmax-nnan.ll
@@ -413,9 +413,12 @@ define half @test_v2f16(<2 x half> %a0) nounwind {
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
 ; AVX512F-NEXT:    vpsrld $16, %xmm0, %xmm1
-; AVX512F-NEXT:    vcvtph2ps %xmm0, %ymm2
-; AVX512F-NEXT:    vcvtph2ps %xmm1, %ymm3
-; AVX512F-NEXT:    vcmpltps %zmm2, %zmm3, %k1
+; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm2
+; AVX512F-NEXT:    vcvtph2ps %xmm1, %xmm3
+; AVX512F-NEXT:    vucomiss %xmm3, %xmm2
+; AVX512F-NEXT:    seta %al
+; AVX512F-NEXT:    negb %al
+; AVX512F-NEXT:    kmovd %eax, %k1
 ; AVX512F-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
 ; AVX512F-NEXT:    vmovdqa %xmm1, %xmm0
 ; AVX512F-NEXT:    vzeroupper
diff --git a/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll b/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
index 9f37df716b6cd5..e8f9c7f7b524dc 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-fmin-nnan.ll
@@ -412,9 +412,12 @@ define half @test_v2f16(<2 x half> %a0) nounwind {
 ; AVX512F:       # %bb.0:
 ; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 def $zmm0
 ; AVX512F-NEXT:    vpsrld $16, %xmm0, %xmm1
-; AVX512F-NEXT:    vcvtph2ps %xmm0, %ymm2
-; AVX512F-NEXT:    vcvtph2ps %xmm1, %ymm3
-; AVX512F-NEXT:    vcmpltps %zmm3, %zmm2, %k1
+; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm2
+; AVX512F-NEXT:    vcvtph2ps %xmm1, %xmm3
+; AVX512F-NEXT:    xorl %eax, %eax
+; AVX512F-NEXT:    vucomiss %xmm3, %xmm2
+; AVX512F-NEXT:    sbbl %eax, %eax
+; AVX512F-NEXT:    kmovd %eax, %k1
 ; AVX512F-NEXT:    vmovdqu16 %zmm0, %zmm1 {%k1}
 ; AVX512F-NEXT:    vmovdqa %xmm1, %xmm0
 ; AVX512F-NEXT:    vzeroupper

From c7f8c690d39399652ed7f4a26ab998bb36b937ae Mon Sep 17 00:00:00 2001
From: "Wang, Phoebe" <phoebe.wang at intel.com>
Date: Tue, 10 Dec 2024 18:10:05 +0800
Subject: [PATCH 2/2] Add nounwind
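
Marking the test function nounwind suppresses the .cfi_* directives in the
generated assembly, which trims the CHECK lines regenerated in patch 1.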

---
 llvm/test/CodeGen/X86/fminimum-fmaximum.ll | 26 +---------------------
 1 file changed, 1 insertion(+), 25 deletions(-)

diff --git a/llvm/test/CodeGen/X86/fminimum-fmaximum.ll b/llvm/test/CodeGen/X86/fminimum-fmaximum.ll
index f576edcb82a51d..c6da0c5ca4792c 100644
--- a/llvm/test/CodeGen/X86/fminimum-fmaximum.ll
+++ b/llvm/test/CodeGen/X86/fminimum-fmaximum.ll
@@ -1384,11 +1384,10 @@ define <4 x float> @test_fmaximum_v4f32_splat(<4 x float> %x, float %y) {
   ret <4 x float> %r
 }
 
-define <4 x half> @test_fmaximum_v4f16(<4 x half> %x, <4 x half> %y) {
+define <4 x half> @test_fmaximum_v4f16(<4 x half> %x, <4 x half> %y) nounwind {
 ; SSE2-LABEL: test_fmaximum_v4f16:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    subq $104, %rsp
-; SSE2-NEXT:    .cfi_def_cfa_offset 112
 ; SSE2-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
 ; SSE2-NEXT:    psrld $16, %xmm0
 ; SSE2-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -1524,13 +1523,11 @@ define <4 x half> @test_fmaximum_v4f16(<4 x half> %x, <4 x half> %y) {
 ; SSE2-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; SSE2-NEXT:    addq $104, %rsp
-; SSE2-NEXT:    .cfi_def_cfa_offset 8
 ; SSE2-NEXT:    retq
 ;
 ; AVX1-LABEL: test_fmaximum_v4f16:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    subq $120, %rsp
-; AVX1-NEXT:    .cfi_def_cfa_offset 128
 ; AVX1-NEXT:    vmovaps %xmm0, %xmm2
 ; AVX1-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
 ; AVX1-NEXT:    vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -1636,29 +1633,16 @@ define <4 x half> @test_fmaximum_v4f16(<4 x half> %x, <4 x half> %y) {
 ; AVX1-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
 ; AVX1-NEXT:    addq $120, %rsp
-; AVX1-NEXT:    .cfi_def_cfa_offset 8
 ; AVX1-NEXT:    retq
 ;
 ; AVX512-LABEL: test_fmaximum_v4f16:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    pushq %rbp
-; AVX512-NEXT:    .cfi_def_cfa_offset 16
 ; AVX512-NEXT:    pushq %r15
-; AVX512-NEXT:    .cfi_def_cfa_offset 24
 ; AVX512-NEXT:    pushq %r14
-; AVX512-NEXT:    .cfi_def_cfa_offset 32
 ; AVX512-NEXT:    pushq %r13
-; AVX512-NEXT:    .cfi_def_cfa_offset 40
 ; AVX512-NEXT:    pushq %r12
-; AVX512-NEXT:    .cfi_def_cfa_offset 48
 ; AVX512-NEXT:    pushq %rbx
-; AVX512-NEXT:    .cfi_def_cfa_offset 56
-; AVX512-NEXT:    .cfi_offset %rbx, -56
-; AVX512-NEXT:    .cfi_offset %r12, -48
-; AVX512-NEXT:    .cfi_offset %r13, -40
-; AVX512-NEXT:    .cfi_offset %r14, -32
-; AVX512-NEXT:    .cfi_offset %r15, -24
-; AVX512-NEXT:    .cfi_offset %rbp, -16
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
 ; AVX512-NEXT:    vcvtph2ps %xmm2, %xmm2
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[3,3,3,3]
@@ -1812,23 +1796,16 @@ define <4 x half> @test_fmaximum_v4f16(<4 x half> %x, <4 x half> %y) {
 ; AVX512-NEXT:    vpblendvb %xmm4, %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpblendvb %xmm3, %xmm0, %xmm2, %xmm0
 ; AVX512-NEXT:    popq %rbx
-; AVX512-NEXT:    .cfi_def_cfa_offset 48
 ; AVX512-NEXT:    popq %r12
-; AVX512-NEXT:    .cfi_def_cfa_offset 40
 ; AVX512-NEXT:    popq %r13
-; AVX512-NEXT:    .cfi_def_cfa_offset 32
 ; AVX512-NEXT:    popq %r14
-; AVX512-NEXT:    .cfi_def_cfa_offset 24
 ; AVX512-NEXT:    popq %r15
-; AVX512-NEXT:    .cfi_def_cfa_offset 16
 ; AVX512-NEXT:    popq %rbp
-; AVX512-NEXT:    .cfi_def_cfa_offset 8
 ; AVX512-NEXT:    retq
 ;
 ; X86-LABEL: test_fmaximum_v4f16:
 ; X86:       # %bb.0:
 ; X86-NEXT:    subl $164, %esp
-; X86-NEXT:    .cfi_def_cfa_offset 168
 ; X86-NEXT:    vmovdqa %xmm0, %xmm2
 ; X86-NEXT:    vmovdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
 ; X86-NEXT:    vpsrlq $48, %xmm0, %xmm0
@@ -1968,7 +1945,6 @@ define <4 x half> @test_fmaximum_v4f16(<4 x half> %x, <4 x half> %y) {
 ; X86-NEXT:    # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
 ; X86-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
 ; X86-NEXT:    addl $164, %esp
-; X86-NEXT:    .cfi_def_cfa_offset 4
 ; X86-NEXT:    retl
   %r = call <4 x half> @llvm.maximum.v4f16(<4 x half> %x, <4 x half> %y)
   ret <4 x half> %r


