[clang] [llvm] [X86][AVX10.2] Support AVX10.2-MINMAX new instructions. (PR #101598)

Freddy Ye via cfe-commits cfe-commits at lists.llvm.org
Sun Aug 4 18:15:04 PDT 2024


================
@@ -0,0 +1,648 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mtriple=x86_64-unknown-unknown --show-mc-encoding -mattr=+avx10.2-512 | FileCheck %s --check-prefixes=X64
+; RUN: llc < %s -verify-machineinstrs -mtriple=i686-unknown-unknown --show-mc-encoding -mattr=+avx10.2-512 | FileCheck %s --check-prefixes=X86
+
+define <32 x bfloat> @test_int_x86_avx10_vminmaxnepbf16512(<32 x bfloat> %A, <32 x bfloat> %B) nounwind {
+; X64-LABEL: test_int_x86_avx10_vminmaxnepbf16512:
+; X64:       # %bb.0:
+; X64-NEXT:    vminmaxnepbf16 $127, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0x7f,0x48,0x52,0xc1,0x7f]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_avx10_vminmaxnepbf16512:
+; X86:       # %bb.0:
+; X86-NEXT:    vminmaxnepbf16 $127, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0x7f,0x48,0x52,0xc1,0x7f]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <32 x bfloat> @llvm.x86.avx10.vminmaxnepbf16512(<32 x bfloat> %A, <32 x bfloat> %B, i32 127)
+  ret <32 x bfloat> %ret
+}
+
+define <32 x bfloat> @test_int_x86_avx10_mask_vminmaxnepbf16512(<32 x bfloat> %A, <32 x bfloat> %B, <32 x bfloat> %C, i32 %D) nounwind {
+; X64-LABEL: test_int_x86_avx10_mask_vminmaxnepbf16512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vminmaxnepbf16 $127, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7f,0x49,0x52,0xd1,0x7f]
+; X64-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_avx10_mask_vminmaxnepbf16512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vminmaxnepbf16 $127, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7f,0x49,0x52,0xd1,0x7f]
+; X86-NEXT:    vmovdqa64 %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x6f,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+entry:
+  %0 = call <32 x bfloat> @llvm.x86.avx10.vminmaxnepbf16512(<32 x bfloat> %A, <32 x bfloat> %B, i32 127)
+  %1 = bitcast i32 %D to <32 x i1>
+  %2 = select reassoc nsz arcp contract afn <32 x i1> %1, <32 x bfloat> %0, <32 x bfloat> %C
+  ret <32 x bfloat> %2
+}
+
+declare <32 x bfloat> @llvm.x86.avx10.vminmaxnepbf16512(<32 x bfloat> %A, <32 x bfloat> %B, i32 %C)
+
+define <32 x bfloat> @test_int_x86_avx10_maskz_vminmaxnepbf16512(<32 x bfloat> %A, <32 x bfloat> %B, i32 %C) nounwind {
+; X64-LABEL: test_int_x86_avx10_maskz_vminmaxnepbf16512:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vminmaxnepbf16 $127, %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf3,0x7f,0xc9,0x52,0xc1,0x7f]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_avx10_maskz_vminmaxnepbf16512:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vminmaxnepbf16 $127, %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf3,0x7f,0xc9,0x52,0xc1,0x7f]
+; X86-NEXT:    retl # encoding: [0xc3]
+entry:
+  %0 = call <32 x bfloat> @llvm.x86.avx10.vminmaxnepbf16512(<32 x bfloat> %A, <32 x bfloat> %B, i32 127)
+  %1 = bitcast i32 %C to <32 x i1>
+  %2 = select reassoc nsz arcp contract afn <32 x i1> %1, <32 x bfloat> %0, <32 x bfloat> zeroinitializer
+  ret <32 x bfloat> %2
+}
+
+define <8 x double> @test_int_x86_vminmaxpd(<8 x double> %A, <8 x double> %B) nounwind {
+; X64-LABEL: test_int_x86_vminmaxpd:
+; X64:       # %bb.0:
+; X64-NEXT:    vminmaxpd $127, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0xfd,0x48,0x52,0xc1,0x7f]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_vminmaxpd:
+; X86:       # %bb.0:
+; X86-NEXT:    vminmaxpd $127, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0xfd,0x48,0x52,0xc1,0x7f]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <8 x double> @llvm.x86.avx10.mask.vminmaxpd.round(<8 x double> %A, <8 x double> %B, i32 127, <8 x double> undef, i8 -1, i32 4)
+  ret <8 x double> %ret
+}
+
+define <8 x double> @test_int_x86_mask_vminmaxpd(<8 x double> %A, <8 x double> %B, <8 x double> %C, i8 %D) nounwind {
+; X64-LABEL: test_int_x86_mask_vminmaxpd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vminmaxpd $127, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x52,0xd1,0x7f]
+; X64-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_mask_vminmaxpd:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vminmaxpd $127, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x49,0x52,0xd1,0x7f]
+; X86-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <8 x double> @llvm.x86.avx10.mask.vminmaxpd.round(<8 x double> %A, <8 x double> %B, i32 127, <8 x double> %C, i8 %D, i32 4)
+  ret <8 x double> %ret
+}
+
+define <8 x double> @test_int_x86_maskz_vminmaxpd(<8 x double> %A, <8 x double> %B, i8 %C) nounwind {
+; X64-LABEL: test_int_x86_maskz_vminmaxpd:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vminmaxpd $127, %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf3,0xfd,0xc9,0x52,0xc1,0x7f]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_maskz_vminmaxpd:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vminmaxpd $127, %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf3,0xfd,0xc9,0x52,0xc1,0x7f]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <8 x double> @llvm.x86.avx10.mask.vminmaxpd.round(<8 x double> %A, <8 x double> %B, i32 127, <8 x double> zeroinitializer, i8 %C, i32 4)
+  ret <8 x double> %ret
+}
+
+define <8 x double> @test_int_x86_vminmaxpd_round(<8 x double> %A, <8 x double> %B) nounwind {
+; X64-LABEL: test_int_x86_vminmaxpd_round:
+; X64:       # %bb.0:
+; X64-NEXT:    vminmaxpd $127, {sae}, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0xfd,0x18,0x52,0xc1,0x7f]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_vminmaxpd_round:
+; X86:       # %bb.0:
+; X86-NEXT:    vminmaxpd $127, {sae}, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0xfd,0x18,0x52,0xc1,0x7f]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <8 x double> @llvm.x86.avx10.mask.vminmaxpd.round(<8 x double> %A, <8 x double> %B, i32 127, <8 x double> undef, i8 -1, i32 8)
+  ret <8 x double> %ret
+}
+
+define <8 x double> @test_int_x86_mask_vminmaxpd_round(<8 x double> %A, <8 x double> %B, <8 x double> %C, i8 %D) nounwind {
+; X64-LABEL: test_int_x86_mask_vminmaxpd_round:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vminmaxpd $127, {sae}, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x19,0x52,0xd1,0x7f]
+; X64-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_mask_vminmaxpd_round:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vminmaxpd $127, {sae}, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0xfd,0x19,0x52,0xd1,0x7f]
+; X86-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <8 x double> @llvm.x86.avx10.mask.vminmaxpd.round(<8 x double> %A, <8 x double> %B, i32 127, <8 x double> %C, i8 %D, i32 8)
+  ret <8 x double> %ret
+}
+
+define <8 x double> @test_int_x86_maskz_vminmaxpd_round(<8 x double> %A, <8 x double> %B, i8 %C) nounwind {
+; X64-LABEL: test_int_x86_maskz_vminmaxpd_round:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vminmaxpd $127, {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf3,0xfd,0x99,0x52,0xc1,0x7f]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_maskz_vminmaxpd_round:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vminmaxpd $127, {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf3,0xfd,0x99,0x52,0xc1,0x7f]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <8 x double> @llvm.x86.avx10.mask.vminmaxpd.round(<8 x double> %A, <8 x double> %B, i32 127, <8 x double> zeroinitializer, i8 %C, i32 8)
+  ret <8 x double> %ret
+}
+
+declare <8 x double> @llvm.x86.avx10.mask.vminmaxpd.round(<8 x double> %A, <8 x double> %B, i32 %C, <8 x double> %D, i8 %E, i32 %F)
+
+define <32 x half> @test_int_x86_vminmaxph(<32 x half> %A, <32 x half> %B) nounwind {
+; X64-LABEL: test_int_x86_vminmaxph:
+; X64:       # %bb.0:
+; X64-NEXT:    vminmaxph $127, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0x7c,0x48,0x52,0xc1,0x7f]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_vminmaxph:
+; X86:       # %bb.0:
+; X86-NEXT:    vminmaxph $127, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0x7c,0x48,0x52,0xc1,0x7f]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <32 x half> @llvm.x86.avx10.mask.vminmaxph.round(<32 x half> %A, <32 x half> %B, i32 127, <32 x half> undef, i32 -1, i32 4)
+  ret <32 x half> %ret
+}
+
+define <32 x half> @test_int_x86_mask_vminmaxph(<32 x half> %A, <32 x half> %B, <32 x half> %C, i32 %D) nounwind {
+; X64-LABEL: test_int_x86_mask_vminmaxph:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vminmaxph $127, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7c,0x49,0x52,0xd1,0x7f]
+; X64-NEXT:    vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_mask_vminmaxph:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vminmaxph $127, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7c,0x49,0x52,0xd1,0x7f]
+; X86-NEXT:    vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <32 x half> @llvm.x86.avx10.mask.vminmaxph.round(<32 x half> %A, <32 x half> %B, i32 127, <32 x half> %C, i32 %D, i32 4)
+  ret <32 x half> %ret
+}
+
+define <32 x half> @test_int_x86_maskz_vminmaxph(<32 x half> %A, <32 x half> %B, i32 %C) nounwind {
+; X64-LABEL: test_int_x86_maskz_vminmaxph:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vminmaxph $127, %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf3,0x7c,0xc9,0x52,0xc1,0x7f]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_maskz_vminmaxph:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vminmaxph $127, %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf3,0x7c,0xc9,0x52,0xc1,0x7f]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <32 x half> @llvm.x86.avx10.mask.vminmaxph.round(<32 x half> %A, <32 x half> %B, i32 127, <32 x half> zeroinitializer, i32 %C, i32 4)
+  ret <32 x half> %ret
+}
+
+define <32 x half> @test_int_x86_vminmaxph_round(<32 x half> %A, <32 x half> %B) nounwind {
+; X64-LABEL: test_int_x86_vminmaxph_round:
+; X64:       # %bb.0:
+; X64-NEXT:    vminmaxph $127, {sae}, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0x7c,0x18,0x52,0xc1,0x7f]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_vminmaxph_round:
+; X86:       # %bb.0:
+; X86-NEXT:    vminmaxph $127, {sae}, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0x7c,0x18,0x52,0xc1,0x7f]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <32 x half> @llvm.x86.avx10.mask.vminmaxph.round(<32 x half> %A, <32 x half> %B, i32 127, <32 x half> undef, i32 -1, i32 8)
+  ret <32 x half> %ret
+}
+
+define <32 x half> @test_int_x86_mask_vminmaxph_round(<32 x half> %A, <32 x half> %B, <32 x half> %C, i32 %D) nounwind {
+; X64-LABEL: test_int_x86_mask_vminmaxph_round:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vminmaxph $127, {sae}, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7c,0x19,0x52,0xd1,0x7f]
+; X64-NEXT:    vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_mask_vminmaxph_round:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vminmaxph $127, {sae}, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7c,0x19,0x52,0xd1,0x7f]
+; X86-NEXT:    vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <32 x half> @llvm.x86.avx10.mask.vminmaxph.round(<32 x half> %A, <32 x half> %B, i32 127, <32 x half> %C, i32 %D, i32 8)
+  ret <32 x half> %ret
+}
+
+define <32 x half> @test_int_x86_maskz_vminmaxph_round(<32 x half> %A, <32 x half> %B, i32 %C) nounwind {
+; X64-LABEL: test_int_x86_maskz_vminmaxph_round:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vminmaxph $127, {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf3,0x7c,0x99,0x52,0xc1,0x7f]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_maskz_vminmaxph_round:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovd {{[0-9]+}}(%esp), %k1 # encoding: [0xc4,0xe1,0xf9,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vminmaxph $127, {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf3,0x7c,0x99,0x52,0xc1,0x7f]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <32 x half> @llvm.x86.avx10.mask.vminmaxph.round(<32 x half> %A, <32 x half> %B, i32 127, <32 x half> zeroinitializer, i32 %C, i32 8)
+  ret <32 x half> %ret
+}
+
+declare <32 x half> @llvm.x86.avx10.mask.vminmaxph.round(<32 x half> %A, <32 x half> %B, i32 %C, <32 x half> %D, i32 %E, i32 %F)
+
+define <16 x float> @test_int_x86_vminmaxps(<16 x float> %A, <16 x float> %B) nounwind {
+; X64-LABEL: test_int_x86_vminmaxps:
+; X64:       # %bb.0:
+; X64-NEXT:    vminmaxps $127, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0x7d,0x48,0x52,0xc1,0x7f]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_vminmaxps:
+; X86:       # %bb.0:
+; X86-NEXT:    vminmaxps $127, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0x7d,0x48,0x52,0xc1,0x7f]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <16 x float> @llvm.x86.avx10.mask.vminmaxps.round(<16 x float> %A, <16 x float> %B, i32 127, <16 x float> undef, i16 -1, i32 4)
+  ret <16 x float> %ret
+}
+
+define <16 x float> @test_int_x86_mask_vminmaxps(<16 x float> %A, <16 x float> %B, <16 x float> %C, i16 %D) nounwind {
+; X64-LABEL: test_int_x86_mask_vminmaxps:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vminmaxps $127, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x49,0x52,0xd1,0x7f]
+; X64-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_mask_vminmaxps:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vminmaxps $127, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x49,0x52,0xd1,0x7f]
+; X86-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <16 x float> @llvm.x86.avx10.mask.vminmaxps.round(<16 x float> %A, <16 x float> %B, i32 127, <16 x float> %C, i16 %D, i32 4)
+  ret <16 x float> %ret
+}
+
+define <16 x float> @test_int_x86_maskz_vminmaxps(<16 x float> %A, <16 x float> %B, i16 %C) nounwind {
+; X64-LABEL: test_int_x86_maskz_vminmaxps:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vminmaxps $127, %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0xc9,0x52,0xc1,0x7f]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_maskz_vminmaxps:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vminmaxps $127, %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0xc9,0x52,0xc1,0x7f]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <16 x float> @llvm.x86.avx10.mask.vminmaxps.round(<16 x float> %A, <16 x float> %B, i32 127, <16 x float> zeroinitializer, i16 %C, i32 4)
+  ret <16 x float> %ret
+}
+
+define <16 x float> @test_int_x86_vminmaxps_round(<16 x float> %A, <16 x float> %B) nounwind {
+; X64-LABEL: test_int_x86_vminmaxps_round:
+; X64:       # %bb.0:
+; X64-NEXT:    vminmaxps $127, {sae}, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0x7d,0x18,0x52,0xc1,0x7f]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_vminmaxps_round:
+; X86:       # %bb.0:
+; X86-NEXT:    vminmaxps $127, {sae}, %zmm1, %zmm0, %zmm0 # encoding: [0x62,0xf3,0x7d,0x18,0x52,0xc1,0x7f]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <16 x float> @llvm.x86.avx10.mask.vminmaxps.round(<16 x float> %A, <16 x float> %B, i32 127, <16 x float> undef, i16 -1, i32 8)
+  ret <16 x float> %ret
+}
+
+define <16 x float> @test_int_x86_mask_vminmaxps_round(<16 x float> %A, <16 x float> %B, <16 x float> %C, i16 %D) nounwind {
+; X64-LABEL: test_int_x86_mask_vminmaxps_round:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vminmaxps $127, {sae}, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x19,0x52,0xd1,0x7f]
+; X64-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_mask_vminmaxps_round:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vminmaxps $127, {sae}, %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf3,0x7d,0x19,0x52,0xd1,0x7f]
+; X86-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <16 x float> @llvm.x86.avx10.mask.vminmaxps.round(<16 x float> %A, <16 x float> %B, i32 127, <16 x float> %C, i16 %D, i32 8)
+  ret <16 x float> %ret
+}
+
+define <16 x float> @test_int_x86_maskz_vminmaxps_round(<16 x float> %A, <16 x float> %B, i16 %C) nounwind {
+; X64-LABEL: test_int_x86_maskz_vminmaxps_round:
+; X64:       # %bb.0:
+; X64-NEXT:    kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
+; X64-NEXT:    vminmaxps $127, {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0x99,0x52,0xc1,0x7f]
+; X64-NEXT:    retq # encoding: [0xc3]
+;
+; X86-LABEL: test_int_x86_maskz_vminmaxps_round:
+; X86:       # %bb.0:
+; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
+; X86-NEXT:    vminmaxps $127, {sae}, %zmm1, %zmm0, %zmm0 {%k1} {z} # encoding: [0x62,0xf3,0x7d,0x99,0x52,0xc1,0x7f]
+; X86-NEXT:    retl # encoding: [0xc3]
+  %ret = call <16 x float> @llvm.x86.avx10.mask.vminmaxps.round(<16 x float> %A, <16 x float> %B, i32 127, <16 x float> zeroinitializer, i16 %C, i32 8)
+  ret <16 x float> %ret
+}
+
+declare <16 x float> @llvm.x86.avx10.mask.vminmaxps.round(<16 x float> %A, <16 x float> %B, i32 %C, <16 x float> %D, i16 %E, i32 %F)
+
+define <2 x double> @test_int_x86_vminmaxsd(<2 x double> %A, <2 x double> %B) nounwind {
----------------
FreddyLeaf wrote:

[adfe6cd](https://github.com/llvm/llvm-project/pull/101598/commits/adfe6cd518f7f75e8dbbdfcdc6ddadae1a27fc4a)

https://github.com/llvm/llvm-project/pull/101598

