[clang] [llvm] [X86][AVX10.2] Support AVX10.2-CONVERT new instructions. (PR #101600)

Freddy Ye via llvm-commits <llvm-commits at lists.llvm.org>
Fri Aug 16 01:52:07 PDT 2024


================
@@ -0,0 +1,286 @@
+/*===--------- avx10_2_512convertintrin.h - AVX10_2_512CONVERT -------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __IMMINTRIN_H
+#error                                                                         \
+    "Never use <avx10_2_512convertintrin.h> directly; include <immintrin.h> instead."
+#endif // __IMMINTRIN_H
+
+#ifdef __SSE2__
+
+#ifndef __AVX10_2_512CONVERTINTRIN_H
+#define __AVX10_2_512CONVERTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS512                                                  \
+  __attribute__((__always_inline__, __nodebug__, __target__("avx10.2-512"),    \
+                 __min_vector_width__(512)))
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512 _mm512_cvtx2ps_ph(__m512 __A,
+                                                                  __m512 __B) {
+  return (__m512h)__builtin_ia32_vcvt2ps2phx512_mask(
+      (__v16sf)__A, (__v16sf)__B, (__v32hf)_mm512_setzero_ph(), (__mmask32)(-1),
+      _MM_FROUND_CUR_DIRECTION);
+}
+
+static __inline__ __m512h __DEFAULT_FN_ATTRS512
+_mm512_mask_cvtx2ps_ph(__m512h __W, __mmask32 __U, __m512 __A, __m512 __B) {
+  return (__m512h)__builtin_ia32_vcvt2ps2phx512_mask(
+      (__v16sf)__A, (__v16sf)__B, (__v32hf)__W, (__mmask32)__U,
+      _MM_FROUND_CUR_DIRECTION);
+}
+
+#define _mm512_cvtx_round2ps_ph(A, B, R)                                       \
+  ((__m512h)__builtin_ia32_vcvt2ps2phx512_mask(                                \
+      (__v16sf)(A), (__v16sf)(B), (__v32hf)_mm512_undefined_ph(),              \
+      (__mmask32)(-1), (const int)(R)))
+
+#define _mm512_mask_cvtx_round2ps_ph(W, U, A, B, R)                            \
+  ((__m512h)__builtin_ia32_vcvt2ps2phx512_mask((__v16sf)(A), (__v16sf)(B),     \
+                                               (__v32hf)(W), (__mmask32)(U),   \
+                                               (const int)(R)))
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
+_mm512_cvtbiasph_pbf8(__m512i __A, __m512h __B) {
+  return (__m256i)__builtin_ia32_vcvtbiasph2bf8_512_mask(
+      (__v64qi)__A, (__v32hf)__B, (__v32qi)_mm256_undefined_si256(),
+      (__mmask32)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_mask_cvtbiasph_pbf8(
+    __m256i __W, __mmask32 __U, __m512i __A, __m512h __B) {
+  return (__m256i)__builtin_ia32_vcvtbiasph2bf8_512_mask(
+      (__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)__W, (__mmask32)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtbiasph_pbf8(__mmask32 __U, __m512i __A, __m512h __B) {
+  return (__m256i)__builtin_ia32_vcvtbiasph2bf8_512_mask(
+      (__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)_mm256_setzero_si256(),
+      (__mmask32)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
+_mm512_cvtbiassph_pbf8(__m512i __A, __m512h __B) {
+  return (__m256i)__builtin_ia32_vcvtbiasph2bf8s_512_mask(
+      (__v64qi)__A, (__v32hf)__B, (__v32qi)_mm256_undefined_si256(),
+      (__mmask32)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_mask_cvtbiassph_pbf8(
+    __m256i __W, __mmask32 __U, __m512i __A, __m512h __B) {
+  return (__m256i)__builtin_ia32_vcvtbiasph2bf8s_512_mask(
+      (__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)__W, (__mmask32)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtbiassph_pbf8(__mmask32 __U, __m512i __A, __m512h __B) {
+  return (__m256i)__builtin_ia32_vcvtbiasph2bf8s_512_mask(
+      (__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)_mm256_setzero_si256(),
+      (__mmask32)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
+_mm512_cvtbiasph_phf8(__m512i __A, __m512h __B) {
+  return (__m256i)__builtin_ia32_vcvtbiasph2hf8_512_mask(
+      (__v64qi)__A, (__v32hf)__B, (__v32qi)_mm256_undefined_si256(),
+      (__mmask32)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_mask_cvtbiasph_phf8(
+    __m256i __W, __mmask32 __U, __m512i __A, __m512h __B) {
+  return (__m256i)__builtin_ia32_vcvtbiasph2hf8_512_mask(
+      (__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)__W, (__mmask32)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtbiasph_phf8(__mmask32 __U, __m512i __A, __m512h __B) {
+  return (__m256i)__builtin_ia32_vcvtbiasph2hf8_512_mask(
+      (__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)_mm256_setzero_si256(),
+      (__mmask32)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
+_mm512_cvtbiassph_phf8(__m512i __A, __m512h __B) {
+  return (__m256i)__builtin_ia32_vcvtbiasph2hf8s_512_mask(
+      (__v64qi)__A, (__v32hf)__B, (__v32qi)_mm256_undefined_si256(),
+      (__mmask32)-1);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS512 _mm512_mask_cvtbiassph_phf8(
+    __m256i __W, __mmask32 __U, __m512i __A, __m512h __B) {
+  return (__m256i)__builtin_ia32_vcvtbiasph2hf8s_512_mask(
+      (__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)__W, (__mmask32)__U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS512
+_mm512_maskz_cvtbiassph_phf8(__mmask32 __U, __m512i __A, __m512h __B) {
+  return (__m256i)__builtin_ia32_vcvtbiasph2hf8s_512_mask(
+      (__v64qi)__A, (__v32hf)__B, (__v32qi)(__m256i)_mm256_setzero_si256(),
+      (__mmask32)__U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvtne2ph_pbf8(__m512h __A, __m512h __B) {
+  return (__m512i)__builtin_ia32_vcvtne2ph2bf8_512((__v32hf)(__A),
+                                                   (__v32hf)(__B));
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512 _mm512_mask_cvtne2ph_pbf8(
+    __m512i __W, __mmask64 __U, __m512h __A, __m512h __B) {
+  return (__m512i)__builtin_ia32_selectb_512(
+      (__mmask64)__U, (__v64qi)_mm512_cvtne2ph_pbf8(__A, __B), (__v64qi)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvtnes2ph_pbf8(__m512h __A, __m512h __B) {
+  return (__m512i)__builtin_ia32_vcvtne2ph2bf8s_512((__v32hf)(__A),
+                                                    (__v32hf)(__B));
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512 _mm512_mask_cvtnes2ph_pbf8(
+    __m512i __W, __mmask64 __U, __m512h __A, __m512h __B) {
+  return (__m512i)__builtin_ia32_selectb_512(
+      (__mmask64)__U, (__v64qi)_mm512_cvtnes2ph_pbf8(__A, __B), (__v64qi)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvtne2ph_phf8(__m512h __A, __m512h __B) {
+  return (__m512i)__builtin_ia32_vcvtne2ph2hf8_512((__v32hf)(__A),
+                                                   (__v32hf)(__B));
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512 _mm512_mask_cvtne2ph_phf8(
+    __m512i __W, __mmask64 __U, __m512h __A, __m512h __B) {
+  return (__m512i)__builtin_ia32_selectb_512(
+      (__mmask64)__U, (__v64qi)_mm512_cvtne2ph_phf8(__A, __B), (__v64qi)__W);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS512
+_mm512_cvtne2ph2hf8s_phf8(__m512h __A, __m512h __B) {
----------------
FreddyLeaf wrote:

Done in latest.

https://github.com/llvm/llvm-project/pull/101600
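
For reference, a minimal usage sketch of the paired-source conversion intrinsics added by this patch. The intrinsic names follow this revision of the diff (some were renamed later in review), and the helper functions convert_pair, convert_pair_masked, and convert_to_bf8 are illustrative only, not part of the patch. Building it assumes a clang with this patch applied and -mavx10.2-512 enabled.

#include <immintrin.h>

/* Packs 16 floats from each 512-bit source into a single vector of
   32 _Float16 values (VCVT2PS2PHX). */
static __m512h convert_pair(__m512 a, __m512 b) {
  return _mm512_cvtx2ps_ph(a, b);
}

/* Merge-masked form: result lanes whose mask bit is clear keep the
   corresponding element of the pass-through operand. */
static __m512h convert_pair_masked(__m512h passthru, __mmask32 k, __m512 a,
                                   __m512 b) {
  return _mm512_mask_cvtx2ps_ph(passthru, k, a, b);
}

/* Converts two vectors of 32 _Float16 values into 64 packed 8-bit
   bf8 values in one __m512i. */
static __m512i convert_to_bf8(__m512h a, __m512h b) {
  return _mm512_cvtne2ph_pbf8(a, b);
}

Note that in the diff the masked two-source FP8 conversions (e.g. _mm512_mask_cvtne2ph_pbf8) are built from the unmasked builtin plus __builtin_ia32_selectb_512 with a __mmask64, so merging happens per output byte rather than inside the conversion builtin itself.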