[clang] [X86][AVX10.2] Add comments for the avx10_2convertintrin.h file (PR #120766)

Mikołaj Piróg via cfe-commits cfe-commits at lists.llvm.org
Fri Jan 3 08:41:18 PST 2025


https://github.com/mikolaj-pirog updated https://github.com/llvm/llvm-project/pull/120766

>From 0c107c06255cc2c8c2ae1abd1825d547b70db33a Mon Sep 17 00:00:00 2001
From: "Pirog, Mikolaj Maciej" <mikolaj.maciej.pirog at intel.com>
Date: Fri, 13 Dec 2024 15:40:14 +0100
Subject: [PATCH 01/22] mend

---
 clang/lib/Headers/avx10_2convertintrin.h | 304 +++++++++++++++++++++++
 1 file changed, 304 insertions(+)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index 134adb2850c8de..62b71ce87dd382 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -24,24 +24,146 @@
   __attribute__((__always_inline__, __nodebug__, __target__("avx10.2-256"),    \
                  __min_vector_width__(256)))
 
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed
+/// single-precision (32-bit) floating-point elements to a 128-bit vector
+/// containing FP16 elements.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF i < 4
+/// 		dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
+/// 	ELSE
+/// 		dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 4])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __A
+///    A 128-bit vector of [4 x float].
+/// \param __B
+///    A 128-bit vector of [4 x float].
+/// \returns
+///    A 128-bit vector of [8 x fp16]. Lower 4 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtx2ps_ph(__m128 __A,
                                                                __m128 __B) {
   return (__m128h)__builtin_ia32_vcvt2ps2phx128_mask(
       (__v4sf)__A, (__v4sf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)(-1));
 }
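/* Illustrative usage sketch (editorial example, not part of the patch): a
 * hypothetical helper that packs eight floats from two __m128 vectors into
 * one __m128h of FP16 values, assuming a compiler/target with AVX10.2-256
 * enabled (e.g. -mavx10.2-256). Per the pseudocode above, the lower four
 * FP16 lanes come from b and the upper four from a. */
#include <immintrin.h>

static inline __m128h pack_two_ps_to_ph(__m128 a, __m128 b) {
  return _mm_cvtx2ps_ph(a, b);
}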
 
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed
+/// single-precision (32-bit) floating-point elements to a 128-bit vector
+/// containing FP16 elements. Merging mask \a __U is used to determine if given
+/// element should be taken from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF mask[i]
+/// 		dst.fp16[i] := __W[i]
+/// 	ELSE
+/// 		IF i < 4
+/// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
+/// 		ELSE
+/// 			dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 4])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __W
+///    A 128-bit vector of [8 x fp16].
+/// \param __U
+///    An 8-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [4 x float].
+/// \param __B
+///    A 128-bit vector of [4 x float].
+/// \returns
+///    A 128-bit vector of [8 x fp16]. Lower 4 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask_cvtx2ps_ph(__m128h __W, __mmask8 __U, __m128 __A, __m128 __B) {
   return (__m128h)__builtin_ia32_vcvt2ps2phx128_mask(
       (__v4sf)__A, (__v4sf)__B, (__v8hf)__W, (__mmask8)__U);
 }
 
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed
+/// single-precision (32-bit) floating-point elements to a 128-bit vector
+/// containing FP16 elements. Zeroing mask \a __U is used to determine if given
+/// element should be zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF mask[i]
+/// 		dst.fp16[i] := 0
+/// 	ELSE
+/// 		IF i < 4
+/// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
+/// 		ELSE
+/// 			dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 4])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __U
+///    An 8-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [4 x float].
+/// \param __B
+///    A 128-bit vector of [4 x float].
+/// \returns
+///    A 128-bit vector of [8 x fp16]. Lower 4 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    zero is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtx2ps_ph(__mmask8 __U, __m128 __A, __m128 __B) {
   return (__m128h)__builtin_ia32_vcvt2ps2phx128_mask(
       (__v4sf)__A, (__v4sf)__B, (__v8hf)_mm_setzero_ph(), (__mmask8)__U);
 }
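/* Illustrative sketch (not part of the patch) of the masked forms, assuming
 * an AVX10.2-enabled target. Following the usual AVX-512 masking convention,
 * a set mask bit selects the converted value; a clear bit selects the
 * corresponding lane of w (merge form) or zero (zeroing form). */
#include <immintrin.h>

static inline void masked_convert_demo(__m128h w, __m128 a, __m128 b,
                                       __m128h *merged, __m128h *zeroed) {
  __mmask8 lanes = 0x0F; /* example mask covering the four lanes converted from b */
  *merged = _mm_mask_cvtx2ps_ph(w, lanes, a, b);
  *zeroed = _mm_maskz_cvtx2ps_ph(lanes, a, b);
}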
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed
+/// single-precision (32-bit) floating-point elements to a 256-bit vector
+/// containing FP16 elements.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15 
+/// 	IF i < 8
+/// 		dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
+/// 	ELSE
+/// 		dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 8])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __A
+///    A 256-bit vector of [8 x float].
+/// \param __B
+///    A 256-bit vector of [8 x float].
+/// \returns
+///    A 256-bit vector of [16 x fp16]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtx2ps_ph(__m256 __A,
                                                                   __m256 __B) {
   return (__m256h)__builtin_ia32_vcvt2ps2phx256_mask(
@@ -49,6 +171,42 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtx2ps_ph(__m256 __A,
       _MM_FROUND_CUR_DIRECTION);
 }
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed
+/// single-precision (32-bit) floating-point elements to a 256-bit vector
+/// containing FP16 elements. Merging mask \a __U is used to determine if given
+/// element should be taken from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF mask[i]
+/// 		dst.fp16[i] := __W[i]
+/// 	ELSE
+/// 		IF i < 8
+/// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
+/// 		ELSE
+/// 			dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 8])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __W
+///    A 256-bit vector of [16 x fp16].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [8 x float].
+/// \param __B
+///    A 256-bit vector of [8 x float].
+/// \returns
+///    A 256-bit vector of [16 x fp16]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_cvtx2ps_ph(__m256h __W, __mmask16 __U, __m256 __A, __m256 __B) {
   return (__m256h)__builtin_ia32_vcvt2ps2phx256_mask(
@@ -56,6 +214,40 @@ _mm256_mask_cvtx2ps_ph(__m256h __W, __mmask16 __U, __m256 __A, __m256 __B) {
       _MM_FROUND_CUR_DIRECTION);
 }
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed
+/// single-precision (32-bit) floating-point elements to a 256-bit vector
+/// containing FP16 elements. Zeroing mask \a __U is used to determine if given
+/// element should be zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15 
+/// 	IF mask[i]
+/// 		dst.fp16[i] := 0
+/// 	ELSE
+/// 		IF i < 8
+/// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
+/// 		ELSE
+/// 			dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 8])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __U
+///    A 8-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [8 x float].
+/// \param __B
+///    A 256-bit vector of [8 x float].
+/// \returns
+///    A 256-bit vector of [16 x fp16]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    zero is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
   return (__m256h)__builtin_ia32_vcvt2ps2phx256_mask(
@@ -63,15 +255,127 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
       _MM_FROUND_CUR_DIRECTION);
 }
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed
+/// single-precision (32-bit) floating-point elements to a 256-bit vector
+/// containing FP16 elements. Rounding mode \a __R needs to be provided.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15 
+/// 	IF i < 8
+/// 		dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
+/// 	ELSE
+/// 		dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 8])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __A
+///    A 256-bit vector of [8 x float].
+/// \param __B
+///    A 256-bit vector of [8 x float].
+/// \param __R
+///    Rounding mode. Valid inputs are: _MM_FROUND_CUR_DIRECTION or
+///    result bitwise or of _MM_FROUND_NO_EXC with at most one of the following:
+///    _MM_FROUND_TO_NEAREST_INT, _MM_FROUND_TO_NEG_INF, _MM_FROUND_TO_POS_INF,
+///    _MM_FROUND_TO_ZERO.
+/// \returns
+///    A 256-bit vector of [16 x fp16]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A.
 #define _mm256_cvtx_round2ps_ph(A, B, R)                                       \
   ((__m256h)__builtin_ia32_vcvt2ps2phx256_mask(                                \
       (__v8sf)(A), (__v8sf)(B), (__v16hf)_mm256_undefined_ph(),                \
       (__mmask16)(-1), (const int)(R)))
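/* Illustrative sketch (not part of the patch): the *_round variants take an
 * explicit rounding mode instead of using the current MXCSR rounding, e.g.
 * truncation with exceptions suppressed. Assumes an AVX10.2-enabled target. */
#include <immintrin.h>

static inline __m256h pack_ps_to_ph_truncating(__m256 a, __m256 b) {
  return _mm256_cvtx_round2ps_ph(a, b,
                                 _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
}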
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed
+/// single-precision (32-bit) floating-point elements to a 256-bit vector
+/// containing FP16 elements. Merging mask \a __U is used to determine if given
+/// element should be taken from \a __W instead. Rounding mode \a __R needs to
+/// be provided.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF mask[i]
+/// 		dst.fp16[i] := __W[i]
+/// 	ELSE
+/// 		IF i < 8
+/// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
+/// 		ELSE
+/// 			dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 8])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __W
+///    A 256-bit vector of [16 x fp16].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [8 x float].
+/// \param __B
+///    A 256-bit vector of [8 x float].
+/// \param __R
+///    Rounding mode. Valid inputs are: _MM_FROUND_CUR_DIRECTION or
+///    result bitwise or of _MM_FROUND_NO_EXC with at most one of the following:
+///    _MM_FROUND_TO_NEAREST_INT, _MM_FROUND_TO_NEG_INF, _MM_FROUND_TO_POS_INF,
+///    _MM_FROUND_TO_ZERO.
+/// \returns
+///    A 256-bit vector of [16 x fp16]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 #define _mm256_mask_cvtx_round2ps_ph(W, U, A, B, R)                            \
   ((__m256h)__builtin_ia32_vcvt2ps2phx256_mask(                                \
       (__v8sf)(A), (__v8sf)(B), (__v16hf)(W), (__mmask16)(U), (const int)(R)))
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed
+/// single-precision (32-bit) floating-point elements to a 256-bit vector
+/// containing FP16 elements. Zeroing mask \a __U is used to determine if given
+/// element should be zeroed instead. Rounding mode \a __R needs to be provided.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15 
+/// 	IF mask[i]
+/// 		dst.fp16[i] := 0
+/// 	ELSE
+/// 		IF i < 8
+/// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
+/// 		ELSE
+/// 			dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 8])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __U
+///    A 8-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [8 x float].
+/// \param __B
+///    A 256-bit vector of [8 x float].
+/// \param __R
+///    Rounding mode. Valid inputs are: _MM_FROUND_CUR_DIRECTION or
+///    result bitwise or of _MM_FROUND_NO_EXC with at most one of the following:
+///    _MM_FROUND_TO_NEAREST_INT, _MM_FROUND_TO_NEG_INF, _MM_FROUND_TO_POS_INF,
+///    _MM_FROUND_TO_ZERO.
+/// \returns
+///    A 256-bit vector of [16 x fp16]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    zero is taken instead.
 #define _mm256_maskz_cvtx_round2ps_ph(U, A, B, R)                              \
   ((__m256h)__builtin_ia32_vcvt2ps2phx256_mask(                                \
       (__v8sf)(A), (__v8sf)(B), (__v16hf)(_mm256_setzero_ph()),                \

>From 5a7f69432e56429aa325e6e99b6614582a866277 Mon Sep 17 00:00:00 2001
From: "Pirog, Mikolaj Maciej" <mikolaj.maciej.pirog at intel.com>
Date: Tue, 17 Dec 2024 15:52:08 +0100
Subject: [PATCH 02/22] further work

---
 clang/lib/Headers/avx10_2convertintrin.h | 226 +++++++++++++++++++++--
 1 file changed, 209 insertions(+), 17 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index 62b71ce87dd382..28e028e906bb66 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -106,13 +106,13 @@ _mm_mask_cvtx2ps_ph(__m128h __W, __mmask8 __U, __m128 __A, __m128 __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF mask[i]
-/// 		dst.fp16[i] := 0
-/// 	ELSE
 /// 		IF i < 4
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
 /// 		ELSE
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 4])
 /// 		FI
+/// 	ELSE
+/// 		dst.fp16[i] := 0
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -130,8 +130,8 @@ _mm_mask_cvtx2ps_ph(__m128h __W, __mmask8 __U, __m128 __A, __m128 __B) {
 /// \returns
 ///    A 128-bit vector of [8 x fp16]. Lower 4 elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
-///    zero is taken instead.
+///    (converted) elements from \a __A. If corresponding mask bit is not set,
+///    then zero is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtx2ps_ph(__mmask8 __U, __m128 __A, __m128 __B) {
   return (__m128h)__builtin_ia32_vcvt2ps2phx128_mask(
@@ -222,13 +222,13 @@ _mm256_mask_cvtx2ps_ph(__m256h __W, __mmask16 __U, __m256 __A, __m256 __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF mask[i]
-/// 		dst.fp16[i] := 0
-/// 	ELSE
 /// 		IF i < 8
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
 /// 		ELSE
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 8])
 /// 		FI
+/// 	ELSE
+/// 		dst.fp16[i] := 0
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -238,7 +238,7 @@ _mm256_mask_cvtx2ps_ph(__m256h __W, __mmask16 __U, __m256 __A, __m256 __B) {
 /// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
 ///
 /// \param __U
-///    A 8-bit merging mask.
+///    A 16-bit zeroing mask.
 /// \param __A
 ///    A 256-bit vector of [8 x float].
 /// \param __B
@@ -246,8 +246,8 @@ _mm256_mask_cvtx2ps_ph(__m256h __W, __mmask16 __U, __m256 __A, __m256 __B) {
 /// \returns
 ///    A 256-bit vector of [16 x fp16]. Lower 8 elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
-///    zero is taken instead.
+///    (converted) elements from \a __A. If corresponding mask bit is not set,
+///    then zero is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
   return (__m256h)__builtin_ia32_vcvt2ps2phx256_mask(
@@ -279,7 +279,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///    A 256-bit vector of [8 x float].
 /// \param __R
 ///    Rounding mode. Valid inputs are: _MM_FROUND_CUR_DIRECTION or
-///    result bitwise or of _MM_FROUND_NO_EXC with at most one of the following:
+///    result of bitwise or of _MM_FROUND_NO_EXC with at most one of the following:
 ///    _MM_FROUND_TO_NEAREST_INT, _MM_FROUND_TO_NEG_INF, _MM_FROUND_TO_POS_INF,
 ///    _MM_FROUND_TO_ZERO.
 /// \returns
@@ -325,7 +325,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///    A 256-bit vector of [8 x float].
 /// \param __R
 ///    Rounding mode. Valid inputs are: _MM_FROUND_CUR_DIRECTION or
-///    result bitwise or of _MM_FROUND_NO_EXC with at most one of the following:
+///    result of bitwise or of _MM_FROUND_NO_EXC with at most one of the following:
 ///    _MM_FROUND_TO_NEAREST_INT, _MM_FROUND_TO_NEG_INF, _MM_FROUND_TO_POS_INF,
 ///    _MM_FROUND_TO_ZERO.
 /// \returns
@@ -345,13 +345,13 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF mask[i]
-/// 		dst.fp16[i] := 0
-/// 	ELSE
 /// 		IF i < 8
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
 /// 		ELSE
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 8])
 /// 		FI
+/// 	ELSE
+/// 		dst.fp16[i] := 0
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -361,21 +361,21 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
 ///
 /// \param __U
-///    A 8-bit merging mask.
+///    A 16-bit zeroing mask.
 /// \param __A
 ///    A 256-bit vector of [8 x float].
 /// \param __B
 ///    A 256-bit vector of [8 x float].
 /// \param __R
 ///    Rounding mode. Valid inputs are: _MM_FROUND_CUR_DIRECTION or
-///    result bitwise or of _MM_FROUND_NO_EXC with at most one of the following:
+///    result of bitwise or of _MM_FROUND_NO_EXC with at most one of the following:
 ///    _MM_FROUND_TO_NEAREST_INT, _MM_FROUND_TO_NEG_INF, _MM_FROUND_TO_POS_INF,
 ///    _MM_FROUND_TO_ZERO.
 /// \returns
 ///    A 256-bit vector of [16 x fp16]. Lower 8 elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
-///    zero is taken instead.
+///    (converted) elements from \a __A. If corresponding mask bit is not set,
+///    then zero is taken instead.
 #define _mm256_maskz_cvtx_round2ps_ph(U, A, B, R)                              \
   ((__m256h)__builtin_ia32_vcvt2ps2phx256_mask(                                \
       (__v8sf)(A), (__v8sf)(B), (__v16hf)(_mm256_setzero_ph()),                \
@@ -537,18 +537,114 @@ _mm256_maskz_cvtbiassph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
       (__mmask16)__U);
 }
 
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed
+/// FP16 floating-point elements to a 128-bit vector
+/// containing E5M2 FP8 elements.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF i < 8
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+///
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
                                                                   __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtne2ph2bf8_128((__v8hf)(__A),
                                                    (__v8hf)(__B));
 }
 
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
+/// Merging mask \a __U is used to determine if given element should be taken
+/// from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 16 
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		IF i < 8
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtne2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
   return (__m128i)__builtin_ia32_selectb_128(
       (__mmask16)__U, (__v16qi)_mm_cvtne2ph_pbf8(__A, __B), (__v16qi)__W);
 }
 
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
+/// Zeroing mask \a __U is used to determine if given element should be zeroed
+/// instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		IF i < 8
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 		FI
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+///
+/// \param __U
+///    A 16-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
+///    zero is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtne2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
   return (__m128i)__builtin_ia32_selectb_128(
@@ -556,18 +652,114 @@ _mm_maskz_cvtne2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
       (__v16qi)(__m128i)_mm_setzero_si128());
 }
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed
+/// FP16 floating-point elements to a 256-bit vector
+/// containing E5M2 FP8 elements.
+///
+/// \code{.operation}
+/// FOR i := 0 to 31
+/// 	IF i < 16 
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+///
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_cvtne2ph_pbf8(__m256h __A, __m256h __B) {
   return (__m256i)__builtin_ia32_vcvtne2ph2bf8_256((__v16hf)(__A),
                                                    (__v16hf)(__B));
 }
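/* Illustrative sketch (not part of the patch): narrow 32 FP16 values from two
 * __m256h vectors into one __m256i holding 32 E5M2 FP8 bytes; per the comment
 * above, the low 16 bytes come from b and the high 16 from a. Assumes an
 * AVX10.2-enabled target. */
#include <immintrin.h>

static inline __m256i pack_ph_to_bf8(__m256h a, __m256h b) {
  return _mm256_cvtne2ph_pbf8(a, b);
}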
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
+/// Merging mask \a __U is used to determine if given element should be taken
+/// from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 31
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W.fp8[i]
+/// 	ELSE
+/// 		IF i < 16 
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+///
+/// \param __W
+///    A 256-bit vector of [32 x fp8].
+/// \param __U
+///    A 32-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_pbf8(
     __m256i __W, __mmask32 __U, __m256h __A, __m256h __B) {
   return (__m256i)__builtin_ia32_selectb_256(
       (__mmask16)__U, (__v32qi)_mm256_cvtne2ph_pbf8(__A, __B), (__v32qi)__W);
 }
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
+/// Zeroing mask \a __U is used to determine if given element should be zeroed
+/// instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 31
+/// 	IF __U[i]
+/// 		dst.fp8[i] := 0
+/// 	ELSE
+/// 		IF i < 16 
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+///
+/// \param __U
+///    A 32-bit zeroing mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set,
+///    then element from \a __W is taken instead.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtne2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
   return (__m256i)__builtin_ia32_selectb_256(

>From f895546ee95495ae6464582510f1e79b30e60948 Mon Sep 17 00:00:00 2001
From: "Pirog, Mikolaj Maciej" <mikolaj.maciej.pirog at intel.com>
Date: Thu, 19 Dec 2024 20:10:03 +0100
Subject: [PATCH 03/22] further work

---
 clang/lib/Headers/avx10_2convertintrin.h | 1586 +++++++++++++++++++++-
 1 file changed, 1562 insertions(+), 24 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index 28e028e906bb66..15c7d9b66f520f 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -381,6 +381,32 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
       (__v8sf)(A), (__v8sf)(B), (__v16hf)(_mm256_setzero_ph()),                \
       (__mmask16)(U), (const int)(R)))
 
+/// Convert 128-bit vector \a __B, containing packed FP16 floating-point
+/// elements, to a 128-bit vector containing E5M2 FP8 elements. Conversion
+/// biases for the results are taken from the lower 8 bits of the
+/// corresponding 16-bit integer elements in \a __A.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	dst.fp8[i] := convert_fp16_to_fp8_bias(__B.fp16[i], __A.word[i] & 0xFF)
+/// ENDFOR
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
+///
+/// \param __A
+///    A 128-bit vector of [8 x int16] containing the conversion biases in
+///    the lower 8 bits of each element.
+/// \param __B
+///    A 128-bit vector of [8 x fp16] containing the values to convert.
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B, using biases from \a __A; upper
+///    elements are zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8_128_mask(
@@ -537,9 +563,8 @@ _mm256_maskz_cvtbiassph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
       (__mmask16)__U);
 }
 
-/// Convert two 128-bit vectors, \a __A and \a __B, containing packed
-/// FP16 floating-point elements to a 128-bit vector
-/// containing E5M2 FP8 elements.
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -575,7 +600,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
 /// from \a __W instead.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 16 
+/// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
 /// 	ELSE
@@ -652,9 +677,8 @@ _mm_maskz_cvtne2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
       (__v16qi)(__m128i)_mm_setzero_si128());
 }
 
-/// Convert two 256-bit vectors, \a __A and \a __B, containing packed
-/// FP16 floating-point elements to a 256-bit vector
-/// containing E5M2 FP8 elements.
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 31
@@ -759,7 +783,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_pbf8(
 ///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
-///    then element from \a __W is taken instead.
+///    zero is taken instead.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtne2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
   return (__m256i)__builtin_ia32_selectb_256(
@@ -767,18 +791,114 @@ _mm256_maskz_cvtne2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
       (__v32qi)(__m256i)_mm256_setzero_si256());
 }
 
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
+/// Resulting elements are saturated in case of overflow.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF i < 8
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
+///
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtne2ph2bf8s_128((__v8hf)(__A),
                                                     (__v8hf)(__B));
 }
 
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
+/// Merging mask \a __U is used to determine if given element should be taken
+/// from \a __W instead. Resulting elements are saturated in case of overflow.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		IF i < 8
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtnes2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
   return (__m128i)__builtin_ia32_selectb_128(
       (__mmask16)__U, (__v16qi)_mm_cvtnes2ph_pbf8(__A, __B), (__v16qi)__W);
 }
 
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
+/// Zeroing mask \a __U is used to determine if given element should be zeroed
+/// instead. Resulting elements are saturated in case of overflow.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		IF i < 8
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 		FI
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
+///
+/// \param __U
+///    A 16-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
+///    zero is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtnes2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
   return (__m128i)__builtin_ia32_selectb_128(
@@ -786,18 +906,114 @@ _mm_maskz_cvtnes2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
       (__v16qi)(__m128i)_mm_setzero_si128());
 }
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
+/// Resulting elements are saturated in case of overflow.
+///
+/// \code{.operation}
+/// FOR i := 0 to 31
+/// 	IF i < 16 
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
+///
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
   return (__m256i)__builtin_ia32_vcvtne2ph2bf8s_256((__v16hf)(__A),
                                                     (__v16hf)(__B));
 }
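/* Illustrative sketch (not part of the patch): the *_cvtnes2* forms are the
 * saturating counterparts of the conversions above; values that would
 * overflow the FP8 range saturate instead. Assumes an AVX10.2-enabled
 * target. */
#include <immintrin.h>

static inline __m256i pack_ph_to_bf8_saturating(__m256h a, __m256h b) {
  return _mm256_cvtnes2ph_pbf8(a, b);
}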
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
+/// Merging mask \a __U is used to determine if given element should be taken
+/// from \a __W instead. Resulting elements are saturated in case of overflow.
+///
+/// \code{.operation}
+/// FOR i := 0 to 31
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W.fp8[i]
+/// 	ELSE
+/// 		IF i < 16 
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
+///
+/// \param __W
+///    A 256-bit vector of [32 x fp8].
+/// \param __U
+///    A 32-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_pbf8(
     __m256i __W, __mmask32 __U, __m256h __A, __m256h __B) {
   return (__m256i)__builtin_ia32_selectb_256(
       (__mmask16)__U, (__v32qi)_mm256_cvtnes2ph_pbf8(__A, __B), (__v32qi)__W);
 }
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
+/// Zeroing mask \a __U is used to determine if given element should be zeroed
+/// instead. Resulting elements are saturated in case of overflow.
+///
+/// \code{.operation}
+/// FOR i := 0 to 31
+/// 	IF __U[i]
+/// 		dst.fp8[i] := 0
+/// 	ELSE
+/// 		IF i < 16 
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
+///
+/// \param __U
+///    A 32-bit zeroing mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set,
+///    zero is taken instead.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtnes2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
   return (__m256i)__builtin_ia32_selectb_256(
@@ -805,37 +1021,227 @@ _mm256_maskz_cvtnes2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
       (__v32qi)(__m256i)_mm256_setzero_si256());
 }
 
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF i < 8
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8 instruction.
+///
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
                                                                   __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtne2ph2hf8_128((__v8hf)(__A),
                                                    (__v8hf)(__B));
 }
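/* Illustrative sketch (not part of the patch): identical packing scheme to
 * the *_pbf8 intrinsics, but producing E4M3 (hf8) FP8 elements rather than
 * E5M2 (bf8). Assumes an AVX10.2-enabled target. */
#include <immintrin.h>

static inline __m128i pack_ph_to_hf8(__m128h a, __m128h b) {
  return _mm_cvtne2ph_phf8(a, b);
}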
 
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtne2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
-  return (__m128i)__builtin_ia32_selectb_128(
-      (__mmask16)__U, (__v16qi)_mm_cvtne2ph_phf8(__A, __B), (__v16qi)__W);
-}
-
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtne2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
-  return (__m128i)__builtin_ia32_selectb_128(
-      (__mmask16)__U, (__v16qi)_mm_cvtne2ph_phf8(__A, __B),
-      (__v16qi)(__m128i)_mm_setzero_si128());
-}
-
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
-  return (__m256i)__builtin_ia32_vcvtne2ph2hf8_256((__v16hf)(__A),
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
+/// Merging mask \a __U is used to determine if given element should be taken
+/// from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		IF i < 8
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8 instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_mask_cvtne2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
+  return (__m128i)__builtin_ia32_selectb_128(
+      (__mmask16)__U, (__v16qi)_mm_cvtne2ph_phf8(__A, __B), (__v16qi)__W);
+}
+
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
+/// Zeroing mask \a __U is used to determine if given element should be zeroed
+/// instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		IF i < 8
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 		FI
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8 instruction.
+///
+/// \param __U
+///    A 16-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
+///    zero is taken instead.
+static __inline__ __m128i __DEFAULT_FN_ATTRS128
+_mm_maskz_cvtne2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
+  return (__m128i)__builtin_ia32_selectb_128(
+      (__mmask16)__U, (__v16qi)_mm_cvtne2ph_phf8(__A, __B),
+      (__v16qi)(__m128i)_mm_setzero_si128());
+}
+
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
+///
+/// \code{.operation}
+/// FOR i := 0 to 31
+/// 	IF i < 16 
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8 instruction.
+///
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A.
+static __inline__ __m256i __DEFAULT_FN_ATTRS256
+_mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
+  return (__m256i)__builtin_ia32_vcvtne2ph2hf8_256((__v16hf)(__A),
                                                    (__v16hf)(__B));
 }
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
+/// Merging mask \a __U is used to determine if given element should be taken
+/// from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 31
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W.fp8[i]
+/// 	ELSE
+/// 		IF i < 16 
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8 instruction.
+///
+/// \param __W
+///    A 256-bit vector of [32 x fp8].
+/// \param __U
+///    A 32-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_phf8(
     __m256i __W, __mmask32 __U, __m256h __A, __m256h __B) {
   return (__m256i)__builtin_ia32_selectb_256(
       (__mmask16)__U, (__v32qi)_mm256_cvtne2ph_phf8(__A, __B), (__v32qi)__W);
 }
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
+/// Zeroing mask \a __U is used to determine if given element should be zeroed
+/// instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 31
+/// 	IF __U[i]
+/// 		dst.fp8[i] := 0
+/// 	ELSE
+/// 		IF i < 16 
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8 instruction.
+///
+/// \param __U
+///    A 32-bit zeroing mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set,
+///    zero is taken instead.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtne2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
   return (__m256i)__builtin_ia32_selectb_256(
@@ -843,18 +1249,114 @@ _mm256_maskz_cvtne2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
       (__v32qi)(__m256i)_mm256_setzero_si256());
 }
 
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
+/// Resulting elements are saturated in case of overflow.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF i < 8
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
+///
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtne2ph2hf8s_128((__v8hf)(__A),
                                                     (__v8hf)(__B));
 }
 
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
+/// Merging mask \a __U is used to determine if given element should be taken
+/// from \a __W instead. Resulting elements are saturated in case of overflow.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		IF i < 8
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtnes2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
   return (__m128i)__builtin_ia32_selectb_128(
       (__mmask16)__U, (__v16qi)_mm_cvtnes2ph_phf8(__A, __B), (__v16qi)__W);
 }
 
+/// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
+/// Zeroing mask \a __U is used to determine if given element should be zeroed
+/// instead. Resulting elements are saturated in case of overflow.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		IF i < 8
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 		FI
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
+///
+/// \param __U
+///    A 16-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
+///    zero is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtnes2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
   return (__m128i)__builtin_ia32_selectb_128(
@@ -862,18 +1364,114 @@ _mm_maskz_cvtnes2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
       (__v16qi)(__m128i)_mm_setzero_si128());
 }
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
+/// Resulting elements are saturated in case of overflow.
+///
+/// \code{.operation}
+/// FOR i := 0 to 31
+/// 	IF i < 16 
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
+///
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
   return (__m256i)__builtin_ia32_vcvtne2ph2hf8s_256((__v16hf)(__A),
                                                     (__v16hf)(__B));
 }
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
+/// Merging mask \a __U is used to determine if given element should be taken
+/// from \a __W instead. Resulting elements are saturated in case of overflow.
+///
+/// \code{.operation}
+/// FOR i := 0 to 31
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W.fp8[i]
+/// 	ELSE
+/// 		IF i < 16 
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
+///
+/// \param __W
+///    A 256-bit vector of [32 x fp8].
+/// \param __U
+///    A 32-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_phf8(
     __m256i __W, __mmask32 __U, __m256h __A, __m256h __B) {
   return (__m256i)__builtin_ia32_selectb_256(
       (__mmask16)__U, (__v32qi)_mm256_cvtnes2ph_phf8(__A, __B), (__v32qi)__W);
 }
 
+/// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
+/// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
+/// Zeroing mask \a __U is used to determine if given element should be zeroed
+/// instead. Resulting elements are saturated in case of overflow.
+///
+/// \code{.operation}
+/// FOR i := 0 to 31
+/// 	IF __U[i]
+/// 		dst.fp8[i] := 0
+/// 	ELSE
+/// 		IF i < 16 
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		ELSE
+/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 		FI
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
+///
+/// \param __U
+///    A 32-bit zeroing mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    (converted) elements from \a __B; higher order elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set,
+///    zero is taken instead.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtnes2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
   return (__m256i)__builtin_ia32_selectb_256(
@@ -881,206 +1479,1146 @@ _mm256_maskz_cvtnes2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
       (__v32qi)(__m256i)_mm256_setzero_si256());
 }
 
+/// Convert 128-bit vector \a __A, containing packed FP8 E4M3 floating-point
+/// elements to a 128-bit vector containing FP16 elements. The conversion is exact.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTHF82PH instruction.
+///
+/// \param __A
+///    A 128-bit vector of [16 x fp8].
+/// \returns
+///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
+///    (converted) elements from \a __A.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtnehf8_ph(__m128i __A) {
   return (__m128h)__builtin_ia32_vcvthf8_2ph128_mask(
       (__v16qi)__A, (__v8hf)(__m128h)_mm_undefined_ph(), (__mmask8)-1);
 }
 
+/// Convert 128-bit vector \a __A, containing packed FP8 E4M3 floating-point
+/// elements to a 128-bit vector containing FP16 elements. The conversion is
+/// exact. Merging mask \a __U is used to determine if given element should be
+/// taken from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp16[i] := __W[i]
+/// 	ELSE
+/// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTHF82PH instruction.
+///
+/// \param __W
+///    A 128-bit vector of [8 x fp16].
+/// \param __U
+///    An 8-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [16 x fp8].
+/// \returns
+///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask_cvtnehf8_ph(__m128h __W, __mmask8 __U, __m128i __A) {
   return (__m128h)__builtin_ia32_vcvthf8_2ph128_mask(
       (__v16qi)__A, (__v8hf)(__m128h)__W, (__mmask8)__U);
 }
 
+/// Convert 128-bit vector \a __A, containing packed FP8 E4M3 floating-point
+/// elements to a 128-bit vector containing FP16 elements. The conversion is
+/// exact. Zeroing mask \a __U is used to determine if given element should be
+/// zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 	ELSE
+/// 		dst.fp16[i] := 0
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTHF82PH instruction.
+///
+/// \param __U
+///    An 8-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [16 x fp8].
+/// \returns
+///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
+///    zero is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtnehf8_ph(__mmask8 __U, __m128i __A) {
   return (__m128h)__builtin_ia32_vcvthf8_2ph128_mask(
       (__v16qi)__A, (__v8hf)(__m128h)_mm_setzero_ph(), (__mmask8)__U);
 }
 
+/// Convert 256-bit vector \a __A, containing packed FP8 E4M3 floating-point
+/// elements to a 256-bit vector containing FP16 elements. The conversion is exact.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTHF82PH instruction.
+///
+/// \param __A
+///    A 256-bit vector of [32 x fp8].
+/// \returns
+///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
+///    (converted) elements from \a __A.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_cvtnehf8_ph(__m128i __A) {
   return (__m256h)__builtin_ia32_vcvthf8_2ph256_mask(
       (__v16qi)__A, (__v16hf)(__m256h)_mm256_undefined_ph(), (__mmask16)-1);
 }
 
+/// Convert 256-bit vector \a __A, containing packed FP8 E4M3 floating-point
+/// elements to a 256-bit vector containing FP16 elements. The conversion is
+/// exact. Merging mask \a __U is used to determine if given element should be
+/// taken from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15 
+/// 	IF __U[i]
+/// 		dst.fp16[i] := __W[i]
+/// 	ELSE
+/// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTHF82PH instruction.
+///
+/// \param __W
+///    A 256-bit vector of [16 x fp16].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [32 x fp8].
+/// \returns
+///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_cvtnehf8_ph(__m256h __W, __mmask16 __U, __m128i __A) {
   return (__m256h)__builtin_ia32_vcvthf8_2ph256_mask(
       (__v16qi)__A, (__v16hf)(__m256h)__W, (__mmask16)__U);
 }
 
+/// Convert 256-bit vector \a __A, containing packed FP8 E4M3 floating-point
+/// elements to a 256-bit vector containing FP16 elements. The conversion is
+/// exact. Zeroing mask \a __U is used to determine if given element should be
+/// zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 	ELSE
+/// 		dst.fp16[i] := 0
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTHF82PH instruction.
+///
+/// \param __U
+///    A 16-bit zeroing mask.
+/// \param __A
+///    A 256-bit vector of [32 x fp8].
+/// \returns
+///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
+///    zero is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtnehf8_ph(__mmask16 __U, __m128i __A) {
   return (__m256h)__builtin_ia32_vcvthf8_2ph256_mask(
       (__v16qi)__A, (__v16hf)(__m256h)_mm256_setzero_ph(), (__mmask16)__U);
 }
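
A short sketch of how the zero-masked widening above might be used; the helper name and the mask constant are illustrative, and it assumes the avx10.2-256 feature is enabled:

#include <immintrin.h>

// Widen 16 packed E4M3 bytes to FP16, keeping only the even lanes and
// zeroing the odd ones.
static __m256h widen_even_hf8(__m128i packed_hf8) {
  return _mm256_maskz_cvtnehf8_ph((__mmask16)0x5555, packed_hf8);
}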
 
+/// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
+/// resulting vector are zeroed.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8 instruction.
+///
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the (converted)
+///    elements from \a __A; upper elements are zeroed. 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_pbf8(__m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8_128_mask(
       (__v8hf)__A, (__v16qi)(__m128i)_mm_undefined_si128(), (__mmask8)-1);
 }
 
+/// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
+/// resulting vector are zeroed. Merging mask \a __U is used to determine if
+/// given element should be taken from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8 instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    An 8-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    (converted) elements from \a __A; upper elements are zeroed. If
+///    corresponding mask bit is set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtneph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8_128_mask(
       (__v8hf)__A, (__v16qi)(__m128i)__W, (__mmask8)__U);
 }
 
+/// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
+/// resulting vector are zeroed. Zeroing mask \a __U is used to determine if
+/// given element should be zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8 instruction.
+///
+/// \param __U
+///    An 8-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    (converted) elements from \a __A; upper elements are zeroed. If
+///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtneph_pbf8(__mmask8 __U, __m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8_128_mask(
       (__v8hf)__A, (__v16qi)(__m128i)_mm_setzero_si128(), (__mmask8)__U);
 }
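
A minimal sketch of pulling out the 8 packed E5M2 bytes produced by the zero-masked form above (helper name is made up; assumes a 64-bit target with avx10.2-256 enabled):

#include <immintrin.h>
#include <stdint.h>

// Narrow 8 FP16 values to E5M2 bytes; only bits [63:0] of the result carry
// data, so the low 64 bits are returned directly.
static uint64_t pack_bf8(__m128h h, __mmask8 live) {
  __m128i v = _mm_maskz_cvtneph_pbf8(live, h);
  return (uint64_t)_mm_cvtsi128_si64(v);
}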
 
+/// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E5M2 FP8 elements.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// ENDFOR
+///
+/// dst[255:128] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8 instruction.
+///
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the (converted)
+///    elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtneph_pbf8(__m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8_256_mask(
       (__v16hf)__A, (__v16qi)(__m128i)_mm_undefined_si128(), (__mmask16)-1);
 }
 
+/// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E5M2 FP8 elements. Merging mask \a __U is
+/// used to determine if given element should be taken from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	FI
+/// ENDFOR
+///
+/// dst[255:128] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8 instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If
+///    corresponding mask bit is set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_mask_cvtneph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8_256_mask(
       (__v16hf)__A, (__v16qi)(__m128i)__W, (__mmask16)__U);
 }
 
+/// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E5M2 FP8 elements. Zeroing mask \a __U is
+/// used to determine if given element should be zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+///
+/// dst[255:128] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8 instruction.
+///
+/// \param __U
+///    A 16-bit zeroing mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set,
+///    then element is zeroed instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtneph_pbf8(__mmask16 __U, __m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8_256_mask(
       (__v16hf)__A, (__v16qi)(__m128i)_mm_setzero_si128(), (__mmask16)__U);
 }
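
A small round-trip sketch combining the narrowing above with the exact E5M2-to-FP16 widening defined later in this header (the helper name is illustrative; assumes avx10.2-256 is enabled):

#include <immintrin.h>

// FP16 -> E5M2 -> FP16; the second conversion is exact, so any difference
// from the input comes entirely from the initial narrowing.
static __m256h roundtrip_bf8(__m256h x) {
  __m128i packed = _mm256_cvtneph_pbf8(x);
  return _mm256_cvtpbf8_ph(packed);
}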
 
+/// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
+/// resulting vector are zeroed. Results are saturated.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
+///
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the (converted)
+///    elements from \a __A; upper elements are zeroed. 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_pbf8(__m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8s_128_mask(
       (__v8hf)__A, (__v16qi)(__m128i)_mm_undefined_si128(), (__mmask8)-1);
 }
 
+/// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
+/// resulting vector are zeroed. Results are saturated. Merging mask \a __U is
+/// used to determine if given element should be taken from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    An 8-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    (converted) elements from \a __A; upper elements are zeroed. If
+///    corresponding mask bit is set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtnesph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8s_128_mask(
       (__v8hf)__A, (__v16qi)(__m128i)__W, (__mmask8)__U);
 }
 
+/// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
+/// resulting vector are zeroed. Results are saturated. Zeroing mask \a __U is
+/// used to determine if given element should be zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
+///
+/// \param __U
+///    An 8-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    (converted) elements from \a __A; upper elements are zeroed. If
+///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtnesph_pbf8(__mmask8 __U, __m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8s_128_mask(
       (__v8hf)__A, (__v16qi)(__m128i)_mm_setzero_si128(), (__mmask8)__U);
 }
 
+/// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E5M2 FP8 elements. Results are saturated.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// ENDFOR
+///
+/// dst[255:128] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
+///
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the (converted)
+///    elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtnesph_pbf8(__m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8s_256_mask(
       (__v16hf)__A, (__v16qi)(__m128i)_mm_undefined_si128(), (__mmask16)-1);
 }
 
+/// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E5M2 FP8 elements. Results are saturated.
+/// Merging mask \a __U is used to determine if given element should be taken
+/// from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	FI
+/// ENDFOR
+///
+/// dst[255:128] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If
+///    corresponding mask bit is set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_mask_cvtnesph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8s_256_mask(
       (__v16hf)__A, (__v16qi)(__m128i)__W, (__mmask16)__U);
 }
 
+/// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E5M2 FP8 elements. Results are saturated.
+/// Zeroing mask \a __U is used to determine if given element should be zeroed
+/// instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15 
+/// 	IF __U[i]
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+///
+/// dst[255:128] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
+///
+/// \param __U
+///    A 16-bit zeroing mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set,
+///    then element is zeroed instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtnesph_pbf8(__mmask16 __U, __m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8s_256_mask(
       (__v16hf)__A, (__v16qi)(__m128i)_mm_setzero_si128(), (__mmask16)__U);
 }
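
A brief sketch of using the zero-masked, saturating form above to convert only half of a vector (helper name and mask constant are illustrative; assumes avx10.2-256):

#include <immintrin.h>

// Saturating FP16 -> E5M2 conversion of the first 8 of 16 elements; the
// remaining result bytes are zeroed by the mask.
static __m128i narrow_low_half_sat(__m256h x) {
  return _mm256_maskz_cvtnesph_pbf8((__mmask16)0x00FF, x);
}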
 
+/// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
+/// resulting vector are zeroed.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8 instruction.
+///
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the (converted)
+///    elements from \a __A; upper elements are zeroed. 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_phf8(__m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8_128_mask(
       (__v8hf)__A, (__v16qi)(__m128i)_mm_undefined_si128(), (__mmask8)-1);
 }
 
+/// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
+/// resulting vector are zeroed. Merging mask \a __U is used to determine if
+/// given element should be taken from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8 instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    An 8-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    (converted) elements from \a __A; upper elements are zeroed. If
+///    corresponding mask bit is set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtneph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8_128_mask(
       (__v8hf)__A, (__v16qi)(__m128i)__W, (__mmask8)__U);
 }
 
+/// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
+/// resulting vector are zeroed. Zeroing mask \a __U is used to determine if
+/// given element should be zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8 instruction.
+///
+/// \param __U
+///    An 8-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    (converted) elements from \a __A; upper elements are zeroed. If
+///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtneph_phf8(__mmask8 __U, __m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8_128_mask(
       (__v8hf)__A, (__v16qi)(__m128i)_mm_setzero_si128(), (__mmask8)__U);
 }
 
+/// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E4M3 FP8 elements.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// ENDFOR
+///
+/// dst[255:128] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8 instruction.
+///
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the (converted)
+///    elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtneph_phf8(__m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8_256_mask(
       (__v16hf)__A, (__v16qi)(__m128i)_mm_undefined_si128(), (__mmask16)-1);
 }
 
+/// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E4M3 FP8 elements. Merging mask \a __U is
+/// used to determine if given element should be taken from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	FI
+/// ENDFOR
+///
+/// dst[255:128] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8 instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If
+///    corresponding mask bit is set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_mask_cvtneph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8_256_mask(
       (__v16hf)__A, (__v16qi)(__m128i)__W, (__mmask16)__U);
 }
 
+/// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E4M3 FP8 elements. Zeroing mask \a __U is
+/// used to determine if given element should be zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+///
+/// dst[255:128] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8 instruction.
+///
+/// \param __U
+///    A 16-bit zeroing mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set,
+///    then element is zeroed instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtneph_phf8(__mmask16 __U, __m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8_256_mask(
       (__v16hf)__A, (__v16qi)(__m128i)_mm_setzero_si128(), (__mmask16)__U);
 }
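
A small sketch of the merge-masked form above, re-converting only selected lanes of an existing packed E4M3 result (names are illustrative; assumes avx10.2-256):

#include <immintrin.h>

// Re-convert only the lanes marked dirty, keeping the other packed E4M3
// bytes from the previous result unchanged.
static __m128i update_hf8(__m128i previous, __mmask16 dirty, __m256h fresh) {
  return _mm256_mask_cvtneph_phf8(previous, dirty, fresh);
}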
 
+/// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
+/// resulting vector are zeroed. Results are saturated.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
+///
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the (converted)
+///    elements from \a __A; upper elements are zeroed. 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_phf8(__m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8s_128_mask(
       (__v8hf)__A, (__v16qi)(__m128i)_mm_undefined_si128(), (__mmask8)-1);
 }
 
+/// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
+/// resulting vector are zeroed. Results are saturated. Merging mask \a __U is
+/// used to determine if given element should be taken from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    An 8-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    (converted) elements from \a __A; upper elements are zeroed. If
+///    corresponding mask bit is set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtnesph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8s_128_mask(
       (__v8hf)__A, (__v16qi)(__m128i)__W, (__mmask8)__U);
 }
 
+/// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
+/// resulting vector are zeroed. Results are saturated. Zeroing mask \a __U is
+/// used to determine if given element should be zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
+///
+/// \param __U
+///    An 8-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    (converted) elements from \a __A; upper elements are zeroed. If
+///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtnesph_phf8(__mmask8 __U, __m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8s_128_mask(
       (__v8hf)__A, (__v16qi)(__m128i)_mm_setzero_si128(), (__mmask8)__U);
 }
 
+/// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E4M3 FP8 elements. Results are saturated.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// ENDFOR
+///
+/// dst[255:128] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
+///
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the (converted)
+///    elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtnesph_phf8(__m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8s_256_mask(
       (__v16hf)__A, (__v16qi)(__m128i)_mm_undefined_si128(), (__mmask16)-1);
 }
 
+/// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E4M3 FP8 elements. Results are saturated.
+/// Merging mask \a __U is used to determine if given element should be taken
+/// from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	FI
+/// ENDFOR
+///
+/// dst[255:128] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If
+///    corresponding mask bit is set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_mask_cvtnesph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8s_256_mask(
       (__v16hf)__A, (__v16qi)(__m128i)__W, (__mmask16)__U);
 }
 
+/// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
+/// to a 128-bit vector containing E4M3 FP8 elements. Results are saturated.
+/// Zeroing mask \a __U is used to determine if given element should be zeroed
+/// instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15 
+/// 	IF __U[i]
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+///
+/// dst[255:128] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
+///
+/// \param __U
+///    A 16-bit zeroing mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set,
+///    then element is zeroed instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtnesph_phf8(__mmask16 __U, __m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8s_256_mask(
       (__v16hf)__A, (__v16qi)(__m128i)_mm_setzero_si128(), (__mmask16)__U);
 }
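
A quick sketch of what saturation buys here, assuming the usual E4M3 encoding whose largest finite value is 448 (helper name and the input constant are illustrative; assumes avx10.2-256):

#include <immintrin.h>

// Out-of-range inputs saturate to the largest finite E4M3 value instead of
// overflowing to infinity/NaN encodings.
static __m128i saturate_demo(void) {
  __m256h big = _mm256_set1_ph((_Float16)1000.0f); // beyond the E4M3 range
  return _mm256_cvtnesph_phf8(big);
}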
 
+/// Convert 128-bit vector \a __A, containing packed FP8 E5M2 floating-point
+/// elements to a 128-bit vector containing FP16 elements. The conversion is exact.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBW and \c VPSLLW
+/// instructions.
+///
+/// \param __A
+///    A 128-bit vector of [16 x fp8].
+/// \returns
+///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
+///    (converted) elements from \a __A.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpbf8_ph(__m128i __A) {
   return _mm_castsi128_ph(_mm_slli_epi16(_mm_cvtepi8_epi16(__A), 8));
 }
 
+/// Convert 128-bit vector \a __A, containing packed FP8 E5M2 floating-point
+/// elements to a 128-bit vector containing FP16 elements. The conversion is
+/// exact. Merging mask \a __U is used to determine if given element should be
+/// taken from \a __S instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp16[i] := __S.fp16[i]
+/// 	ELSE
+/// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBW and \c VPSLLW
+/// instructions.
+///
+/// \param __S
+///    A 128-bit vector of [8 x fp16].
+/// \param __U
+///    An 8-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [16 x fp8].
+/// \returns
+///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __S is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask_cvtpbf8_ph(__m128h __S, __mmask8 __U, __m128i __A) {
   return _mm_castsi128_ph(
       _mm_mask_slli_epi16((__m128i)__S, __U, _mm_cvtepi8_epi16(__A), 8));
 }
 
+/// Convert 128-bit vector \a __A, containing packed FP8 E5M2 floating-point
+/// elements to a 128-bit vector containing FP16 elements. The conversion is
+/// exact. Zeroing mask \a __U is used to determine if given element should be
+/// zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 	ELSE
+/// 		dst.fp16[i] := 0
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBW and \c VPSLLW
+/// instructions.
+///
+/// \param __U
+///    An 8-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [16 x fp8].
+/// \returns
+///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
+///    zero is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtpbf8_ph(__mmask8 __U, __m128i __A) {
   return _mm_castsi128_ph(_mm_slli_epi16(_mm_maskz_cvtepi8_epi16(__U, __A), 8));
 }
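
A scalar sketch of why the shift-based widening above is exact: each E5M2 byte is simply placed into the high byte of an FP16 value, so the sign, exponent, and two mantissa bits land where FP16 expects them and the remaining mantissa bits are zero (function name is illustrative):

#include <stdint.h>

// Same bit placement as _mm_slli_epi16(_mm_cvtepi8_epi16(x), 8) performs
// per element.
static inline uint16_t bf8_to_fp16_bits(uint8_t bf8) {
  return (uint16_t)((uint16_t)bf8 << 8);
}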
 
+/// Convert 256-bit vector \a __A, containing packed FP8 E5M2 floating-point
+/// elements to a 256-bit vector containing FP16 elements. The conversion is exact.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBW and \c VPSLLW
+/// instructions.
+///
+/// \param __A
+///    A 256-bit vector of [32 x fp8].
+/// \returns
+///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
+///    (converted) elements from \a __A.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtpbf8_ph(__m128i __A) {
   return _mm256_castsi256_ph(_mm256_slli_epi16(_mm256_cvtepi8_epi16(__A), 8));
 }
 
+/// Convert 256-bit vector \a __A, containing packed FP8 E5M2 floating-point
+/// elements to a 256-bit vector containing FP16 elements. The conversion is
+/// exact. Merging mask \a __U is used to determine if given element should be
+/// taken from \a __S instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15 
+/// 	IF __U[i]
+/// 		dst.fp16[i] := __S.fp16[i]
+/// 	ELSE
+/// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBW and \c VPSLLW
+/// instructions.
+///
+/// \param __S
+///    A 256-bit vector of [16 x fp16].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [32 x fp8].
+/// \returns
+///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    element from \a __S is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_cvtpbf8_ph(__m256h __S, __mmask16 __U, __m128i __A) {
   return _mm256_castsi256_ph(
       _mm256_mask_slli_epi16((__m256i)__S, __U, _mm256_cvtepi8_epi16(__A), 8));
 }
 
+/// Convert 256-bit vector \a __A, containing packed FP8 E5M2 floating-point
+/// elements to a 256-bit vector containing FP16 elements. The conversion is
+/// exact. Zeroing mask \a __U is used to determine if given element should be
+/// zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 	ELSE
+/// 		dst.fp16[i] := 0
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VPMOVSXBW and \c VPSLLW
+/// instructions.
+///
+/// \param __U
+///    A 16-bit zeroing mask.
+/// \param __A
+///    A 256-bit vector of [32 x fp8].
+/// \returns
+///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
+///    zero is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtpbf8_ph(__mmask16 __U, __m128i __A) {
   return _mm256_castsi256_ph(
       _mm256_slli_epi16(_mm256_maskz_cvtepi8_epi16(__U, __A), 8));
 }

>From 99ff5fe6607836cc0b39d4794fd7d932398e7bd6 Mon Sep 17 00:00:00 2001
From: "Pirog, Mikolaj Maciej" <mikolaj.maciej.pirog at intel.com>
Date: Fri, 20 Dec 2024 14:44:14 +0100
Subject: [PATCH 04/22] further work

---
 clang/lib/Headers/avx10_2convertintrin.h | 837 ++++++++++++++++++++---
 1 file changed, 751 insertions(+), 86 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index 15c7d9b66f520f..341dc9cfd6aae3 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -28,7 +28,7 @@
 /// single-precision (32-bit) floating-point elements to a 128-bit vector
 /// containing FP16 elements.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF i < 4
 /// 		dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
@@ -61,7 +61,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtx2ps_ph(__m128 __A,
 /// containing FP16 elements. Merging mask \a __U is used to determine if given
 /// element should be taken from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF mask[i]
 /// 		dst.fp16[i] := __W[i]
@@ -103,7 +103,7 @@ _mm_mask_cvtx2ps_ph(__m128h __W, __mmask8 __U, __m128 __A, __m128 __B) {
 /// containing FP16 elements. Zeroing mask \a __U is used to determine if given
 /// element should be zeroed instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF mask[i]
 /// 		IF i < 4
@@ -142,7 +142,7 @@ _mm_maskz_cvtx2ps_ph(__mmask8 __U, __m128 __A, __m128 __B) {
 /// single-precision (32-bit) floating-point elements to a 256-bit vector
 /// containing FP16 elements.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF i < 8
 /// 		dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
@@ -176,7 +176,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtx2ps_ph(__m256 __A,
 /// containing FP16 elements. Merging mask \a __U is used to determine if given
 /// element should be taken from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF mask[i]
 /// 		dst.fp16[i] := __W[i]
@@ -219,7 +219,7 @@ _mm256_mask_cvtx2ps_ph(__m256h __W, __mmask16 __U, __m256 __A, __m256 __B) {
 /// containing FP16 elements. Zeroing mask \a __U is used to determine if given
 /// element should be zeroed instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF mask[i]
 /// 		IF i < 8
@@ -259,7 +259,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// single-precision (32-bit) floating-point elements to a 256-bit vector
 /// containing FP16 elements. Rounding mode \a __R needs to be provided.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF i < 8
 /// 		dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
@@ -297,7 +297,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// element should be taken from \a __W instead. Rounding mode \a __R needs to
 /// be provided.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF mask[i]
 /// 		dst.fp16[i] := __W[i]
@@ -342,7 +342,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// containing FP16 elements. Zeroing mask \a __U is used to determine if given
 /// element should be zeroed instead. Rounding mode \a __R needs to be provided.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF mask[i]
 /// 		IF i < 8
@@ -381,18 +381,16 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
       (__v8sf)(A), (__v8sf)(B), (__v16hf)(_mm256_setzero_ph()),                \
       (__mmask16)(U), (const int)(R)))
 
-/// Add two 128-bit vectors, \a __A and \a __B, containing packed
-/// single-precision (32-bit) floating-point elements and 16-bit integers
-/// respectively. 
-
-/// \code{.operation]
+/// Add two 128-bit vectors, \a __A and \a __B, containing 8-bit integers
+/// stored in the lower half of packed 16-bit integers and packed FP16
+/// floating-point elements, respectively. Results are converted to FP8 E5M2.
+///
+/// \code{.operation}
 /// FOR i := 0 to 7
-/// 	IF i < 4
-/// 		dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
-/// 	ELSE
-/// 		dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 4])
-/// 	FI
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
 /// ENDFOR
+///
+/// dst[127:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -400,25 +398,90 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [4 x float].
+///    A 128-bit vector of [8 x int16].
 /// \param __B
-///    A 128-bit vector of [4 x float].
+///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [8 x fp16]. Lower 4 elements correspond to the
-///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A.
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    sum of elements from \a __A and \a __B; higher order elements are zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8_128_mask(
       (__v16qi)__A, (__v8hf)__B, (__v16qi)_mm_undefined_si128(), (__mmask8)-1);
 }
 
+/// Add two 128-bit vectors, \a __A and \a __B, containing 8-bit integers
+/// stored in the lower half of packed 16-bit integers and packed FP16
+/// floating-point elements, respectively. Results are converted to FP8 E5M2.
+/// Merging mask \a __U is used to determine if given element should be taken
+/// from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W.fp8[i]
+/// 	ELSE
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    An 8-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [8 x int16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    elements from \a __A and \a __B; higher order elements are zeroed. If
+///    corresponding mask bit is set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtbiasph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8_128_mask(
       (__v16qi)__A, (__v8hf)__B, (__v16qi)(__m128i)__W, (__mmask8)__U);
 }
 
+/// Add two 128-bit vectors, \a __A and \a __B, containing 8-bit integers
+/// stored in the lower half of packed 16-bit integers and packed FP16
+/// floating-point elements, respectively. Results are converted to FP8 E5M2.
+/// Zeroing mask \a __U is used to determine if given element should be zeroed
+/// instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
+///
+/// \param __U
+///    An 8-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [8 x int16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    elements from \a __A and \a __B; higher order elements are zeroed. If
+///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtbiasph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8_128_mask(
@@ -426,6 +489,27 @@ _mm_maskz_cvtbiasph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
       (__mmask8)__U);
 }
 
+/// Add two 256-bit vectors, \a __A and \a __B, containing 8-bit integers
+/// stored in the lower half of packed 16-bit integers and packed FP16
+/// floating-point elements, respectively. Results are converted to FP8 E5M2.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
+///
+/// \param __A
+///    A 256-bit vector of [16 x int16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Elements correspond to the
+///    sum of elements from \a __A and \a __B.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8_256_mask(
@@ -433,12 +517,74 @@ _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
       (__mmask16)-1);
 }
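
A minimal sketch of driving the biased conversion above; the bias constant and helper name are illustrative, and per the signature the integer bias vector is the first argument (assumes avx10.2-256):

#include <immintrin.h>

// Convert 16 FP16 values to E5M2, supplying an 8-bit bias in the low byte
// of each 16-bit lane of the first operand.
static __m128i bias_convert(__m256h values) {
  __m256i bias = _mm256_set1_epi16(0x0080);   // illustrative bias value
  return _mm256_cvtbiasph_pbf8(bias, values); // bias vector comes first
}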
 
+/// Add two 256-bit vectors, \a __A and \a __B, containing 8-bit integers
+/// stored in the lower half of packed 16-bit integers and packed FP16
+/// floating-point elements, respectively. Results are converted to FP8 E5M2.
+/// Merging mask \a __U is used to determine if given element should be taken
+/// from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W.fp8[i]
+/// 	ELSE
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [16 x int16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    elements from \a __A and \a __B. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_pbf8(
     __m128i __W, __mmask16 __U, __m256i __A, __m256h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8_256_mask(
       (__v32qi)__A, (__v16hf)__B, (__v16qi)(__m128i)__W, (__mmask16)__U);
 }
 
+/// Add two 256-bit vectors, \a __A and \a __B, containing 8-bit integers
+/// stored in the lower half of packed 16-bit integers and packed FP16
+/// floating-point elements, respectively. Results are converted to FP8 E5M2.
+/// Zeroing mask \a __U is used to determine if given element should be zeroed
+/// instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
+///
+/// \param __U
+///    A 16-bit zeroing mask.
+/// \param __A
+///    A 256-bit vector of [16 x fp16].
+/// \param __B
+///    A 256-bit vector of [16 x int16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    elements from \a __A and \a __B. If corresponding mask bit is not set,
+///    then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtbiasph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8_256_mask(
@@ -446,18 +592,108 @@ _mm256_maskz_cvtbiasph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
       (__mmask16)__U);
 }
 
+/// Add two 128-bit vectors, \a __A and \a __B, containing 8-bit integers
+/// stored in the lower half of packed 16-bit integers and packed FP16
+/// floating-point elements, respectively. Results are converted to FP8 E5M2.
+/// Results are saturated.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
+///
+/// \param __A
+///    A 128-bit vector of [8 x int16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    sum of elements from \a __A and \a __B; higher order elements are zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8s_128_mask(
       (__v16qi)__A, (__v8hf)__B, (__v16qi)_mm_undefined_si128(), (__mmask8)-1);
 }
 
+/// Add two 128-bit vectors, \a __A and \a __B, containing 8-bit integers
+/// stored in the lower half of packed 16-bit integers and packed FP16
+/// floating-point elements, respectively. Results are converted to FP8 E5M2.
+/// Results are saturated. Merging mask \a __U is used to determine if given
+/// element should be taken from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W.fp8[i]
+/// 	ELSE
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    An 8-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [8 x int16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    elements from \a __A and \a __B; higher order elements are zeroed. If
+///    corresponding mask bit is set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtbiassph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8s_128_mask(
       (__v16qi)__A, (__v8hf)__B, (__v16qi)(__m128i)__W, (__mmask8)__U);
 }
 
+/// Add two 128-bit vectors, \a __A and \a __B, containing 8-bit integers
+/// stored in the lower half of packed 16-bit integers and packed FP16
+/// floating-point elements, respectively. Results are converted to FP8 E5M2.
+/// Results are saturated. Zeroing mask \a __U is used to determine if given
+/// element should be zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
+///
+/// \param __U
+///    An 8-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [8 x int16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    elements from \a __A and \a __B; higher order elements are zeroed. If
+///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtbiassph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8s_128_mask(
@@ -465,6 +701,28 @@ _mm_maskz_cvtbiassph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
       (__mmask8)__U);
 }
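
A short sketch of the saturating, zero-masked form above (argument names are illustrative; assumes avx10.2-256):

#include <immintrin.h>

// Lanes whose mask bit is clear come back as zero bytes in the packed E5M2
// result; the others are bias-adjusted, saturated conversions.
static __m128i bias_convert_sat(__m128i bias, __m128h values, __mmask8 live) {
  return _mm_maskz_cvtbiassph_pbf8(live, bias, values);
}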
 
+/// Add two 256-bit vectors, \a __A and \a __B, containing 8-bit integers
+/// stored in the lower half of packed 16-bit integers and packed FP16
+/// floating-point elements, respectively. Results are converted to FP8 E5M2.
+/// Results are saturated.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
+///
+/// \param __A
+///    A 256-bit vector of [16 x int16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Elements correspond to the
+///    sum of elements from \a __A and \a __B.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8s_256_mask(
@@ -472,12 +730,74 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
       (__mmask16)-1);
 }
 
+/// Add two 256-bit vectors, \a __A and \a __B, containing 8-bit integers
+/// stored in the lower half of packed 16-bit integers and packed FP16
+/// floating-point elements, respectively. Results are converted to FP8 E5M2.
+/// Results are saturated. Merging mask \a __U is used to determine if given
+/// element should be taken from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W.fp8[i]
+/// 	ELSE
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [16 x int16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    elements from \a __A and \a __B. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_pbf8(
     __m128i __W, __mmask16 __U, __m256i __A, __m256h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8s_256_mask(
       (__v32qi)__A, (__v16hf)__B, (__v16qi)(__m128i)__W, (__mmask16)__U);
 }
 
+/// Add two 256-bit vectors, \a __A and \a __B, containing 8-bit integers
+/// stored in the lower half of packed 16-bit integers and packed FP16
+/// floating-point elements, respectively. Results are converted to FP8 E5M2.
+/// Results are saturated. Zeroing mask \a __U is used to determine if given
+/// element should be zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
+///
+/// \param __U
+///    A 16-bit zeroing mask.
+/// \param __A
+///    A 256-bit vector of [16 x int16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    elements from \a __A and \a __B. If corresponding mask bit is not set,
+///    then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtbiassph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8s_256_mask(
@@ -485,18 +805,107 @@ _mm256_maskz_cvtbiassph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
       (__mmask16)__U);
 }
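
The zero-masked form, sketched under the same assumptions; bytes whose lanes are masked out come back as zero:

#include <immintrin.h>

// Biased FP16 -> saturated E5M2 conversion with zero masking: only the lanes
// enabled in 'mask' receive a converted byte, all other bytes are zero.
__attribute__((target("avx10.2-256"))) static inline __m128i
cvt_ph_to_bf8s_zero(__mmask16 mask, __m256i bias, __m256h values) {
  return _mm256_maskz_cvtbiassph_pbf8(mask, bias, values);
}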
 
+/// Add two 128-bit vectors, \a __A and \a __B, containing packed 8-bit
+/// integers stored in the lower half of packed 16-bit integers and FP16
+/// floating-point elements, respectively. Results are converted to FP8 E4M3.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __A
+///    A 128-bit vector of [8 x int16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    sum of elements from \a __A and \a __B; higher order elements are zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2hf8_128_mask(
       (__v16qi)__A, (__v8hf)__B, (__v16qi)_mm_undefined_si128(), (__mmask8)-1);
 }
 
+/// Add two 128-bit vectors, \a __A and \a __B, containing packed 8-bit
+/// integers stored in the lower half of packed 16-bit integers and FP16
+/// floating-point elements, respectively. Results are converted to FP8 E4M3.
+/// Merging mask \a __U is used to determine if given element should be taken
+/// from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    A 8-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [8 x int16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    elements from \a __A and \a __B; higher order elements are zeroed. If
+///    corresponding mask bit is set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtbiasph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2hf8_128_mask(
       (__v16qi)__A, (__v8hf)__B, (__v16qi)(__m128i)__W, (__mmask8)__U);
 }
 
+/// Add two 128-bit vectors, \a __A and \a __B, containing packed 8-bit
+/// integers stored in the lower half of packed 16-bit integers and FP16
+/// floating-point elements, respectively. Results are converted to FP8 E4M3.
+/// Zeroing mask \a __U is used to determine if given element should be zeroed
+/// instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __U
+///    A 8-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [8 x int16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    elements from \a __A and \a __B; higher order elements are zeroed. If
+///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtbiasph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2hf8_128_mask(
@@ -504,6 +913,27 @@ _mm_maskz_cvtbiasph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
       (__mmask8)__U);
 }
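
The 128-bit E4M3 variants follow the same pattern but produce only eight result bytes, with the upper half of the destination zeroed. A sketch under the same assumptions as above:

#include <immintrin.h>

// Convert 8 FP16 lanes to E4M3 FP8 bytes with zero masking; bytes 8..15 of the
// 128-bit result are zero regardless of the mask.
__attribute__((target("avx10.2-256"))) static inline __m128i
cvt_ph_to_hf8_low(__mmask8 mask, __m128i bias, __m128h values) {
  return _mm_maskz_cvtbiasph_phf8(mask, bias, values);
}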
 
+/// Add two 256-bit vectors, \a __A and \a __B, containing packed 8-bit
+/// integers stored in the lower half of packed 16-bit integers and FP16
+/// floating-point elements, respectively. Results are converted to FP8 E4M3.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __A
+///    A 256-bit vector of [16 x int16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Elements correspond to the
+///    sum of elements from \a __A and \a __B.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2hf8_256_mask(
@@ -511,12 +941,74 @@ _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
       (__mmask16)-1);
 }
 
+/// Add two 256-bit vectors, \a __A and \a __B, containing packed 8-bit
+/// integers stored in the lower half of packed 16-bit integers and FP16
+/// floating-point elements, respectively. Results are converted to FP8 E4M3.
+/// Merging mask \a __U is used to determine if given element should be taken
+/// from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [16 x int16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    elements from \a __A and \a __B. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_phf8(
     __m128i __W, __mmask16 __U, __m256i __A, __m256h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2hf8_256_mask(
       (__v32qi)__A, (__v16hf)__B, (__v16qi)(__m128i)__W, (__mmask16)__U);
 }
 
+/// Add two 256-bit vectors, \a __A and \a __B, containing packed 8-bit
+/// integers stored in the lower half of packed 16-bit integers and FP16
+/// floating-point elements, respectively. Results are converted to FP8 E4M3.
+/// Zeroing mask \a __U is used to determine if given element should be zeroed
+/// instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __U
+///    A 16-bit zeroing mask.
+/// \param __A
+///    A 256-bit vector of [16 x int16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    elements from \a __A and \a __B. If corresponding mask bit is not set,
+///    then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtbiasph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2hf8_256_mask(
@@ -524,18 +1016,108 @@ _mm256_maskz_cvtbiasph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
       (__mmask16)__U);
 }
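
Choosing between the E5M2 and E4M3 encodings is only a matter of which intrinsic is called. A sketch converting the same biased input both ways, under the same assumptions as above:

#include <immintrin.h>

// Produce both FP8 encodings of the same biased FP16 input: E5M2 keeps more
// exponent range, E4M3 keeps more mantissa precision.
__attribute__((target("avx10.2-256"))) static inline void
cvt_ph_both_formats(__m256i bias, __m256h values, __m128i *bf8, __m128i *hf8) {
  *bf8 = _mm256_cvtbiasph_pbf8(bias, values); // E5M2
  *hf8 = _mm256_cvtbiasph_phf8(bias, values); // E4M3
}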
 
+/// Add two 128-bit vectors, \a __A and \a __B, containing packed 8-bit
+/// integers stored in the lower half of packed 16-bit integers and FP16
+/// floating-point elements, respectively. Results are converted to FP8 E4M3.
+/// Results are saturated.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __A
+///    A 128-bit vector of [8 x int16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    sum of elements from \a __A and \a __B; higher order elements are zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2hf8s_128_mask(
       (__v16qi)__A, (__v8hf)__B, (__v16qi)_mm_undefined_si128(), (__mmask8)-1);
 }
 
+/// Add two 128-bit vectors, \a __A and \a __B, containing packed 8-bit
+/// integers stored in the lower half of packed 16-bit integers and FP16
+/// floating-point elements, respectively. Results are converted to FP8 E4M3.
+/// Results are saturated. Merging mask \a __U is used to determine if given
+/// element should be taken from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    A 8-bit merging mask.
+/// \param __A
+///    A 128-bit vector of [8 x int16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    elements from \a __A and \a __B; higher order elements are zeroed. If
+///    corresponding mask bit is set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtbiassph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2hf8s_128_mask(
       (__v16qi)__A, (__v8hf)__B, (__v16qi)(__m128i)__W, (__mmask8)__U);
 }
 
+/// Add two 128-bit vectors, \a __A and \a __B, containing packed 8-bit
+/// integers stored in the lower half of packed 16-bit integers and FP16
+/// floating-point elements, respectively. Results are converted to FP8 E4M3.
+/// Results are saturated. Zeroing mask \a __U is used to determine if given
+/// element should be zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 7
+/// 	IF __U[i]
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+///
+/// dst[127:64] := 0
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __U
+///    A 8-bit zeroing mask.
+/// \param __A
+///    A 128-bit vector of [8 x int16].
+/// \param __B
+///    A 128-bit vector of [8 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    elements from \a __A and \a __B; higher order elements are zeroed. If
+///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_maskz_cvtbiassph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2hf8s_128_mask(
@@ -543,6 +1125,28 @@ _mm_maskz_cvtbiassph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
       (__mmask8)__U);
 }
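
A sketch of the saturating 128-bit E4M3 form, under the same assumptions; FP16 values that do not fit the E4M3 range are clamped instead of overflowing:

#include <immintrin.h>

// Saturating biased FP16 -> E4M3 conversion of 8 lanes; the upper 8 bytes of
// the 128-bit result are zeroed.
__attribute__((target("avx10.2-256"))) static inline __m128i
cvt_ph_to_hf8s(__m128i bias, __m128h values) {
  return _mm_cvtbiassph_phf8(bias, values);
}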
 
+/// Add two 256-bit vectors, \a __A and \a __B, containing packed 8-bit
+/// integers stored in the lower half of packed 16-bit integers and FP16
+/// floating-point elements, respectively. Results are converted to FP8 E4M3.
+/// Results are saturated.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __A
+///    A 256-bit vector of [16 x int16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Elements correspond to the
+///    sum of elements from \a __A and \a __B.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2hf8s_256_mask(
@@ -550,12 +1154,74 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
       (__mmask16)-1);
 }
 
+/// Add two 256-bit vectors, \a __A and \a __B, containing packed 8-bit
+/// integers stored in the lower half of packed 16-bit integers and FP16
+/// floating-point elements, respectively. Results are converted to FP8 E4M3.
+/// Results are saturated. Merging mask \a __U is used to determine if given
+/// element should be taken from \a __W instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := __W[i]
+/// 	ELSE
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __W
+///    A 128-bit vector of [16 x fp8].
+/// \param __U
+///    A 16-bit merging mask.
+/// \param __A
+///    A 256-bit vector of [16 x int16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    elements from \a __A and \a __B. If corresponding mask bit is set, then
+///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_phf8(
     __m128i __W, __mmask16 __U, __m256i __A, __m256h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2hf8s_256_mask(
       (__v32qi)__A, (__v16hf)__B, (__v16qi)(__m128i)__W, (__mmask16)__U);
 }
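
Finally, a sketch that combines the 256-bit saturating E4M3 conversion with merge masking, for example to refresh only part of a previously converted buffer; the buffer handling is illustrative:

#include <immintrin.h>

// Convert 16 FP16 lanes to saturated E4M3 bytes, with 'mask' choosing per lane
// between the fresh conversion and the byte kept from 'prev', then store the
// 16 result bytes.
__attribute__((target("avx10.2-256"))) static inline void
update_hf8s(unsigned char out[16], __m128i prev, __mmask16 mask, __m256i bias,
            __m256h values) {
  __m128i r = _mm256_mask_cvtbiassph_phf8(prev, mask, bias, values);
  _mm_storeu_si128((__m128i *)out, r);
}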
 
+/// Add two 256-bit vectors, \a __A and \a __B, containing packed 8-bit
+/// integers stored in the lower half of packed 16-bit integers and FP16
+/// floating-point elements, respectively. Results are converted to FP8 E4M3.
+/// Results are saturated. Zeroing mask \a __U is used to determine if given
+/// element should be zeroed instead.
+///
+/// \code{.operation}
+/// FOR i := 0 to 15
+/// 	IF __U[i]
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__B.fp16[i], __A.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := 0
+/// 	FI
+/// ENDFOR
+/// \endcode
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+///
+/// \param __U
+///    A 16-bit zeroing mask.
+/// \param __A
+///    A 256-bit vector of [16 x int16].
+/// \param __B
+///    A 256-bit vector of [16 x fp16].
+/// \returns
+///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    elements from \a __A and \a __B. If corresponding mask bit is not set,
+///    then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_maskz_cvtbiassph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2hf8s_256_mask(
@@ -566,7 +1232,7 @@ _mm256_maskz_cvtbiassph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 16 
 /// 	IF i < 8
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
@@ -599,7 +1265,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
 /// Merging mask \a __U is used to determine if given element should be taken
 /// from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
@@ -641,7 +1307,7 @@ _mm_mask_cvtne2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// Zeroing mask \a __U is used to determine if given element should be zeroed
 /// instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 16 
 /// 	IF __U[i]
 /// 		IF i < 8
@@ -680,7 +1346,7 @@ _mm_maskz_cvtne2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 32 
 /// 	IF i < 16 
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
@@ -713,7 +1379,7 @@ _mm256_cvtne2ph_pbf8(__m256h __A, __m256h __B) {
 /// Merging mask \a __U is used to determine if given element should be taken
 /// from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 32 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W.fp8[i]
@@ -755,7 +1421,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_pbf8(
 /// Merging mask \a __U is used to determine if given element should be zeroed
 /// instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 32 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := 0
@@ -795,7 +1461,7 @@ _mm256_maskz_cvtne2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
 /// Resulting elements are saturated in case of overflow.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 16 
 /// 	IF i < 8
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
@@ -828,7 +1494,7 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 /// Merging mask \a __U is used to determine if given element should be taken
 /// from \a __W instead. Resulting elements are saturated in case of overflow.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 16 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
@@ -870,7 +1536,7 @@ _mm_mask_cvtnes2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// Zeroing mask \a __U is used to determine if given element should be zeroed
 /// instead. Resulting elements are saturated in case of overflow.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 16 
 /// 	IF __U[i]
 /// 		IF i < 8
@@ -910,7 +1576,7 @@ _mm_maskz_cvtnes2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
 /// Resulting elements are saturated in case of overflow.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 32 
 /// 	IF i < 16 
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
@@ -943,7 +1609,7 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 /// Merging mask \a __U is used to determine if given element should be taken
 /// from \a __W instead. Resulting elements are saturated in case of overflow.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 32 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W.fp8[i]
@@ -985,7 +1651,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_pbf8(
 /// Merging mask \a __U is used to determine if given element should be zeroed
 /// instead. Resulting elements are saturated in case of overflow.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 32 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := 0
@@ -1024,7 +1690,7 @@ _mm256_maskz_cvtnes2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 16 
 /// 	IF i < 8
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
@@ -1057,7 +1723,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
 /// Merging mask \a __U is used to determine if given element should be taken
 /// from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 16 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
@@ -1099,7 +1765,7 @@ _mm_mask_cvtne2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// Zeroing mask \a __U is used to determine if given element should be zeroed
 /// instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 16 
 /// 	IF __U[i]
 /// 		IF i < 8
@@ -1138,7 +1804,7 @@ _mm_maskz_cvtne2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 32 
 /// 	IF i < 16 
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
@@ -1171,7 +1837,7 @@ _mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
 /// Merging mask \a __U is used to determine if given element should be taken
 /// from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 32 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W.fp8[i]
@@ -1213,7 +1879,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_phf8(
 /// Merging mask \a __U is used to determine if given element should be zeroed
 /// instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 32 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := 0
@@ -1253,7 +1919,7 @@ _mm256_maskz_cvtne2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
 /// Resulting elements are saturated in case of overflow.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 16 
 /// 	IF i < 8
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
@@ -1286,7 +1952,7 @@ _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
 /// Merging mask \a __U is used to determine if given element should be taken
 /// from \a __W instead. Resulting elements are saturated in case of overflow.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 16 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
@@ -1328,7 +1994,7 @@ _mm_mask_cvtnes2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// Zeroing mask \a __U is used to determine if given element should be zeroed
 /// instead. Resulting elements are saturated in case of overflow.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 16 
 /// 	IF __U[i]
 /// 		IF i < 8
@@ -1368,7 +2034,7 @@ _mm_maskz_cvtnes2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
 /// Resulting elements are saturated in case of overflow.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 32 
 /// 	IF i < 16 
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
@@ -1401,7 +2067,7 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 /// Merging mask \a __U is used to determine if given element should be taken
 /// from \a __W instead. Resulting elements are saturated in case of overflow.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 32 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W.fp8[i]
@@ -1443,7 +2109,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_phf8(
 /// Merging mask \a __U is used to determine if given element should be zeroed
 /// instead. Resulting elements are saturated in case of overflow.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 32 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := 0
@@ -1482,7 +2148,7 @@ _mm256_maskz_cvtnes2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// Convert 128-bit vector \a __A, containing packed FP8 E4M3 floating-point
 /// elements to a 128-bit vector containing FP16 elements. The conversion is exact.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
 /// ENDFOR
@@ -1507,7 +2173,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtnehf8_ph(__m128i __A) {
 /// exact. Merging mask \a __U is used to determine if given element should be
 /// taken from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
 /// 		dst.fp16[i] := __W[i]
@@ -1542,7 +2208,7 @@ _mm_mask_cvtnehf8_ph(__m128h __W, __mmask8 __U, __m128i __A) {
 /// exact. Zeroing mask \a __U is used to determine if given element should be
 /// zeroed instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
 /// 		dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
@@ -1573,7 +2239,7 @@ _mm_maskz_cvtnehf8_ph(__mmask8 __U, __m128i __A) {
 /// Convert 256-bit vector \a __A, containing packed FP8 E4M3 floating-point
 /// elements to a 256-bit vector containing FP16 elements. The conversion is exact.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
 /// ENDFOR
@@ -1599,7 +2265,7 @@ _mm256_cvtnehf8_ph(__m128i __A) {
 /// exact. Merging mask \a __U is used to determine if given element should be
 /// taken from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		dst.fp16[i] := __W[i]
@@ -1634,7 +2300,7 @@ _mm256_mask_cvtnehf8_ph(__m256h __W, __mmask16 __U, __m128i __A) {
 /// exact. Zeroing mask \a __U is used to determine if given element should be
 /// zeroed instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
@@ -1666,7 +2332,7 @@ _mm256_maskz_cvtnehf8_ph(__mmask16 __U, __m128i __A) {
 /// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
 /// resulting vector are zeroed.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
@@ -1693,7 +2359,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_pbf8(__m128h __A) {
 /// resulting vector are zeroed. Merging mask \a __U is used to determine if
 /// given element should be taken from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
@@ -1731,7 +2397,7 @@ _mm_mask_cvtneph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// resulting vector are zeroed. Zeroing mask \a __U is used to determine if
 /// given element should be zeroed instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
@@ -1752,7 +2418,6 @@ _mm_mask_cvtneph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
 ///    corresponding mask bit is not set, then element is zeroed.
@@ -1765,7 +2430,7 @@ _mm_maskz_cvtneph_pbf8(__mmask8 __U, __m128h __A) {
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
 /// to a 128-bit vector containing E5M2 FP8 elements.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
@@ -1792,7 +2457,7 @@ _mm256_cvtneph_pbf8(__m256h __A) {
 /// to a 128-bit vector containing E5M2 FP8 elements. Merging mask \a __U is
 /// used to determine if given element should be taken from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
@@ -1828,7 +2493,7 @@ _mm256_mask_cvtneph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// to a 128-bit vector containing E5M2 FP8 elements. Zeroing mask \a __U is
 /// used to determine if given element should be zeroed instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
@@ -1862,7 +2527,7 @@ _mm256_maskz_cvtneph_pbf8(__mmask16 __U, __m256h __A) {
 /// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
 /// resulting vector are zeroed. Results are saturated.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
@@ -1889,7 +2554,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_pbf8(__m128h __A) {
 /// resulting vector are zeroed. Results are saturated. Merging mask \a __U is
 /// used to determine if given element should be taken from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
@@ -1926,7 +2591,7 @@ _mm_mask_cvtnesph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// resulting vector are zeroed. Results are saturated. Zeroing mask \a __U is
 /// used to determine if given element should be zeroed instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
@@ -1943,7 +2608,7 @@ _mm_mask_cvtnesph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
 ///
 /// \param __U
-///    A 8-bit merging mask.
+///    A 8-bit zeroing mask.
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
@@ -1959,7 +2624,7 @@ _mm_maskz_cvtnesph_pbf8(__mmask8 __U, __m128h __A) {
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
 /// to a 128-bit vector containing E5M2 FP8 elements. Results are saturated.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
@@ -1987,7 +2652,7 @@ _mm256_cvtnesph_pbf8(__m256h __A) {
 /// Merging mask \a __U is used to determine if given element should be taken
 /// from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
@@ -2024,7 +2689,7 @@ _mm256_mask_cvtnesph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// Zeroing mask \a __U is used to determine if given element should be zeroed
 /// instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
@@ -2058,7 +2723,7 @@ _mm256_maskz_cvtnesph_pbf8(__mmask16 __U, __m256h __A) {
 /// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
 /// resulting vector are zeroed.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
@@ -2085,7 +2750,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_phf8(__m128h __A) {
 /// resulting vector are zeroed. Merging mask \a __U is used to determine if
 /// given element should be taken from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
@@ -2123,7 +2788,7 @@ _mm_mask_cvtneph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// resulting vector are zeroed. Zeroing mask \a __U is used to determine if
 /// given element should be zeroed instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
@@ -2157,7 +2822,7 @@ _mm_maskz_cvtneph_phf8(__mmask8 __U, __m128h __A) {
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
 /// to a 128-bit vector containing E4M3 FP8 elements.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
@@ -2184,7 +2849,7 @@ _mm256_cvtneph_phf8(__m256h __A) {
 /// to a 128-bit vector containing E4M3 FP8 elements. Merging mask \a __U is
 /// used to determine if given element should be taken from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
@@ -2220,7 +2885,7 @@ _mm256_mask_cvtneph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// to a 128-bit vector containing E4M3 FP8 elements. Zeroing mask \a __U is
 /// used to determine if given element should be zeroed instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
@@ -2254,7 +2919,7 @@ _mm256_maskz_cvtneph_phf8(__mmask16 __U, __m256h __A) {
 /// to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
 /// resulting vector are zeroed. Results are saturated.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
@@ -2281,7 +2946,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_phf8(__m128h __A) {
 /// resulting vector are zeroed. Results are saturated. Merging mask \a __U is
 /// used to determine if given element should be taken from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
@@ -2318,7 +2983,7 @@ _mm_mask_cvtnesph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// resulting vector are zeroed. Results are saturated. Zeroing mask \a __U is
 /// used to determine if given element should be zeroed instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
@@ -2351,7 +3016,7 @@ _mm_maskz_cvtnesph_phf8(__mmask8 __U, __m128h __A) {
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
 /// to a 128-bit vector containing E4M3 FP8 elements. Results are saturated.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
@@ -2379,7 +3044,7 @@ _mm256_cvtnesph_phf8(__m256h __A) {
 /// Merging mask \a __U is used to determine if given element should be taken
 /// from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
@@ -2416,7 +3081,7 @@ _mm256_mask_cvtnesph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// Zeroing mask \a __U is used to determine if given element should be zeroed
 /// instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
@@ -2449,7 +3114,7 @@ _mm256_maskz_cvtnesph_phf8(__mmask16 __U, __m256h __A) {
 /// Convert 128-bit vector \a __A, containing packed FP8 E5M2 floating-point
 /// elements to a 128-bit vector containing FP16 elements. The conversion is exact.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
 /// ENDFOR
@@ -2473,7 +3138,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpbf8_ph(__m128i __A) {
 /// exact. Merging mask \a __U is used to determine if given element should be
 /// taken from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
 /// 		dst.fp16[i] := __W[i]
@@ -2508,7 +3173,7 @@ _mm_mask_cvtpbf8_ph(__m128h __S, __mmask8 __U, __m128i __A) {
 /// exact. Zeroing mask \a __U is used to determine if given element should be
 /// zeroed instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
 /// 		dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
@@ -2538,7 +3203,7 @@ _mm_maskz_cvtpbf8_ph(__mmask8 __U, __m128i __A) {
 /// Convert 256-bit vector \a __A, containing packed FP8 E4M3 floating-point
 /// elements to a 256-bit vector containing FP16 elements. The conversion is exact.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15
 /// 	dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
 /// ENDFOR
@@ -2562,7 +3227,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtpbf8_ph(__m128i __A) {
 /// exact. Merging mask \a __U is used to determine if given element should be
 /// taken from \a __W instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		dst.fp16[i] := __W[i]
@@ -2597,7 +3262,7 @@ _mm256_mask_cvtpbf8_ph(__m256h __S, __mmask8 __U, __m128i __A) {
 /// exact. Zeroing mask \a __U is used to determine if given element should be
 /// zeroed instead.
 ///
-/// \code{.operation]
+/// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])

>From b069337d461da42883093aedeb00c019131d3559 Mon Sep 17 00:00:00 2001
From: "Pirog, Mikolaj Maciej" <mikolaj.maciej.pirog at intel.com>
Date: Fri, 20 Dec 2024 17:17:17 +0100
Subject: [PATCH 05/22] Add all comments

---
 clang/lib/Headers/avx10_2convertintrin.h | 281 +++++++++++------------
 1 file changed, 139 insertions(+), 142 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index 341dc9cfd6aae3..52a64a5309763d 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -47,7 +47,7 @@
 /// \param __B
 ///    A 128-bit vector of [4 x float].
 /// \returns
-///    A 128-bit vector of [8 x fp16]. Lower 4 elements correspond to the
+///    A 128-bit vector of [8 x fp16]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtx2ps_ph(__m128 __A,
@@ -88,7 +88,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtx2ps_ph(__m128 __A,
 /// \param __B
 ///    A 128-bit vector of [4 x float].
 /// \returns
-///    A 128-bit vector of [8 x fp16]. Lower 4 elements correspond to the
+///    A 128-bit vector of [8 x fp16]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is set, then
 ///    element from \a __W is taken instead.
@@ -122,13 +122,13 @@ _mm_mask_cvtx2ps_ph(__m128h __W, __mmask8 __U, __m128 __A, __m128 __B) {
 /// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
 ///
 /// \param __U
-///    A 8-bit merging mask.
+///    A 8-bit zeroing mask.
 /// \param __A
 ///    A 128-bit vector of [4 x float].
 /// \param __B
 ///    A 128-bit vector of [4 x float].
 /// \returns
-///    A 128-bit vector of [8 x fp16]. Lower 4 elements correspond to the
+///    A 128-bit vector of [8 x fp16]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    then zero is taken instead.
@@ -161,7 +161,7 @@ _mm_maskz_cvtx2ps_ph(__mmask8 __U, __m128 __A, __m128 __B) {
 /// \param __B
 ///    A 256-bit vector of [8 x float].
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Lower 8 elements correspond to the
+///    A 256-bit vector of [16 x fp16]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtx2ps_ph(__m256 __A,
@@ -197,13 +197,13 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtx2ps_ph(__m256 __A,
 /// \param __W
 ///    A 256-bit vector of [16 x fp16].
 /// \param __U
-///    A 8-bit merging mask.
+///    A 16-bit merging mask.
 /// \param __A
 ///    A 256-bit vector of [8 x float].
 /// \param __B
 ///    A 256-bit vector of [8 x float].
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Lower 4 elements correspond to the
+///    A 256-bit vector of [16 x fp16]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is set, then
 ///    element from \a __W is taken instead.
@@ -238,13 +238,13 @@ _mm256_mask_cvtx2ps_ph(__m256h __W, __mmask16 __U, __m256 __A, __m256 __B) {
 /// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
 ///
 /// \param __U
-///    A 8-bit zeroing mask.
+///    A 16-bit zeroing mask.
 /// \param __A
 ///    A 256-bit vector of [8 x float].
 /// \param __B
 ///    A 256-bit vector of [8 x float].
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Lower 4 elements correspond to the
+///    A 256-bit vector of [16 x fp16]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    then zero is taken instead.
@@ -283,7 +283,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///    _MM_FROUND_TO_NEAREST_INT, _MM_FROUND_TO_NEG_INF, _MM_FROUND_TO_POS_INF,
 ///    _MM_FROUND_TO_ZERO.
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Lower 8 elements correspond to the
+///    A 256-bit vector of [16 x fp16]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 #define _mm256_cvtx_round2ps_ph(A, B, R)                                       \
@@ -318,7 +318,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// \param __W
 ///    A 256-bit vector of [16 x fp16].
 /// \param __U
-///    A 8-bit merging mask.
+///    A 16-bit merging mask.
 /// \param __A
 ///    A 256-bit vector of [8 x float].
 /// \param __B
@@ -329,7 +329,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///    _MM_FROUND_TO_NEAREST_INT, _MM_FROUND_TO_NEG_INF, _MM_FROUND_TO_POS_INF,
 ///    _MM_FROUND_TO_ZERO.
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Lower 4 elements correspond to the
+///    A 256-bit vector of [16 x fp16]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is set, then
 ///    element from \a __W is taken instead.
@@ -361,7 +361,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
 ///
 /// \param __U
-///    A 8-bit zeroing mask.
+///    A 16-bit zeroing mask.
 /// \param __A
 ///    A 256-bit vector of [8 x float].
 /// \param __B
@@ -372,7 +372,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///    _MM_FROUND_TO_NEAREST_INT, _MM_FROUND_TO_NEG_INF, _MM_FROUND_TO_POS_INF,
 ///    _MM_FROUND_TO_ZERO.
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Lower 4 elements correspond to the
+///    A 256-bit vector of [16 x fp16]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    then zero is taken instead.
@@ -395,7 +395,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
 ///
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
@@ -430,7 +430,7 @@ _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -470,7 +470,7 @@ _mm_mask_cvtbiasph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
 ///
 /// \param __U
 ///    A 8-bit zeroing mask.
@@ -501,7 +501,7 @@ _mm_maskz_cvtbiasph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
 ///
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
@@ -535,7 +535,7 @@ _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -573,7 +573,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_pbf8(
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
 ///
 /// \param __U
 ///    A 16-bit zeroing mask.
@@ -607,7 +607,7 @@ _mm256_maskz_cvtbiasph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
 ///
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
@@ -642,7 +642,7 @@ _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -682,7 +682,7 @@ _mm_mask_cvtbiassph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
 ///
 /// \param __U
 ///    A 8-bit zeroing mask.
@@ -714,7 +714,7 @@ _mm_maskz_cvtbiassph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
 ///
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
@@ -748,7 +748,7 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -786,7 +786,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_pbf8(
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
 ///
 /// \param __U
 ///    A 16-bit zeroing mask.
@@ -819,7 +819,7 @@ _mm256_maskz_cvtbiassph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2HF8 instruction.
 ///
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
@@ -854,7 +854,7 @@ _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2HF8 instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -894,7 +894,7 @@ _mm_mask_cvtbiasph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2HF8 instruction.
 ///
 /// \param __U
 ///    A 8-bit zeroing mask.
@@ -925,7 +925,7 @@ _mm_maskz_cvtbiasph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2HF8 instruction.
 ///
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
@@ -959,7 +959,7 @@ _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2HF8 instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -997,7 +997,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_phf8(
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2HF8 instruction.
 ///
 /// \param __U
 ///    A 16-bit zeroing mask.
@@ -1031,7 +1031,7 @@ _mm256_maskz_cvtbiasph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2HF8S instruction.
 ///
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
@@ -1066,7 +1066,7 @@ _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2HF8S instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -1106,7 +1106,7 @@ _mm_mask_cvtbiassph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2HF8S instruction.
 ///
 /// \param __U
 ///    A 8-bit zeroing mask.
@@ -1138,7 +1138,7 @@ _mm_maskz_cvtbiassph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2HF8S instruction.
 ///
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
@@ -1172,7 +1172,7 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2HF8S instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -1210,7 +1210,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_phf8(
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
+/// This intrinsic corresponds to the \c VCVTBIASPH2HF8S instruction.
 ///
 /// \param __U
 ///    A 16-bit zeroing mask.
@@ -1251,7 +1251,7 @@ _mm256_maskz_cvtbiassph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
@@ -1292,7 +1292,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is set, then
 ///    element from \a __W is taken instead.
@@ -1332,7 +1332,7 @@ _mm_mask_cvtne2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    zero is taken instead.
@@ -1347,7 +1347,7 @@ _mm_maskz_cvtne2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 32 
+/// FOR i := 0 to 31 
 /// 	IF i < 16 
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 	ELSE
@@ -1365,7 +1365,7 @@ _mm_maskz_cvtne2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -1380,7 +1380,7 @@ _mm256_cvtne2ph_pbf8(__m256h __A, __m256h __B) {
 /// from \a __W instead.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 32 
+/// FOR i := 0 to 31 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	ELSE
@@ -1406,7 +1406,7 @@ _mm256_cvtne2ph_pbf8(__m256h __A, __m256h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is set, then
 ///    element from \a __W is taken instead.
@@ -1422,7 +1422,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_pbf8(
 /// instead.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 32 
+/// FOR i := 0 to 31 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := 0
 /// 	ELSE
@@ -1446,7 +1446,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_pbf8(
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    zero is taken instead.
@@ -1459,7 +1459,7 @@ _mm256_maskz_cvtne2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
-/// Resulting elements are saturated in case of overflow.
+/// Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 16 
@@ -1480,7 +1480,7 @@ _mm256_maskz_cvtne2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -1492,7 +1492,7 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
 /// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead. Resulting elements are saturated in case of overflow.
+/// from \a __W instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 16 
@@ -1510,7 +1510,7 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -1534,10 +1534,10 @@ _mm_mask_cvtnes2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
 /// Zeroing mask \a __U is used to determine if given element should be zeroed
-/// instead. Resulting elements are saturated in case of overflow.
+/// instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 16 
+/// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		IF i < 8
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
@@ -1552,7 +1552,7 @@ _mm_mask_cvtnes2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
 ///
 /// \param __U
 ///    A 16-bit zeroing mask.
@@ -1561,7 +1561,7 @@ _mm_mask_cvtnes2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    zero is taken instead.
@@ -1574,10 +1574,10 @@ _mm_maskz_cvtnes2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
-/// Resulting elements are saturated in case of overflow.
+/// Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 32 
+/// FOR i := 0 to 31
 /// 	IF i < 16 
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 	ELSE
@@ -1588,14 +1588,14 @@ _mm_maskz_cvtnes2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
 ///
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -1607,10 +1607,10 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
 /// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead. Resulting elements are saturated in case of overflow.
+/// from \a __W instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 32 
+/// FOR i := 0 to 31
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	ELSE
@@ -1625,7 +1625,7 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
 ///
 /// \param __W
 ///    A 256-bit vector of [32 x fp8].
@@ -1636,7 +1636,7 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is set, then
 ///    element from \a __W is taken instead.
@@ -1649,10 +1649,10 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_pbf8(
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
 /// Merging mask \a __U is used to determine if given element should be zeroed
-/// instead. Resulting elements are saturated in case of overflow.
+/// instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 32 
+/// FOR i := 0 to 31
 /// 	IF __U[i]
 /// 		dst.fp8[i] := 0
 /// 	ELSE
@@ -1667,7 +1667,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_pbf8(
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
 ///
 /// \param __U
 ///    A 32-bit zeroing mask.
@@ -1676,7 +1676,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_pbf8(
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    zero is taken instead.
@@ -1691,7 +1691,7 @@ _mm256_maskz_cvtnes2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 16 
+/// FOR i := 0 to 15
 /// 	IF i < 8
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 	ELSE
@@ -1709,7 +1709,7 @@ _mm256_maskz_cvtnes2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
@@ -1724,7 +1724,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
 /// from \a __W instead.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 16 
+/// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
 /// 	ELSE
@@ -1750,7 +1750,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is set, then
 ///    element from \a __W is taken instead.
@@ -1766,7 +1766,7 @@ _mm_mask_cvtne2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// instead.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 16 
+/// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		IF i < 8
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
@@ -1790,7 +1790,7 @@ _mm_mask_cvtne2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    zero is taken instead.
@@ -1805,7 +1805,7 @@ _mm_maskz_cvtne2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 32 
+/// FOR i := 0 to 31
 /// 	IF i < 16 
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 	ELSE
@@ -1823,7 +1823,7 @@ _mm_maskz_cvtne2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -1838,7 +1838,7 @@ _mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
 /// from \a __W instead.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 32 
+/// FOR i := 0 to 31
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	ELSE
@@ -1864,7 +1864,7 @@ _mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is set, then
 ///    element from \a __W is taken instead.
@@ -1880,7 +1880,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_phf8(
 /// instead.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 32 
+/// FOR i := 0 to 31
 /// 	IF __U[i]
 /// 		dst.fp8[i] := 0
 /// 	ELSE
@@ -1904,7 +1904,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_phf8(
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    zero is taken instead.
@@ -1917,10 +1917,10 @@ _mm256_maskz_cvtne2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
-/// Resulting elements are saturated in case of overflow.
+/// Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 16 
+/// FOR i := 0 to 15
 /// 	IF i < 8
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 	ELSE
@@ -1931,14 +1931,14 @@ _mm256_maskz_cvtne2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
 ///
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -1950,10 +1950,10 @@ _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
 /// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead. Resulting elements are saturated in case of overflow.
+/// from \a __W instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 16 
+/// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W[i]
 /// 	ELSE
@@ -1968,7 +1968,7 @@ _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -1979,7 +1979,7 @@ _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is set, then
 ///    element from \a __W is taken instead.
@@ -1992,10 +1992,10 @@ _mm_mask_cvtnes2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
 /// Zeroing mask \a __U is used to determine if given element should be zeroed
-/// instead. Resulting elements are saturated in case of overflow.
+/// instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 16 
+/// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		IF i < 8
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
@@ -2010,7 +2010,7 @@ _mm_mask_cvtnes2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
 ///
 /// \param __U
 ///    A 16-bit zeroing mask.
@@ -2019,7 +2019,7 @@ _mm_mask_cvtnes2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    zero is taken instead.
@@ -2032,11 +2032,11 @@ _mm_maskz_cvtnes2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
-/// Resulting elements are saturated in case of overflow.
+/// Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 32 
-/// 	IF i < 16 
+/// FOR i := 0 to 31
+/// 	IF i < 16
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 	ELSE
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
@@ -2046,14 +2046,14 @@ _mm_maskz_cvtnes2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
 ///
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -2065,10 +2065,10 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
 /// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead. Resulting elements are saturated in case of overflow.
+/// from \a __W instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 32 
+/// FOR i := 0 to 31
 /// 	IF __U[i]
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	ELSE
@@ -2083,7 +2083,7 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
 ///
 /// \param __W
 ///    A 256-bit vector of [32 x fp8].
@@ -2094,7 +2094,7 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is set, then
 ///    element from \a __W is taken instead.
@@ -2107,10 +2107,10 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_phf8(
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
 /// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
 /// Merging mask \a __U is used to determine if given element should be zeroed
-/// instead. Resulting elements are saturated in case of overflow.
+/// instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 32 
+/// FOR i := 0 to 31 
 /// 	IF __U[i]
 /// 		dst.fp8[i] := 0
 /// 	ELSE
@@ -2125,7 +2125,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_phf8(
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
 ///
 /// \param __U
 ///    A 32-bit zeroing mask.
@@ -2134,7 +2134,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_phf8(
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower 16 elements correspond to the
+///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    zero is taken instead.
@@ -2150,7 +2150,7 @@ _mm256_maskz_cvtnes2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
+/// 	dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
 /// ENDFOR
 /// \endcode
 ///
@@ -2178,7 +2178,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtnehf8_ph(__m128i __A) {
 /// 	IF __U[i]
 /// 		dst.fp16[i] := __W[i]
 /// 	ELSE
-/// 		dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
+/// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -2211,7 +2211,7 @@ _mm_mask_cvtnehf8_ph(__m128h __W, __mmask8 __U, __m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
+/// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := 0
 /// 	FI
@@ -2241,7 +2241,7 @@ _mm_maskz_cvtnehf8_ph(__mmask8 __U, __m128i __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
+/// 	dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
 /// ENDFOR
 /// \endcode
 ///
@@ -2270,7 +2270,7 @@ _mm256_cvtnehf8_ph(__m128i __A) {
 /// 	IF __U[i]
 /// 		dst.fp16[i] := __W[i]
 /// 	ELSE
-/// 		dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
+/// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -2315,7 +2315,7 @@ _mm256_mask_cvtnehf8_ph(__m256h __W, __mmask16 __U, __m128i __A) {
 /// This intrinsic corresponds to the \c VCVTHF82PH instruction.
 ///
 /// \param __U
-///    A 16-bit merging mask.
+///    A 16-bit zeroing mask.
 /// \param __A
 ///    A 256-bit vector of [32 x fp8].
 /// \returns
@@ -2342,7 +2342,7 @@ _mm256_maskz_cvtnehf8_ph(__mmask16 __U, __m128i __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8 instruction.
 ///
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
@@ -2373,7 +2373,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_pbf8(__m128h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8 instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -2382,7 +2382,6 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_pbf8(__m128h __A) {
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
 ///    corresponding mask bit is set, then element from \a __W is taken instead.
@@ -2411,7 +2410,7 @@ _mm_mask_cvtneph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8 instruction.
 ///
 /// \param __U
 ///    A 8-bit merging mask.
@@ -2510,7 +2509,7 @@ _mm256_mask_cvtneph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
 ///
 /// \param __U
-///    A 16-bit merging mask.
+///    A 16-bit zeroing mask.
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
@@ -2537,7 +2536,7 @@ _mm256_maskz_cvtneph_pbf8(__mmask16 __U, __m256h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
 ///
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
@@ -2568,7 +2567,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_pbf8(__m128h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -2605,7 +2604,7 @@ _mm_mask_cvtnesph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
 ///
 /// \param __U
 ///    A 8-bit zeroing mask.
@@ -2634,7 +2633,7 @@ _mm_maskz_cvtnesph_pbf8(__mmask8 __U, __m128h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
 ///
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
@@ -2666,7 +2665,7 @@ _mm256_cvtnesph_pbf8(__m256h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -2703,10 +2702,10 @@ _mm256_mask_cvtnesph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
 ///
 /// \param __U
-///    A 16-bit merging mask.
+///    A 16-bit zeroing mask.
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
@@ -2733,7 +2732,7 @@ _mm256_maskz_cvtnesph_pbf8(__mmask16 __U, __m256h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8 instruction.
 ///
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
@@ -2773,7 +2772,6 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_phf8(__m128h __A) {
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
 ///    corresponding mask bit is set, then element from \a __W is taken instead.
@@ -2805,11 +2803,10 @@ _mm_mask_cvtneph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
 ///
 /// \param __U
-///    A 8-bit merging mask.
+///    A 8-bit zeroing mask.
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
 ///    corresponding mask bit is not set, then element is zeroed.
@@ -2902,7 +2899,7 @@ _mm256_mask_cvtneph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
 ///
 /// \param __U
-///    A 16-bit merging mask.
+///    A 16-bit zeroing mask.
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
@@ -2929,7 +2926,7 @@ _mm256_maskz_cvtneph_phf8(__mmask16 __U, __m256h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
 ///
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
@@ -2960,7 +2957,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_phf8(__m128h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -2997,10 +2994,10 @@ _mm_mask_cvtnesph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
 ///
 /// \param __U
-///    A 8-bit merging mask.
+///    A 8-bit zeroing mask.
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
@@ -3026,7 +3023,7 @@ _mm_maskz_cvtnesph_phf8(__mmask8 __U, __m128h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
 ///
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
@@ -3058,7 +3055,7 @@ _mm256_cvtnesph_phf8(__m256h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [16 x fp8].
@@ -3095,10 +3092,10 @@ _mm256_mask_cvtnesph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
+/// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
 ///
 /// \param __U
-///    A 16-bit merging mask.
+///    A 16-bit zeroing mask.
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
@@ -3122,7 +3119,7 @@ _mm256_maskz_cvtnesph_phf8(__mmask16 __U, __m256h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTHF82PH instruction.
+/// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __A
 ///    A 128-bit vector of [16 x fp8].
@@ -3150,7 +3147,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpbf8_ph(__m128i __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTHF82PH instruction.
+/// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [8 x fp16].
@@ -3185,7 +3182,7 @@ _mm_mask_cvtpbf8_ph(__m128h __S, __mmask8 __U, __m128i __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTHF82PH instruction.
+/// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __U
 ///    A 8-bit zeroing mask.
@@ -3211,7 +3208,7 @@ _mm_maskz_cvtpbf8_ph(__mmask8 __U, __m128i __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTHF82PH instruction.
+/// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __A
 ///    A 256-bit vector of [32 x fp8].
@@ -3239,7 +3236,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtpbf8_ph(__m128i __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTHF82PH instruction.
+/// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __W
 ///    A 256-bit vector of [16 x fp16].
@@ -3274,10 +3271,10 @@ _mm256_mask_cvtpbf8_ph(__m256h __S, __mmask8 __U, __m128i __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic corresponds to the \c VCVTHF82PH instruction.
+/// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __U
-///    A 16-bit merging mask.
+///    A 16-bit zeroing mask.
 /// \param __A
 ///    A 256-bit vector of [32 x fp8].
 /// \returns

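[Illustrative aside, not part of the patch] A minimal usage sketch of the
saturating two-source conversion documented above, assuming a compiler
targeting AVX10.2/256; the helper name below is invented for illustration:

  #include <immintrin.h>

  // Pack 32 FP16 values from two 256-bit vectors into 32 saturated BF8
  // (E5M2) bytes: result bytes 0..15 come from 'lo' (the __B operand),
  // bytes 16..31 from 'hi' (the __A operand).
  static __m256i pack32_fp16_to_bf8(__m256h hi, __m256h lo) {
    return _mm256_cvtnes2ph_pbf8(hi, lo);
  }
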
>From b1e7a88dfccdd28df46cbd5ee5da3060cbc602f7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Miko=C5=82aj=20Pir=C3=B3g?= <mikolajpirog at gmail.com>
Date: Sat, 21 Dec 2024 19:50:29 +0100
Subject: [PATCH 06/22] Reviewer suggestions: correct merging masking order,
 typos

---
 clang/lib/Headers/avx10_2convertintrin.h | 202 +++++++++++------------
 1 file changed, 101 insertions(+), 101 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index 52a64a5309763d..a4faf359e82c26 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -63,14 +63,14 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtx2ps_ph(__m128 __A,
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	IF mask[i]
-/// 		dst.fp16[i] := __W[i]
-/// 	ELSE
+/// 	IF __U[i]
 /// 		IF i < 4
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
 /// 		ELSE
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 4])
 /// 		FI
+/// 	ELSE
+/// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -90,7 +90,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtx2ps_ph(__m128 __A,
 /// \returns
 ///    A 128-bit vector of [8 x fp16]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask_cvtx2ps_ph(__m128h __W, __mmask8 __U, __m128 __A, __m128 __B) {
@@ -105,7 +105,7 @@ _mm_mask_cvtx2ps_ph(__m128h __W, __mmask8 __U, __m128 __A, __m128 __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	IF mask[i]
+/// 	IF __U[i]
 /// 		IF i < 4
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
 /// 		ELSE
@@ -178,14 +178,14 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtx2ps_ph(__m256 __A,
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	IF mask[i]
-/// 		dst.fp16[i] := __W[i]
-/// 	ELSE
+/// 	IF __U[i]
 /// 		IF i < 8
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
 /// 		ELSE
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 8])
 /// 		FI
+/// 	ELSE
+/// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -205,7 +205,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtx2ps_ph(__m256 __A,
 /// \returns
 ///    A 256-bit vector of [16 x fp16]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_cvtx2ps_ph(__m256h __W, __mmask16 __U, __m256 __A, __m256 __B) {
@@ -221,7 +221,7 @@ _mm256_mask_cvtx2ps_ph(__m256h __W, __mmask16 __U, __m256 __A, __m256 __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15 
-/// 	IF mask[i]
+/// 	IF __U[i]
 /// 		IF i < 8
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
 /// 		ELSE
@@ -299,14 +299,14 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	IF mask[i]
-/// 		dst.fp16[i] := __W[i]
-/// 	ELSE
+/// 	IF __U[i]
 /// 		IF i < 8
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
 /// 		ELSE
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 8])
 /// 		FI
+/// 	ELSE
+/// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -331,7 +331,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// \returns
 ///    A 256-bit vector of [16 x fp16]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 #define _mm256_mask_cvtx_round2ps_ph(W, U, A, B, R)                            \
   ((__m256h)__builtin_ia32_vcvt2ps2phx256_mask(                                \
@@ -344,7 +344,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15 
-/// 	IF mask[i]
+/// 	IF __U[i]
 /// 		IF i < 8
 /// 			dst.fp16[i] := convert_fp32_to_fp16(__B.fp32[i])
 /// 		ELSE
@@ -419,9 +419,9 @@ _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := _W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -443,7 +443,7 @@ _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
-///    corresponding mask bit is set, then element from \a __W is taken instead.
+///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtbiasph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8_128_mask(
@@ -526,9 +526,9 @@ _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := _W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -547,7 +547,7 @@ _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
 ///    A 256-bit vector of [16 x int16].
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
-///    elements from \a __A and \a __B. If corresponding mask bit is set, then
+///    elements from \a __A and \a __B. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_pbf8(
     __m128i __W, __mmask16 __U, __m256i __A, __m256h __B) {
@@ -631,9 +631,9 @@ _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := _W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -655,7 +655,7 @@ _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
-///    corresponding mask bit is set, then element from \a __W is taken instead.
+///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtbiassph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2bf8s_128_mask(
@@ -739,9 +739,9 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := _W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -760,7 +760,7 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
 ///    A 256-bit vector of [16 x int16].
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
-///    elements from \a __A and \a __B. If corresponding mask bit is set, then
+///    elements from \a __A and \a __B. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_pbf8(
     __m128i __W, __mmask16 __U, __m256i __A, __m256h __B) {
@@ -843,9 +843,9 @@ _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := _W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -867,7 +867,7 @@ _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
-///    corresponding mask bit is set, then element from \a __W is taken instead.
+///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtbiasph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2hf8_128_mask(
@@ -950,9 +950,9 @@ _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := _W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -971,7 +971,7 @@ _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
 ///    A 256-bit vector of [16 x int16].
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
-///    elements from \a __A and \a __B. If corresponding mask bit is set, then
+///    elements from \a __A and \a __B. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_phf8(
     __m128i __W, __mmask16 __U, __m256i __A, __m256h __B) {
@@ -1055,9 +1055,9 @@ _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := _W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1079,7 +1079,7 @@ _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
-///    corresponding mask bit is set, then element from \a __W is taken instead.
+///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtbiassph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
   return (__m128i)__builtin_ia32_vcvtbiasph2hf8s_128_mask(
@@ -1163,9 +1163,9 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := _W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -1184,7 +1184,7 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
 ///    A 256-bit vector of [16 x int16].
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
-///    elements from \a __A and \a __B. If corresponding mask bit is set, then
+///    elements from \a __A and \a __B. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_phf8(
     __m128i __W, __mmask16 __U, __m256i __A, __m256h __B) {
@@ -1268,13 +1268,13 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W[i]
-/// 	ELSE
 /// 		IF i < 8
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 		ELSE
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
 /// 		FI
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -1294,7 +1294,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtne2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
@@ -1382,13 +1382,13 @@ _mm256_cvtne2ph_pbf8(__m256h __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 31 
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W.fp8[i]
-/// 	ELSE
 /// 		IF i < 16 
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 		ELSE
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
 /// 		FI
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -1408,7 +1408,7 @@ _mm256_cvtne2ph_pbf8(__m256h __A, __m256h __B) {
 /// \returns
 ///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_pbf8(
     __m256i __W, __mmask32 __U, __m256h __A, __m256h __B) {
@@ -1497,13 +1497,13 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 16 
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W[i]
-/// 	ELSE
 /// 		IF i < 8
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 		ELSE
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
 /// 		FI
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -1523,7 +1523,7 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtnes2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
@@ -1612,13 +1612,13 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 31
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W.fp8[i]
-/// 	ELSE
 /// 		IF i < 16 
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 		ELSE
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
 /// 		FI
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -1638,7 +1638,7 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 /// \returns
 ///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_pbf8(
     __m256i __W, __mmask32 __U, __m256h __A, __m256h __B) {
@@ -1726,13 +1726,13 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W[i]
-/// 	ELSE
 /// 		IF i < 8
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 		ELSE
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
 /// 		FI
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -1752,7 +1752,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtne2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
@@ -1840,13 +1840,13 @@ _mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 31
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W.fp8[i]
-/// 	ELSE
 /// 		IF i < 16 
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 		ELSE
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
 /// 		FI
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -1866,7 +1866,7 @@ _mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
 /// \returns
 ///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_phf8(
     __m256i __W, __mmask32 __U, __m256h __A, __m256h __B) {
@@ -1955,13 +1955,13 @@ _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W[i]
-/// 	ELSE
 /// 		IF i < 8
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 		ELSE
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
 /// 		FI
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -1981,7 +1981,7 @@ _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtnes2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
@@ -2070,13 +2070,13 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 31
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W.fp8[i]
-/// 	ELSE
 /// 		IF i < 16 
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 		ELSE
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
 /// 		FI
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -2096,7 +2096,7 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 /// \returns
 ///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_phf8(
     __m256i __W, __mmask32 __U, __m256h __A, __m256h __B) {
@@ -2176,9 +2176,9 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtnehf8_ph(__m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp16[i] := __W[i]
-/// 	ELSE
 /// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 	ELSE
+/// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -2195,7 +2195,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtnehf8_ph(__m128i __A) {
 ///    A 128-bit vector of [16 x fp8].
 /// \returns
 ///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask_cvtnehf8_ph(__m128h __W, __mmask8 __U, __m128i __A) {
@@ -2268,9 +2268,9 @@ _mm256_cvtnehf8_ph(__m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp16[i] := __W[i]
-/// 	ELSE
 /// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 	ELSE
+/// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -2287,7 +2287,7 @@ _mm256_cvtnehf8_ph(__m128i __A) {
 ///    A 256-bit vector of [32 x fp8].
 /// \returns
 ///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_cvtnehf8_ph(__m256h __W, __mmask16 __U, __m128i __A) {
@@ -2362,9 +2362,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_pbf8(__m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2384,7 +2384,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_pbf8(__m128h __A) {
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
-///    corresponding mask bit is set, then element from \a __W is taken instead.
+///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtneph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8_128_mask(
@@ -2399,9 +2399,9 @@ _mm_mask_cvtneph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
-/// 	ELSE
 /// 		dst.fp8[i] := 0
+/// 	ELSE
+/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// 	FI
 /// ENDFOR
 ///
@@ -2459,9 +2459,9 @@ _mm256_cvtneph_pbf8(__m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2481,7 +2481,7 @@ _mm256_cvtneph_pbf8(__m256h __A) {
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If
-///    corresponding mask bit is set, then element from \a __W is taken instead.
+///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_mask_cvtneph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8_256_mask(
@@ -2556,9 +2556,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_pbf8(__m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2578,7 +2578,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_pbf8(__m128h __A) {
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
-///    corresponding mask bit is set, then element from \a __W is taken instead.
+///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtnesph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8s_128_mask(
@@ -2654,9 +2654,9 @@ _mm256_cvtnesph_pbf8(__m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2676,7 +2676,7 @@ _mm256_cvtnesph_pbf8(__m256h __A) {
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If
-///    corresponding mask bit is set, then element from \a __W is taken instead.
+///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_mask_cvtnesph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8s_256_mask(
@@ -2752,9 +2752,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_phf8(__m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2774,7 +2774,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_phf8(__m128h __A) {
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
-///    corresponding mask bit is set, then element from \a __W is taken instead.
+///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtneph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8_128_mask(
@@ -2849,9 +2849,9 @@ _mm256_cvtneph_phf8(__m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2871,7 +2871,7 @@ _mm256_cvtneph_phf8(__m256h __A) {
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If
-///    corresponding mask bit is set, then element from \a __W is taken instead.
+///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_mask_cvtneph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8_256_mask(
@@ -2946,9 +2946,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_phf8(__m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2968,7 +2968,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_phf8(__m128h __A) {
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
-///    corresponding mask bit is set, then element from \a __W is taken instead.
+///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_mask_cvtnesph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8s_128_mask(
@@ -3044,9 +3044,9 @@ _mm256_cvtnesph_phf8(__m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := __W[i]
-/// 	ELSE
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	ELSE
+/// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -3066,7 +3066,7 @@ _mm256_cvtnesph_phf8(__m256h __A) {
 /// \returns
 ///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If
-///    corresponding mask bit is set, then element from \a __W is taken instead.
+///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_mask_cvtnesph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8s_256_mask(
@@ -3138,9 +3138,9 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpbf8_ph(__m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp16[i] := __W[i]
-/// 	ELSE
 /// 		dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
+/// 	ELSE
+/// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -3157,7 +3157,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpbf8_ph(__m128i __A) {
 ///    A 128-bit vector of [16 x fp8].
 /// \returns
 ///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
 _mm_mask_cvtpbf8_ph(__m128h __S, __mmask8 __U, __m128i __A) {
@@ -3227,9 +3227,9 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtpbf8_ph(__m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp16[i] := __W[i]
-/// 	ELSE
 /// 		dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
+/// 	ELSE
+/// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
 /// ENDFOR
 /// \endcode
@@ -3246,7 +3246,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtpbf8_ph(__m128i __A) {
 ///    A 256-bit vector of [32 x fp8].
 /// \returns
 ///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
-///    (converted) elements from \a __A. If corresponding mask bit is set, then
+///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_mask_cvtpbf8_ph(__m256h __S, __mmask8 __U, __m128i __A) {
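
For reference (not part of the patch itself): a minimal usage sketch of the merge-masking behaviour documented above, using the intrinsic names as spelled in this revision of avx10_2convertintrin.h. It assumes a compiler and CPU with AVX10.2/256 support (e.g. clang with -mavx10.2-256); the helper name below is illustrative only.

    #include <immintrin.h>

    // Convert the low four FP16 elements of src to saturated bf8, keep the
    // next four result bytes from passthru, and leave the upper eight result
    // bytes zeroed, as described for _mm_mask_cvtnesph_pbf8 above.
    static __m128i convert_low4_keep_next4(__m128i passthru, __m128h src) {
      return _mm_mask_cvtnesph_pbf8(passthru, (__mmask8)0x0F, src);
    }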

>From 1c76481f397260d5b3a226b3c93aa514c1e413e1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Miko=C5=82aj=20Pir=C3=B3g?= <mikolajpirog at gmail.com>
Date: Sat, 21 Dec 2024 21:18:34 +0100
Subject: [PATCH 07/22] Reviewer suggestions: add bf8, hf8

---
 clang/lib/Headers/avx10_2convertintrin.h | 722 +++++++++++------------
 1 file changed, 361 insertions(+), 361 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index a4faf359e82c26..077d8c95b2f38b 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -25,8 +25,8 @@
                  __min_vector_width__(256)))
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed
-/// single-precision (32-bit) floating-point elements to a 128-bit vector
-/// containing FP16 elements.
+///    single-precision (32-bit) floating-point elements to a 128-bit vector
+///    containing FP16 elements.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -57,9 +57,9 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtx2ps_ph(__m128 __A,
 }
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed
-/// single-precision (32-bit) floating-point elements to a 128-bit vector
-/// containing FP16 elements. Merging mask \a __U is used to determine if given
-/// element should be taken from \a __W instead.
+///    single-precision (32-bit) floating-point elements to a 128-bit vector
+///    containing FP16 elements. Merging mask \a __U is used to determine if given
+///    element should be taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -99,9 +99,9 @@ _mm_mask_cvtx2ps_ph(__m128h __W, __mmask8 __U, __m128 __A, __m128 __B) {
 }
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed
-/// single-precision (32-bit) floating-point elements to a 128-bit vector
-/// containing FP16 elements. Zeroing mask \a __U is used to determine if given
-/// element should be zeroed instead.
+///    single-precision (32-bit) floating-point elements to a 128-bit vector
+///    containing FP16 elements. Zeroing mask \a __U is used to determine if given
+///    element should be zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -139,8 +139,8 @@ _mm_maskz_cvtx2ps_ph(__mmask8 __U, __m128 __A, __m128 __B) {
 }
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed
-/// single-precision (32-bit) floating-point elements to a 256-bit vector
-/// containing FP16 elements.
+///    single-precision (32-bit) floating-point elements to a 256-bit vector
+///    containing FP16 elements.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15 
@@ -172,9 +172,9 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtx2ps_ph(__m256 __A,
 }
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed
-/// single-precision (32-bit) floating-point elements to a 256-bit vector
-/// containing FP16 elements. Merging mask \a __U is used to determine if given
-/// element should be taken from \a __W instead.
+///    single-precision (32-bit) floating-point elements to a 256-bit vector
+///    containing FP16 elements. Merging mask \a __U is used to determine if given
+///    element should be taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -215,9 +215,9 @@ _mm256_mask_cvtx2ps_ph(__m256h __W, __mmask16 __U, __m256 __A, __m256 __B) {
 }
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed
-/// single-precision (32-bit) floating-point elements to a 256-bit vector
-/// containing FP16 elements. Zeroing mask \a __U is used to determine if given
-/// element should be zeroed instead.
+///    single-precision (32-bit) floating-point elements to a 256-bit vector
+///    containing FP16 elements. Zeroing mask \a __U is used to determine if given
+///    element should be zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15 
@@ -256,8 +256,8 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 }
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed
-/// single-precision (32-bit) floating-point elements to a 256-bit vector
-/// containing FP16 elements. Rounding mode \a __R needs to be provided.
+///    single-precision (32-bit) floating-point elements to a 256-bit vector
+///    containing FP16 elements. Rounding mode \a __R needs to be provided.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15 
@@ -292,10 +292,10 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
       (__mmask16)(-1), (const int)(R)))
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed
-/// single-precision (32-bit) floating-point elements to a 256-bit vector
-/// containing FP16 elements. Merging mask \a __U is used to determine if given
-/// element should be taken from \a __W instead. Rounding mode \a __R needs to
-/// be provided.
+///    single-precision (32-bit) floating-point elements to a 256-bit vector
+///    containing FP16 elements. Merging mask \a __U is used to determine if given
+///    element should be taken from \a __W instead. Rounding mode \a __R needs to
+///    be provided.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -338,9 +338,9 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
       (__v8sf)(A), (__v8sf)(B), (__v16hf)(W), (__mmask16)(U), (const int)(R)))
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed
-/// single-precision (32-bit) floating-point elements to a 256-bit vector
-/// containing FP16 elements. Zeroing mask \a __U is used to determine if given
-/// element should be zeroed instead. Rounding mode \a __R needs to be provided.
+///    single-precision (32-bit) floating-point elements to a 256-bit vector
+///    containing FP16 elements. Zeroing mask \a __U is used to determine if given
+///    element should be zeroed instead. Rounding mode \a __R needs to be provided.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15 
@@ -382,8 +382,8 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
       (__mmask16)(U), (const int)(R)))
 
 /// Add two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -402,7 +402,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    sum of elements from \a __A and \a __B; higher order elements are zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
@@ -411,10 +411,10 @@ _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
 }
 
 /// Add two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
+///    Merging mask \a __U is used to determine if given element should be taken
+///    from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -433,7 +433,7 @@ _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
@@ -441,7 +441,7 @@ _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
 ///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -451,10 +451,10 @@ _mm_mask_cvtbiasph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 }
 
 /// Add two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
-/// Zeroing mask \a __U is used to determine if given element should be zeroed
-/// instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
+///    Zeroing mask \a __U is used to determine if given element should be zeroed
+///    instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -479,7 +479,7 @@ _mm_mask_cvtbiasph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
 ///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -490,8 +490,8 @@ _mm_maskz_cvtbiasph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 }
 
 /// Add two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -508,7 +508,7 @@ _mm_maskz_cvtbiasph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Elements correspond to the
 ///    sum of elements from \a __A and \a __B.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
@@ -518,10 +518,10 @@ _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
 }
 
 /// Add two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
+///    Merging mask \a __U is used to determine if given element should be taken
+///    from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -538,7 +538,7 @@ _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
@@ -546,7 +546,7 @@ _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    A 128-bit vector of [16 x bf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_pbf8(
@@ -556,10 +556,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_pbf8(
 }
 
 /// Add two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
+///    Zeroing mask \a __U is used to determine if given element should be
+///    zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -582,7 +582,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_pbf8(
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    A 128-bit vector of [16 x bf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set,
 ///    then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
@@ -593,9 +593,9 @@ _mm256_maskz_cvtbiasph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 }
 
 /// Add two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
-/// Results are saturated.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
+///    Results are saturated.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -614,7 +614,7 @@ _mm256_maskz_cvtbiasph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    sum of elements from \a __A and \a __B; higher order elements are zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
@@ -623,10 +623,10 @@ _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
 }
 
 /// Add two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
-/// Results are saturated. Merging mask \a __U is used to determine if given
-/// element should be taken from \a __W instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
+///    Results are saturated. Merging mask \a __U is used to determine if given
+///    element should be taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -645,7 +645,7 @@ _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
@@ -653,7 +653,7 @@ _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
 ///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -663,10 +663,10 @@ _mm_mask_cvtbiassph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 }
 
 /// Add two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
-/// Results are saturated. Zeroing mask \a __U is used to determine if given
-/// element should be zeroed instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
+///    Results are saturated. Zeroing mask \a __U is used to determine if given
+///    element should be zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -691,7 +691,7 @@ _mm_mask_cvtbiassph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
 ///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -702,9 +702,9 @@ _mm_maskz_cvtbiassph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 }
 
 /// Add two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
-/// Results are saturated.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
+///    Results are saturated.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -721,7 +721,7 @@ _mm_maskz_cvtbiassph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Elements correspond to the
 ///    sum of elements from \a __A and \a __B.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
@@ -731,10 +731,10 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
 }
 
 /// Add two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
-/// Results are saturated. Merging mask \a __U is used to determine if given
-/// element should be taken from \a __W instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
+///    Results are saturated. Merging mask \a __U is used to determine if given
+///    element should be taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -751,7 +751,7 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
@@ -759,7 +759,7 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    A 128-bit vector of [16 x bf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_pbf8(
@@ -769,10 +769,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_pbf8(
 }
 
 /// Add two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
-/// Results are saturated. Merging mask \a __U is used to determine if given
-/// element should be taken from \a __W instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E5M2.
+///    Results are saturated. Zeroing mask \a __U is used to determine if given
+///    element should be zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -795,7 +795,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_pbf8(
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    A 128-bit vector of [16 x bf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set,
 ///    then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
@@ -806,8 +806,8 @@ _mm256_maskz_cvtbiassph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 }
 
 /// Add two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -826,7 +826,7 @@ _mm256_maskz_cvtbiassph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    sum of elements from \a __A and \a __B; higher order elements are zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
@@ -835,10 +835,10 @@ _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
 }
 
 /// Add two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
+///    Merging mask \a __U is used to determine if given element should be taken
+///    from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -857,7 +857,7 @@ _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2HF8 instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
@@ -865,7 +865,7 @@ _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
 ///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -875,10 +875,10 @@ _mm_mask_cvtbiasph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 }
 
 /// Add two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
-/// Zeroing mask \a __U is used to determine if given element should be zeroed
-/// instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
+///    Zeroing mask \a __U is used to determine if given element should be zeroed
+///    instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -903,7 +903,7 @@ _mm_mask_cvtbiasph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
 ///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -914,8 +914,8 @@ _mm_maskz_cvtbiasph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 }
 
 /// Add two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -932,7 +932,7 @@ _mm_maskz_cvtbiasph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Elements correspond to the
 ///    sum of elements from \a __A and \a __B.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
@@ -942,10 +942,10 @@ _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
 }
 
 /// Add two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
+///    Merging mask \a __U is used to determine if given element should be taken
+///    from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -962,7 +962,7 @@ _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2HF8 instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
@@ -970,7 +970,7 @@ _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    A 128-bit vector of [16 x hf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_phf8(
@@ -980,10 +980,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_phf8(
 }
 
 /// Add two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E4M3 
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
+///    Zeroing mask \a __U is used to determine if given element should be
+///    zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -1006,7 +1006,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_phf8(
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    A 128-bit vector of [16 x hf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set,
 ///    then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
@@ -1017,9 +1017,9 @@ _mm256_maskz_cvtbiasph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 }
 
 /// Add two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
-/// Results are saturated.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
+///    Results are saturated.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -1038,7 +1038,7 @@ _mm256_maskz_cvtbiasph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    sum of elements from \a __A and \a __B; higher order elements are zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
 _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
@@ -1047,10 +1047,10 @@ _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
 }
 
 /// Add two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
-/// Results are saturated. Merging mask \a __U is used to determine if given
-/// element should be taken from \a __W instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
+///    Results are saturated. Merging mask \a __U is used to determine if given
+///    element should be taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -1069,7 +1069,7 @@ _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2HF8S instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
@@ -1077,7 +1077,7 @@ _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
 ///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -1087,10 +1087,10 @@ _mm_mask_cvtbiassph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 }
 
 /// Add two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
-/// Results are saturated. Zeroing mask \a __U is used to determine if given
-/// element should be zeroed instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
+///    Results are saturated. Zeroing mask \a __U is used to determine if given
+///    element should be zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -1115,7 +1115,7 @@ _mm_mask_cvtbiassph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the sum of
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
 ///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -1126,9 +1126,9 @@ _mm_maskz_cvtbiassph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 }
 
 /// Add two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
-/// Results are saturated.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
+///    Results are saturated.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -1145,7 +1145,7 @@ _mm_maskz_cvtbiassph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Elements correspond to the
 ///    sum of elements from \a __A and \a __B.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
@@ -1155,10 +1155,10 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
 }
 
 /// Add two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
-/// Results are saturated. Merging mask \a __U is used to determine if given
-/// element should be taken from \a __W instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
+///    Results are saturated. Merging mask \a __U is used to determine if given
+///    element should be taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -1175,7 +1175,7 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2HF8S instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
@@ -1183,7 +1183,7 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    A 128-bit vector of [16 x hf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_phf8(
@@ -1193,10 +1193,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_phf8(
 }
 
 /// Add two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements and 8-bit integers stored in the lower half of
-/// packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
-/// Results are saturated. Merging mask \a __U is used to determine if given
-/// element should be taken from \a __W instead.
+///    floating-point elements and 8-bit integers stored in the lower half of
+///    packed 16-bit integers, respectively. Results are converted to FP8 E4M3.
+///    Results are saturated. Zeroing mask \a __U is used to determine if given
+///    element should be zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -1219,7 +1219,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_phf8(
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Elements correspond to the sum of
+///    A 128-bit vector of [16 x hf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set,
 ///    then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
@@ -1230,7 +1230,7 @@ _mm256_maskz_cvtbiassph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 }
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
+///    floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 16 
@@ -1251,7 +1251,7 @@ _mm256_maskz_cvtbiassph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
@@ -1261,9 +1261,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
 }
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead.
+///    floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
+///    Merging mask \a __U is used to determine if given element should be taken
+///    from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -1284,7 +1284,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
 /// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
@@ -1292,7 +1292,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
@@ -1303,9 +1303,9 @@ _mm_mask_cvtne2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 }
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
-/// Zeroing mask \a __U is used to determine if given element should be zeroed
-/// instead.
+///    floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
+///    Zeroing mask \a __U is used to determine if given element should be zeroed
+///    instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 16 
@@ -1332,7 +1332,7 @@ _mm_mask_cvtne2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    zero is taken instead.
@@ -1344,7 +1344,7 @@ _mm_maskz_cvtne2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 }
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
+///    floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 31 
@@ -1365,7 +1365,7 @@ _mm_maskz_cvtne2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
+///    A 256-bit vector of [32 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -1375,9 +1375,9 @@ _mm256_cvtne2ph_pbf8(__m256h __A, __m256h __B) {
 }
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead.
+///    floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
+///    Merging mask \a __U is used to determine if given element should be taken
+///    from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 31 
@@ -1398,7 +1398,7 @@ _mm256_cvtne2ph_pbf8(__m256h __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
 ///
 /// \param __W
-///    A 256-bit vector of [32 x fp8].
+///    A 256-bit vector of [32 x bf8].
 /// \param __U
 ///    A 32-bit merging mask.
 /// \param __A
@@ -1406,7 +1406,7 @@ _mm256_cvtne2ph_pbf8(__m256h __A, __m256h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
+///    A 256-bit vector of [32 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
@@ -1417,9 +1417,9 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_pbf8(
 }
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
-/// Merging mask \a __U is used to determine if given element should be zeroed
-/// instead.
+///    floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
+///    Zeroing mask \a __U is used to determine if given element should be zeroed
+///    instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 31 
@@ -1446,7 +1446,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_pbf8(
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
+///    A 256-bit vector of [32 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    zero is taken instead.
@@ -1458,8 +1458,8 @@ _mm256_maskz_cvtne2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 }
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
-/// Resulting elements are saturated in case of an overflow.
+///    floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
+///    Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 16 
@@ -1480,7 +1480,7 @@ _mm256_maskz_cvtne2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -1490,9 +1490,9 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 }
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead. Resulting elements are saturated in case of an overflow.
+///    floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
+///    Merging mask \a __U is used to determine if given element should be taken
+///    from \a __W instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 16 
@@ -1513,7 +1513,7 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
@@ -1521,7 +1521,7 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower 8 elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Lower 8 elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
@@ -1532,9 +1532,9 @@ _mm_mask_cvtnes2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 }
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
-/// Zeroing mask \a __U is used to determine if given element should be zeroed
-/// instead. Resulting elements are saturated in case of an overflow.
+///    floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
+///    Zeroing mask \a __U is used to determine if given element should be zeroed
+///    instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -1561,7 +1561,7 @@ _mm_mask_cvtnes2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    zero is taken instead.
@@ -1573,8 +1573,8 @@ _mm_maskz_cvtnes2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 }
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
-/// Resulting elements are saturated in case of an overflow.
+///    floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
+///    Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 31
@@ -1595,7 +1595,7 @@ _mm_maskz_cvtnes2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
+///    A 256-bit vector of [32 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -1605,9 +1605,9 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 }
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead. Resulting elements are saturated in case of an overflow.
+///    floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
+///    Merging mask \a __U is used to determine if given element should be taken
+///    from \a __W instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 31
@@ -1628,7 +1628,7 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
 ///
 /// \param __W
-///    A 256-bit vector of [32 x fp8].
+///    A 256-bit vector of [32 x bf8].
 /// \param __U
 ///    A 32-bit merging mask.
 /// \param __A
@@ -1636,7 +1636,7 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
+///    A 256-bit vector of [32 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
@@ -1647,9 +1647,9 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_pbf8(
 }
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
-/// Merging mask \a __U is used to determine if given element should be zeroed
-/// instead. Resulting elements are saturated in case of an overflow.
+///    floating-point elements to a 256-bit vector containing E5M2 FP8 elements.
+///    Zeroing mask \a __U is used to determine if given element should be zeroed
+///    instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 31
@@ -1676,7 +1676,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_pbf8(
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
+///    A 256-bit vector of [32 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    zero is taken instead.
@@ -1688,7 +1688,7 @@ _mm256_maskz_cvtnes2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 }
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
+///    floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -1709,7 +1709,7 @@ _mm256_maskz_cvtnes2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
@@ -1719,9 +1719,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
 }
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead.
+///    floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
+///    Merging mask \a __U is used to determine if given element should be taken
+///    from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -1742,7 +1742,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
 /// This intrinsic corresponds to the \c VCVTNE2PH2HF8 instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
@@ -1750,7 +1750,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
@@ -1761,9 +1761,9 @@ _mm_mask_cvtne2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 }
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
-/// Zeroing mask \a __U is used to determine if given element should be zeroed
-/// instead.
+///    floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
+///    Zeroing mask \a __U is used to determine if given element should be zeroed
+///    instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15 
@@ -1790,7 +1790,7 @@ _mm_mask_cvtne2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    zero is taken instead.
@@ -1802,7 +1802,7 @@ _mm_maskz_cvtne2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 }
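
As a usage note for the two-source FP16-to-E4M3 conversions documented above, here is a minimal sketch of the unmasked, merge-masked, and zero-masked forms. It is illustrative only: it assumes a toolchain that accepts -mavx10.2-256 and the intrinsic names as they appear in this revision of the header, and the wrapper names are placeholders.

  #include <immintrin.h>

  // Pack two vectors of 8 fp16 values into one 128-bit vector of 16 hf8 values.
  // Per the comments above, the low 8 results come from b and the high 8 from a.
  static __m128i pack_to_hf8(__m128h a, __m128h b) {
    return _mm_cvtne2ph_phf8(a, b);
  }

  // Merge-masked form: lanes whose mask bit is clear keep the value from w.
  static __m128i pack_to_hf8_merge(__m128i w, __mmask16 m, __m128h a, __m128h b) {
    return _mm_mask_cvtne2ph_phf8(w, m, a, b);
  }

  // Zero-masked form: lanes whose mask bit is clear are zeroed.
  static __m128i pack_to_hf8_zero(__mmask16 m, __m128h a, __m128h b) {
    return _mm_maskz_cvtne2ph_phf8(m, a, b);
  }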
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
+///    floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 31
@@ -1823,7 +1823,7 @@ _mm_maskz_cvtne2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
+///    A 256-bit vector of [32 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -1833,9 +1833,9 @@ _mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
 }
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead.
+///    floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
+///    Merging mask \a __U is used to determine if given element should be taken
+///    from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 31
@@ -1856,7 +1856,7 @@ _mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2HF8 instruction.
 ///
 /// \param __W
-///    A 256-bit vector of [32 x fp8].
+///    A 256-bit vector of [32 x hf8].
 /// \param __U
 ///    A 32-bit merging mask.
 /// \param __A
@@ -1864,7 +1864,7 @@ _mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
+///    A 256-bit vector of [32 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
@@ -1875,9 +1875,9 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_phf8(
 }
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
-/// Merging mask \a __U is used to determine if given element should be zeroed
-/// instead.
+///    floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
+///    Zeroing mask \a __U is used to determine if given element should be zeroed
+///    instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 31
@@ -1904,7 +1904,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_phf8(
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
+///    A 256-bit vector of [32 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    zero is taken instead.
@@ -1916,8 +1916,8 @@ _mm256_maskz_cvtne2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 }
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
-/// Resulting elements are saturated in case of an overflow.
+///    floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
+///    Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -1938,7 +1938,7 @@ _mm256_maskz_cvtne2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -1948,9 +1948,9 @@ _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
 }
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead. Resulting elements are saturated in case of an overflow.
+///    floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
+///    Merging mask \a __U is used to determine if given element should be taken
+///    from \a __W instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -1971,7 +1971,7 @@ _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
@@ -1979,7 +1979,7 @@ _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
@@ -1990,9 +1990,9 @@ _mm_mask_cvtnes2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 }
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
-/// Zeroing mask \a __U is used to determine if given element should be zeroed
-/// instead. Resulting elements are saturated in case of an overflow.
+///    floating-point elements to a 128-bit vector containing E4M3 FP8 elements.
+///    Zeroing mask \a __U is used to determine if given element should be zeroed
+///    instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -2019,7 +2019,7 @@ _mm_mask_cvtnes2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __B
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    zero is taken instead.
@@ -2031,8 +2031,8 @@ _mm_maskz_cvtnes2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 }
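
The saturating ("s") variants documented above differ from the plain cvtne2ph forms only in overflow handling, which the short sketch below makes concrete. It is a hedged illustration: it assumes AVX10.2-256 support and this revision's intrinsic names, and the wrapper name is a placeholder.

  #include <immintrin.h>

  // Convert the same two fp16 vectors with and without saturation. hf8 (E4M3)
  // has a narrow range, so values that overflow it are clamped only by the
  // saturating variant, as the comments above describe.
  static void to_hf8_both(__m128h a, __m128h b, __m128i *plain, __m128i *sat) {
    *plain = _mm_cvtne2ph_phf8(a, b);
    *sat   = _mm_cvtnes2ph_phf8(a, b);
  }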
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
-/// Resulting elements are saturated in case of an overflow.
+///    floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
+///    Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 31
@@ -2053,7 +2053,7 @@ _mm_maskz_cvtnes2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
+///    A 256-bit vector of [32 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -2063,9 +2063,9 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 }
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead. Resulting elements are saturated in case of an overflow.
+///    floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
+///    Merging mask \a __U is used to determine if given element should be taken
+///    from \a __W instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 31
@@ -2086,7 +2086,7 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
 ///
 /// \param __W
-///    A 256-bit vector of [32 x fp8].
+///    A 256-bit vector of [32 x hf8].
 /// \param __U
 ///    A 32-bit merging mask.
 /// \param __A
@@ -2094,7 +2094,7 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
+///    A 256-bit vector of [32 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
@@ -2105,9 +2105,9 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_phf8(
 }
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed FP16
-/// floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
-/// Merging mask \a __U is used to determine if given element should be zeroed
-/// instead. Resulting elements are saturated in case of an overflow.
+///    floating-point elements to a 256-bit vector containing E4M3 FP8 elements.
+///    Zeroing mask \a __U is used to determine if given element should be zeroed
+///    instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 31 
@@ -2134,7 +2134,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_phf8(
 /// \param __B
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 256-bit vector of [32 x fp8]. Lower elements correspond to the
+///    A 256-bit vector of [32 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    zero is taken instead.
@@ -2146,7 +2146,7 @@ _mm256_maskz_cvtnes2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 }
 
 /// Convert 128-bit vector \a __A, containing packed FP8 E4M3 floating-point
-/// elements to a 128-bit vector containing FP16 elements. The conversion is exact.
+///    elements to a 128-bit vector containing FP16 elements. The conversion is exact.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -2159,7 +2159,7 @@ _mm256_maskz_cvtnes2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTHF82PH instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \returns
 ///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
 ///    (converted) elements from \a __A.
@@ -2169,9 +2169,9 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtnehf8_ph(__m128i __A) {
 }
 
 /// Convert 128-bit vector \a __A, containing packed FP8 E4M3 floating-point
-/// elements to a 128-bit vector containing FP16 elements. The conversion is
-/// exact. Merging mask \a __U is used to determine if given element should be
-/// taken from \a __W instead.
+///    elements to a 128-bit vector containing FP16 elements. The conversion is
+///    exact. Merging mask \a __U is used to determine if given element should be
+///    taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -2192,7 +2192,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtnehf8_ph(__m128i __A) {
 /// \param __U
 ///    An 8-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \returns
 ///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
@@ -2204,9 +2204,9 @@ _mm_mask_cvtnehf8_ph(__m128h __W, __mmask8 __U, __m128i __A) {
 }
 
 /// Convert 128-bit vector \a __A, containing packed FP8 E4M3 floating-point
-/// elements to a 128-bit vector containing FP16 elements. The conversion is
-/// exact. Zeroing mask \a __U is used to determine if given element should be
-/// zeroed instead.
+///    elements to a 128-bit vector containing FP16 elements. The conversion is
+///    exact. Zeroing mask \a __U is used to determine if given element should be
+///    zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -2225,7 +2225,7 @@ _mm_mask_cvtnehf8_ph(__m128h __W, __mmask8 __U, __m128i __A) {
 /// \param __U
 ///    An 8-bit zeroing mask.
 /// \param __A
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \returns
 ///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
@@ -2237,7 +2237,7 @@ _mm_maskz_cvtnehf8_ph(__mmask8 __U, __m128i __A) {
 }
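
Because the FP8-to-FP16 direction is documented above as exact, a narrow-then-widen round trip loses precision only in the narrowing step. A minimal sketch, assuming AVX10.2-256 support and this revision's intrinsic names (the helper name is a placeholder):

  #include <immintrin.h>

  // Narrow 8 fp16 values to hf8 and widen them back. The widening
  // (_mm_cvtnehf8_ph) is exact; any rounding happens in the narrowing step.
  static __m128h narrow_and_widen(__m128h x) {
    __m128i hf8 = _mm_cvtne2ph_phf8(x, x); // low 8 hf8 lanes hold hf8(x)
    return _mm_cvtnehf8_ph(hf8);           // widen the low 8 hf8 lanes to fp16
  }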
 
 /// Convert 128-bit vector \a __A, containing packed FP8 E4M3 floating-point
-/// elements to a 256-bit vector containing FP16 elements. The conversion is exact.
+///    elements to a 256-bit vector containing FP16 elements. The conversion is exact.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -2250,7 +2250,7 @@ _mm_maskz_cvtnehf8_ph(__mmask8 __U, __m128i __A) {
 /// This intrinsic corresponds to the \c VCVTHF82PH instruction.
 ///
 /// \param __A
-///    A 256-bit vector of [32 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \returns
 ///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
 ///    (converted) elements from \a __A.
@@ -2261,9 +2261,9 @@ _mm256_cvtnehf8_ph(__m128i __A) {
 }
 
 /// Convert 128-bit vector \a __A, containing packed FP8 E4M3 floating-point
-/// elements to a 256-bit vector containing FP16 elements. The conversion is
-/// exact. Merging mask \a __U is used to determine if given element should be
-/// taken from \a __W instead.
+///    elements to a 256-bit vector containing FP16 elements. The conversion is
+///    exact. Merging mask \a __U is used to determine if given element should be
+///    taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15 
@@ -2284,7 +2284,7 @@ _mm256_cvtnehf8_ph(__m128i __A) {
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
-///    A 256-bit vector of [32 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \returns
 ///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
@@ -2296,9 +2296,9 @@ _mm256_mask_cvtnehf8_ph(__m256h __W, __mmask16 __U, __m128i __A) {
 }
 
 /// Convert 128-bit vector \a __A, containing packed FP8 E4M3 floating-point
-/// elements to a 256-bit vector containing FP16 elements. The conversion is
-/// exact. Zeroing mask \a __U is used to determine if given element should be
-/// zeroed instead.
+///    elements to a 256-bit vector containing FP16 elements. The conversion is
+///    exact. Zeroing mask \a __U is used to determine if given element should be
+///    zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15 
@@ -2317,7 +2317,7 @@ _mm256_mask_cvtnehf8_ph(__m256h __W, __mmask16 __U, __m128i __A) {
 /// \param __U
 ///    A 16-bit zeroing mask.
 /// \param __A
-///    A 256-bit vector of [32 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \returns
 ///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
@@ -2329,8 +2329,8 @@ _mm256_maskz_cvtnehf8_ph(__mmask16 __U, __m128i __A) {
 }
 
 /// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
-/// resulting vector are zeroed.
+///    to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
+///    resulting vector are zeroed.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -2347,7 +2347,7 @@ _mm256_maskz_cvtnehf8_ph(__mmask16 __U, __m128i __A) {
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the (converted)
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the (converted)
 ///    elements from \a __A; upper elements are zeroed. 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_pbf8(__m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8_128_mask(
@@ -2355,9 +2355,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_pbf8(__m128h __A) {
 }
 
 /// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
-/// resulting vector are zeroed. Merging mask \a __U is used to determine if
-/// given element should be taken from \a __W instead.
+///    to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
+///    resulting vector are zeroed. Merging mask \a __U is used to determine if
+///    given element should be taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -2376,13 +2376,13 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_pbf8(__m128h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2BF8 instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \param __U
 ///    An 8-bit merging mask.
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
 ///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -2392,9 +2392,9 @@ _mm_mask_cvtneph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 }
 
 /// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
-/// resulting vector are zeroed. Zeroing mask \a __U is used to determine if
-/// given element should be zeroed instead.
+///    to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
+///    resulting vector are zeroed. Zeroing mask \a __U is used to determine if
+///    given element should be zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -2417,7 +2417,7 @@ _mm_mask_cvtneph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
 ///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -2427,7 +2427,7 @@ _mm_maskz_cvtneph_pbf8(__mmask8 __U, __m128h __A) {
 }
 
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E5M2 FP8 elements.
+///    to a 128-bit vector containing E5M2 FP8 elements.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -2444,7 +2444,7 @@ _mm_maskz_cvtneph_pbf8(__mmask8 __U, __m128h __A) {
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the (converted)
+///    A 128-bit vector of [16 x bf8]. Resulting elements correspond to the (converted)
 ///    elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtneph_pbf8(__m256h __A) {
@@ -2453,8 +2453,8 @@ _mm256_cvtneph_pbf8(__m256h __A) {
 }
 
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E5M2 FP8 elements. Merging mask \a __U is
-/// used to determine if given element should be taken from \a __W instead.
+///    to a 128-bit vector containing E5M2 FP8 elements. Merging mask \a __U is
+///    used to determine if given element should be taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -2473,13 +2473,13 @@ _mm256_cvtneph_pbf8(__m256h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2BF8 instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If
 ///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
@@ -2489,8 +2489,8 @@ _mm256_mask_cvtneph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 }
 
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E5M2 FP8 elements. Zeroing mask \a __U is
-/// used to determine if given element should be zeroed instead.
+///    to a 128-bit vector containing E5M2 FP8 elements. Zeroing mask \a __U is
+///    used to determine if given element should be zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -2513,7 +2513,7 @@ _mm256_mask_cvtneph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    then element is zeroed instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
@@ -2523,8 +2523,8 @@ _mm256_maskz_cvtneph_pbf8(__mmask16 __U, __m256h __A) {
 }
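
For the single-source conversions above, the destination is half the width of the source: 16 fp16 values in a 256-bit vector become 16 bf8 bytes in a 128-bit vector. A hedged sketch, assuming AVX10.2-256 support and this revision's intrinsic names (wrapper names are placeholders):

  #include <immintrin.h>

  // Narrow 16 fp16 values to 16 bf8 (E5M2) bytes filling a 128-bit vector.
  static __m128i to_bf8(__m256h v) {
    return _mm256_cvtneph_pbf8(v);
  }

  // Merge-masked form: bytes whose mask bit is clear keep the value from w.
  static __m128i to_bf8_merge(__m128i w, __mmask16 m, __m256h v) {
    return _mm256_mask_cvtneph_pbf8(w, m, v);
  }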
 
 /// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
-/// resulting vector are zeroed. Results are saturated.
+///    to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
+///    resulting vector are zeroed. Results are saturated.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -2541,7 +2541,7 @@ _mm256_maskz_cvtneph_pbf8(__mmask16 __U, __m256h __A) {
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the (converted)
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the (converted)
 ///    elements from \a __A; upper elements are zeroed. 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_pbf8(__m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2bf8s_128_mask(
@@ -2549,9 +2549,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_pbf8(__m128h __A) {
 }
 
 /// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
-/// resulting vector are zeroed. Results are saturated. Merging mask \a __U is
-/// used to determine if given element should be taken from \a __W instead.
+///    to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
+///    resulting vector are zeroed. Results are saturated. Merging mask \a __U is
+///    used to determine if given element should be taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -2570,13 +2570,13 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_pbf8(__m128h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \param __U
 ///    An 8-bit merging mask.
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
 ///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -2586,9 +2586,9 @@ _mm_mask_cvtnesph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 }
 
 /// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
-/// resulting vector are zeroed. Results are saturated. Zeroing mask \a __U is
-/// used to determine if given element should be zeroed instead.
+///    to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
+///    resulting vector are zeroed. Results are saturated. Zeroing mask \a __U is
+///    used to determine if given element should be zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -2611,7 +2611,7 @@ _mm_mask_cvtnesph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
 ///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -2621,7 +2621,7 @@ _mm_maskz_cvtnesph_pbf8(__mmask8 __U, __m128h __A) {
 }
 
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E5M2 FP8 elements. Results are saturated.
+///    to a 128-bit vector containing E5M2 FP8 elements. Results are saturated.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -2638,7 +2638,7 @@ _mm_maskz_cvtnesph_pbf8(__mmask8 __U, __m128h __A) {
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the (converted)
+///    A 128-bit vector of [16 x bf8]. Resulting elements correspond to the (converted)
 ///    elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtnesph_pbf8(__m256h __A) {
@@ -2647,9 +2647,9 @@ _mm256_cvtnesph_pbf8(__m256h __A) {
 }
 
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E5M2 FP8 elements. Results are saturated.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead.
+///    to a 128-bit vector containing E5M2 FP8 elements. Results are saturated.
+///    Merging mask \a __U is used to determine if given element should be taken
+///    from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -2668,13 +2668,13 @@ _mm256_cvtnesph_pbf8(__m256h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If
 ///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
@@ -2684,9 +2684,9 @@ _mm256_mask_cvtnesph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 }
 
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E5M2 FP8 elements. Results are saturated.
-/// Zeroing mask \a __U is used to determine if given element should be zeroed
-/// instead.
+///    to a 128-bit vector containing E5M2 FP8 elements. Results are saturated.
+///    Zeroing mask \a __U is used to determine if given element should be zeroed
+///    instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15 
@@ -2709,7 +2709,7 @@ _mm256_mask_cvtnesph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    A 128-bit vector of [16 x bf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    then element is zeroed instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
@@ -2719,8 +2719,8 @@ _mm256_maskz_cvtnesph_pbf8(__mmask16 __U, __m256h __A) {
 }
 
 /// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
-/// resulting vector are zeroed.
+///    to a 128-bit vector containing E5M2 FP8 elements. Upper elements of
+///    resulting vector are zeroed.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -2737,7 +2737,7 @@ _mm256_maskz_cvtnesph_pbf8(__mmask16 __U, __m256h __A) {
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the (converted)
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the (converted)
 ///    elements from \a __A; upper elements are zeroed. 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_phf8(__m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8_128_mask(
@@ -2745,9 +2745,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_phf8(__m128h __A) {
 }
 
 /// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
-/// resulting vector are zeroed. Merging mask \a __U is used to determine if
-/// given element should be taken from \a __W instead.
+///    to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
+///    resulting vector are zeroed. Merging mask \a __U is used to determine if
+///    given element should be taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -2766,13 +2766,13 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_phf8(__m128h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2HF8 instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \param __U
 ///    An 8-bit merging mask.
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
 ///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -2782,9 +2782,9 @@ _mm_mask_cvtneph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 }
 
 /// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
-/// resulting vector are zeroed. Zeroing mask \a __U is used to determine if
-/// given element should be zeroed instead.
+///    to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
+///    resulting vector are zeroed. Zeroing mask \a __U is used to determine if
+///    given element should be zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -2807,7 +2807,7 @@ _mm_mask_cvtneph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
 ///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -2817,7 +2817,7 @@ _mm_maskz_cvtneph_phf8(__mmask8 __U, __m128h __A) {
 }
 
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E4M3 FP8 elements.
+///    to a 128-bit vector containing E4M3 FP8 elements.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -2834,7 +2834,7 @@ _mm_maskz_cvtneph_phf8(__mmask8 __U, __m128h __A) {
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the (converted)
+///    A 128-bit vector of [16 x hf8]. Resulting elements correspond to the (converted)
 ///    elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtneph_phf8(__m256h __A) {
@@ -2843,8 +2843,8 @@ _mm256_cvtneph_phf8(__m256h __A) {
 }
 
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E4M3 FP8 elements. Merging mask \a __U is
-/// used to determine if given element should be taken from \a __W instead.
+///    to a 128-bit vector containing E4M3 FP8 elements. Merging mask \a __U is
+///    used to determine if given element should be taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -2863,13 +2863,13 @@ _mm256_cvtneph_phf8(__m256h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2HF8 instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If
 ///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
@@ -2879,8 +2879,8 @@ _mm256_mask_cvtneph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 }
 
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E4M3 FP8 elements. Zeroing mask \a __U is
-/// used to determine if given element should be zeroed instead.
+///    to a 128-bit vector containing E4M3 FP8 elements. Zeroing mask \a __U is
+///    used to determine if given element should be zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -2903,7 +2903,7 @@ _mm256_mask_cvtneph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    then element is zeroed instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
@@ -2913,8 +2913,8 @@ _mm256_maskz_cvtneph_phf8(__mmask16 __U, __m256h __A) {
 }
 
 /// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
-/// resulting vector are zeroed. Results are saturated.
+///    to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
+///    resulting vector are zeroed. Results are saturated.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -2931,7 +2931,7 @@ _mm256_maskz_cvtneph_phf8(__mmask16 __U, __m256h __A) {
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the (converted)
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the (converted)
 ///    elements from \a __A; upper elements are zeroed. 
 static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_phf8(__m128h __A) {
   return (__m128i)__builtin_ia32_vcvtneph2hf8s_128_mask(
@@ -2939,9 +2939,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_phf8(__m128h __A) {
 }
 
 /// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
-/// resulting vector are zeroed. Results are saturated. Merging mask \a __U is
-/// used to determine if given element should be taken from \a __W instead.
+///    to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
+///    resulting vector are zeroed. Results are saturated. Merging mask \a __U is
+///    used to determine if given element should be taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -2960,13 +2960,13 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_phf8(__m128h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \param __U
 ///    An 8-bit merging mask.
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
 ///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -2976,9 +2976,9 @@ _mm_mask_cvtnesph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 }
 
 /// Convert 128-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
-/// resulting vector are zeroed. Results are saturated. Zeroing mask \a __U is
-/// used to determine if given element should be zeroed instead.
+///    to a 128-bit vector containing E4M3 FP8 elements. Upper elements of
+///    resulting vector are zeroed. Results are saturated. Zeroing mask \a __U is
+///    used to determine if given element should be zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -3001,7 +3001,7 @@ _mm_mask_cvtnesph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \param __A
 ///    A 128-bit vector of [8 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Lower elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
 ///    corresponding mask bit is not set, then element is zeroed.
 static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -3011,7 +3011,7 @@ _mm_maskz_cvtnesph_phf8(__mmask8 __U, __m128h __A) {
 }
 
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E4M3 FP8 elements. Results are saturated.
+///    to a 128-bit vector containing E4M3 FP8 elements. Results are saturated.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -3028,7 +3028,7 @@ _mm_maskz_cvtnesph_phf8(__mmask8 __U, __m128h __A) {
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the (converted)
+///    A 128-bit vector of [16 x hf8]. Resulting elements correspond to the (converted)
 ///    elements from \a __A.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
 _mm256_cvtnesph_phf8(__m256h __A) {
@@ -3037,9 +3037,9 @@ _mm256_cvtnesph_phf8(__m256h __A) {
 }
 
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E4M3 FP8 elements. Results are saturated.
-/// Merging mask \a __U is used to determine if given element should be taken
-/// from \a __W instead.
+///    to a 128-bit vector containing E4M3 FP8 elements. Results are saturated.
+///    Merging mask \a __U is used to determine if given element should be taken
+///    from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -3058,13 +3058,13 @@ _mm256_cvtnesph_phf8(__m256h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x hf8].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If
 ///    corresponding mask bit is not set, then element from \a __W is taken instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
@@ -3074,9 +3074,9 @@ _mm256_mask_cvtnesph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 }
 
 /// Convert 256-bit vector \a __A containing packed FP16 floating-point elements
-/// to a 128-bit vector containing E4M3 FP8 elements. Results are saturated.
-/// Zeroing mask \a __U is used to determine if given element should be zeroed
-/// instead.
+///    to a 128-bit vector containing E4M3 FP8 elements. Results are saturated.
+///    Zeroing mask \a __U is used to determine if given element should be zeroed
+///    instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15 
@@ -3099,7 +3099,7 @@ _mm256_mask_cvtnesph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \param __A
 ///    A 256-bit vector of [16 x fp16].
 /// \returns
-///    A 128-bit vector of [16 x fp8]. Resulting elements correspond to the
+///    A 128-bit vector of [16 x hf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    then element is zeroed instead.
 static __inline__ __m128i __DEFAULT_FN_ATTRS256
@@ -3109,7 +3109,7 @@ _mm256_maskz_cvtnesph_phf8(__mmask16 __U, __m256h __A) {
 }
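
The bf8 (E5M2) and hf8 (E4M3) encodings documented in this header trade range for precision: E5M2 keeps the fp16 exponent width with 2 mantissa bits, while E4M3 gives up an exponent bit for a third mantissa bit. A hedged sketch producing both encodings from the same data, assuming AVX10.2-256 support and this revision's intrinsic names (the wrapper name is a placeholder):

  #include <immintrin.h>

  // Narrow 16 fp16 values to both FP8 encodings, saturating on overflow.
  static void to_fp8_both(__m256h v, __m128i *bf8, __m128i *hf8) {
    *bf8 = _mm256_cvtnesph_pbf8(v); // E5M2: wider range, 2 mantissa bits
    *hf8 = _mm256_cvtnesph_phf8(v); // E4M3: narrower range, 3 mantissa bits
  }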
 
 /// Convert 128-bit vector \a __A, containing packed FP8 E5M2 floating-point
-/// elements to a 128-bit vector containing FP16 elements. The conversion is exact.
+///    elements to a 128-bit vector containing FP16 elements. The conversion is exact.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -3122,7 +3122,7 @@ _mm256_maskz_cvtnesph_phf8(__mmask16 __U, __m256h __A) {
 /// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \returns
 ///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
 ///    (converted) elements from \a __A.
@@ -3131,9 +3131,9 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpbf8_ph(__m128i __A) {
 }
 
 /// Convert 128-bit vector \a __A, containing packed FP8 E5M2 floating-point
-/// elements to a 128-bit vector containing FP16 elements. The conversion is
-/// exact. Merging mask \a __U is used to determine if given element should be
-/// taken from \a __W instead.
+///    elements to a 128-bit vector containing FP16 elements. The conversion is
+///    exact. Merging mask \a __U is used to determine if given element should be
+///    taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -3154,7 +3154,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpbf8_ph(__m128i __A) {
 /// \param __U
 ///    An 8-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \returns
 ///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
@@ -3166,9 +3166,9 @@ _mm_mask_cvtpbf8_ph(__m128h __S, __mmask8 __U, __m128i __A) {
 }
 
 /// Convert 128-bit vector \a __A, containing packed FP8 E5M2 floating-point
-/// elements to a 128-bit vector containing FP16 elements. The conversion is
-/// exact. Zeroing mask \a __U is used to determine if given element should be
-/// zeroed instead.
+///    elements to a 128-bit vector containing FP16 elements. The conversion is
+///    exact. Zeroing mask \a __U is used to determine if given element should be
+///    zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
@@ -3187,7 +3187,7 @@ _mm_mask_cvtpbf8_ph(__m128h __S, __mmask8 __U, __m128i __A) {
 /// \param __U
 ///    An 8-bit zeroing mask.
 /// \param __A
-///    A 128-bit vector of [16 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \returns
 ///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
@@ -3198,7 +3198,7 @@ _mm_maskz_cvtpbf8_ph(__mmask8 __U, __m128i __A) {
 }
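
Since bf8 (E5M2) has the same sign/exponent layout as the top byte of an fp16 value, widening it back to FP16 is exact and, as noted above, needs no dedicated instruction. A minimal sketch, assuming AVX10.2-256 support and this revision's intrinsic names (wrapper names are placeholders):

  #include <immintrin.h>

  // Widen the low 8 bf8 lanes of a 128-bit vector to 8 fp16 values.
  static __m128h widen_bf8(__m128i bf8) {
    return _mm_cvtpbf8_ph(bf8);
  }

  // Zero-masked form: lanes whose mask bit is clear are zeroed.
  static __m128h widen_bf8_zero(__mmask8 m, __m128i bf8) {
    return _mm_maskz_cvtpbf8_ph(m, bf8);
  }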
 
 /// Convert 128-bit vector \a __A, containing packed FP8 E5M2 floating-point
-/// elements to a 256-bit vector containing FP16 elements. The conversion is exact.
+///    elements to a 256-bit vector containing FP16 elements. The conversion is exact.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
@@ -3211,7 +3211,7 @@ _mm_maskz_cvtpbf8_ph(__mmask8 __U, __m128i __A) {
 /// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __A
-///    A 256-bit vector of [32 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \returns
 ///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
 ///    (converted) elements from \a __A.
@@ -3220,9 +3220,9 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtpbf8_ph(__m128i __A) {
 }
 
 /// Convert 128-bit vector \a __A, containing packed FP8 E5M2 floating-point
-/// elements to a 256-bit vector containing FP16 elements. The conversion is
-/// exact. Merging mask \a __U is used to determine if given element should be
-/// taken from \a __W instead.
+///    elements to a 256-bit vector containing FP16 elements. The conversion is
+///    exact. Merging mask \a __U is used to determine if given element should be
+///    taken from \a __W instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15 
@@ -3243,7 +3243,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtpbf8_ph(__m128i __A) {
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
-///    A 256-bit vector of [32 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \returns
 ///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
@@ -3255,9 +3255,9 @@ _mm256_mask_cvtpbf8_ph(__m256h __S, __mmask8 __U, __m128i __A) {
 }
 
 /// Convert 128-bit vector \a __A, containing packed FP8 E5M2 floating-point
-/// elements to a 256-bit vector containing FP16 elements. The conversion is
-/// exact. Zeroing mask \a __U is used to determine if given element should be
-/// zeroed instead.
+///    elements to a 256-bit vector containing FP16 elements. The conversion is
+///    exact. Zeroing mask \a __U is used to determine if given element should be
+///    zeroed instead.
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15 
@@ -3276,7 +3276,7 @@ _mm256_mask_cvtpbf8_ph(__m256h __S, __mmask8 __U, __m128i __A) {
 /// \param __U
 ///    A 16-bit zeroing mask.
 /// \param __A
-///    A 256-bit vector of [32 x fp8].
+///    A 128-bit vector of [16 x bf8].
 /// \returns
 ///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then

>From 6a6a0c563f2f885c517f1f0076502617e30e3fcc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Miko=C5=82aj=20Pir=C3=B3g?= <mikolajpirog at gmail.com>
Date: Sat, 21 Dec 2024 22:31:32 +0100
Subject: [PATCH 08/22] Further reviewer suggestions: add missing dst[max:]

---
 clang/lib/Headers/avx10_2convertintrin.h | 196 +++++++++++++++++------
 1 file changed, 146 insertions(+), 50 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index 077d8c95b2f38b..aa6a502de47dbb 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -35,6 +35,8 @@
 /// 	ELSE
 /// 		dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 4])
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
@@ -72,6 +74,8 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtx2ps_ph(__m128 __A,
 /// 	ELSE
 /// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
+///
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 ///
@@ -115,6 +119,8 @@ _mm_mask_cvtx2ps_ph(__m128h __W, __mmask8 __U, __m128 __A, __m128 __B) {
 /// 		dst.fp16[i] := 0
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -150,6 +156,8 @@ _mm_maskz_cvtx2ps_ph(__mmask8 __U, __m128 __A, __m128 __B) {
 /// 		dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 8])
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -188,6 +196,8 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtx2ps_ph(__m256 __A,
 /// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -231,6 +241,8 @@ _mm256_mask_cvtx2ps_ph(__m256h __W, __mmask16 __U, __m256 __A, __m256 __B) {
 /// 		dst.fp16[i] := 0
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -267,6 +279,8 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// 		dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 8])
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -309,6 +323,8 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -354,6 +370,8 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// 		dst.fp16[i] := 0
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -390,7 +408,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -425,7 +443,7 @@ _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -465,7 +483,7 @@ _mm_mask_cvtbiasph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 ///	 FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -497,6 +515,8 @@ _mm_maskz_cvtbiasph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// FOR i := 0 to 15
 /// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -531,6 +551,8 @@ _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -569,6 +591,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_pbf8(
 ///	 	dst.fp8[i] := 0
 ///	 FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -602,7 +626,7 @@ _mm256_maskz_cvtbiasph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -637,7 +661,7 @@ _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -677,7 +701,7 @@ _mm_mask_cvtbiassph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 ///	 FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -710,6 +734,8 @@ _mm_maskz_cvtbiassph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// FOR i := 0 to 15
 /// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -744,6 +770,8 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -782,6 +810,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_pbf8(
 ///	 	dst.fp8[i] := 0
 ///	 FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -814,7 +844,7 @@ _mm256_maskz_cvtbiassph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -849,7 +879,7 @@ _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -889,7 +919,7 @@ _mm_mask_cvtbiasph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 ///	 FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1026,7 +1056,7 @@ _mm256_maskz_cvtbiasph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1061,7 +1091,7 @@ _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1101,7 +1131,7 @@ _mm_mask_cvtbiassph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 ///	 FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1134,6 +1164,8 @@ _mm_maskz_cvtbiassph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// FOR i := 0 to 15
 /// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1168,6 +1200,8 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1206,6 +1240,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_phf8(
 ///	 	dst.fp8[i] := 0
 ///	 FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1233,13 +1269,15 @@ _mm256_maskz_cvtbiassph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///    floating-point elements to a 128-bit vector containing E5M2 FP8 elements.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 16 
+/// FOR i := 0 to 15 
 /// 	IF i < 8
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 	ELSE
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1277,6 +1315,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1308,7 +1348,7 @@ _mm_mask_cvtne2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 ///    instead.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 16 
+/// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		IF i < 8
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
@@ -1319,6 +1359,8 @@ _mm_mask_cvtne2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// 		dst.fp8[i] := 0
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1354,6 +1396,8 @@ _mm_maskz_cvtne2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1391,6 +1435,8 @@ _mm256_cvtne2ph_pbf8(__m256h __A, __m256h __B) {
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1433,6 +1479,8 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_pbf8(
 /// 		FI
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1462,13 +1510,15 @@ _mm256_maskz_cvtne2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 ///    Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 16 
+/// FOR i := 0 to 15 
 /// 	IF i < 8
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
 /// 	ELSE
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1495,7 +1545,7 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 ///    from \a __W instead. Resulting elements are saturated in case of an overflow.
 ///
 /// \code{.operation}
-/// FOR i := 0 to 16 
+/// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		IF i < 8
 /// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
@@ -1506,6 +1556,8 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1548,6 +1600,8 @@ _mm_mask_cvtnes2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// 		dst.fp8[i] := 0
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1584,6 +1638,8 @@ _mm_maskz_cvtnes2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1621,6 +1677,8 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1663,6 +1721,8 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_pbf8(
 /// 		FI
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1698,6 +1758,8 @@ _mm256_maskz_cvtnes2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1735,6 +1797,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1777,6 +1841,8 @@ _mm_mask_cvtne2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// 		dst.fp8[i] := 0
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1812,6 +1878,8 @@ _mm_maskz_cvtne2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1849,6 +1917,8 @@ _mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1891,6 +1961,8 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_phf8(
 /// 		FI
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1927,6 +1999,8 @@ _mm256_maskz_cvtne2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1964,6 +2038,8 @@ _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2006,6 +2082,8 @@ _mm_mask_cvtnes2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// 		dst.fp8[i] := 0
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2042,6 +2120,8 @@ _mm_maskz_cvtnes2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2079,6 +2159,8 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 /// 		dst.fp8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2121,6 +2203,8 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_phf8(
 /// 		FI
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2152,6 +2236,8 @@ _mm256_maskz_cvtnes2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// FOR i := 0 to 7
 /// 	dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2181,6 +2267,8 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtnehf8_ph(__m128i __A) {
 /// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2216,6 +2304,8 @@ _mm_mask_cvtnehf8_ph(__m128h __W, __mmask8 __U, __m128i __A) {
 /// 		dst.fp16[i] := 0
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2243,6 +2333,8 @@ _mm_maskz_cvtnehf8_ph(__mmask8 __U, __m128i __A) {
 /// FOR i := 0 to 15
 /// 	dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2273,6 +2365,8 @@ _mm256_cvtnehf8_ph(__m128i __A) {
 /// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2308,6 +2402,8 @@ _mm256_mask_cvtnehf8_ph(__m256h __W, __mmask16 __U, __m128i __A) {
 /// 		dst.fp16[i] := 0
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:256] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2337,7 +2433,7 @@ _mm256_maskz_cvtnehf8_ph(__mmask16 __U, __m128i __A) {
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2368,7 +2464,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_pbf8(__m128h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2405,7 +2501,7 @@ _mm_mask_cvtneph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2434,7 +2530,7 @@ _mm_maskz_cvtneph_pbf8(__mmask8 __U, __m128h __A) {
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
 ///
-/// dst[255:128] := 0
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2465,7 +2561,7 @@ _mm256_cvtneph_pbf8(__m256h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[255:128] := 0
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2501,7 +2597,7 @@ _mm256_mask_cvtneph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2531,7 +2627,7 @@ _mm256_maskz_cvtneph_pbf8(__mmask16 __U, __m256h __A) {
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2562,7 +2658,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_pbf8(__m128h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2599,7 +2695,7 @@ _mm_mask_cvtnesph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2628,7 +2724,7 @@ _mm_maskz_cvtnesph_pbf8(__mmask8 __U, __m128h __A) {
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
 ///
-/// dst[255:128] := 0
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2660,7 +2756,7 @@ _mm256_cvtnesph_pbf8(__m256h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[255:128] := 0
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2697,7 +2793,7 @@ _mm256_mask_cvtnesph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2727,7 +2823,7 @@ _mm256_maskz_cvtnesph_pbf8(__mmask16 __U, __m256h __A) {
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2758,7 +2854,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_phf8(__m128h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2795,7 +2891,7 @@ _mm_mask_cvtneph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2824,7 +2920,7 @@ _mm_maskz_cvtneph_phf8(__mmask8 __U, __m128h __A) {
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
 ///
-/// dst[255:128] := 0
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2855,7 +2951,7 @@ _mm256_cvtneph_phf8(__m256h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[255:128] := 0
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2891,7 +2987,7 @@ _mm256_mask_cvtneph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2921,7 +3017,7 @@ _mm256_maskz_cvtneph_phf8(__mmask16 __U, __m256h __A) {
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2952,7 +3048,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_phf8(__m128h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -2989,7 +3085,7 @@ _mm_mask_cvtnesph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -3018,7 +3114,7 @@ _mm_maskz_cvtnesph_phf8(__mmask8 __U, __m128h __A) {
 /// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
 /// ENDFOR
 ///
-/// dst[255:128] := 0
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -3050,7 +3146,7 @@ _mm256_cvtnesph_phf8(__m256h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[255:128] := 0
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -3087,7 +3183,7 @@ _mm256_mask_cvtnesph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[127:64] := 0
+/// dst[MAX:64] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -3119,7 +3215,7 @@ _mm256_maskz_cvtnesph_phf8(__mmask16 __U, __m256h __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic does not corresponds to a single instruction.
+/// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __A
 ///    A 128-bit vector of [16 x hf8].
@@ -3147,7 +3243,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpbf8_ph(__m128i __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic does not corresponds to a single instruction.
+/// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __W
 ///    A 128-bit vector of [8 x fp16].
@@ -3160,9 +3256,9 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpbf8_ph(__m128i __A) {
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
-_mm_mask_cvtpbf8_ph(__m128h __S, __mmask8 __U, __m128i __A) {
+_mm_mask_cvtpbf8_ph(__m128h __W, __mmask8 __U, __m128i __A) {
   return _mm_castsi128_ph(
-      _mm_mask_slli_epi16((__m128i)__S, __U, _mm_cvtepi8_epi16(__A), 8));
+      _mm_mask_slli_epi16((__m128i)__W, __U, _mm_cvtepi8_epi16(__A), 8));
 }
 
 /// Convert 128-bit vector \a __A, containing packed FP8 E5M2 floating-point
@@ -3182,7 +3278,7 @@ _mm_mask_cvtpbf8_ph(__m128h __S, __mmask8 __U, __m128i __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic does not corresponds to a single instruction.
+/// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __U
 ///    A 8-bit zeroing mask.
@@ -3208,7 +3304,7 @@ _mm_maskz_cvtpbf8_ph(__mmask8 __U, __m128i __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic does not corresponds to a single instruction.
+/// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __A
 ///    A 256-bit vector of [32 x hf8].
@@ -3236,7 +3332,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtpbf8_ph(__m128i __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic does not corresponds to a single instruction.
+/// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __W
 ///    A 256-bit vector of [16 x fp16].
@@ -3249,9 +3345,9 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtpbf8_ph(__m128i __A) {
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtpbf8_ph(__m256h __S, __mmask8 __U, __m128i __A) {
+_mm256_mask_cvtpbf8_ph(__m256h __W, __mmask8 __U, __m128i __A) {
   return _mm256_castsi256_ph(
-      _mm256_mask_slli_epi16((__m256i)__S, __U, _mm256_cvtepi8_epi16(__A), 8));
+      _mm256_mask_slli_epi16((__m256i)__W, __U, _mm256_cvtepi8_epi16(__A), 8));
 }
 
 /// Convert 256-bit vector \a __A, containing packed FP8 E5M2 floating-point
@@ -3271,7 +3367,7 @@ _mm256_mask_cvtpbf8_ph(__m256h __S, __mmask8 __U, __m128i __A) {
 ///
 /// \headerfile <immintrin.h>
 ///
-/// This intrinsic does not corresponds to a single instruction.
+/// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __U
 ///    A 16-bit zeroing mask.

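(Not part of the patch: a minimal usage sketch of the masked bf8-to-half helper documented above, assuming an AVX10.2-enabled compiler and CPU; the input values and mask are arbitrary choices for illustration.)

  /* Each bf8 byte is widened and placed into the high byte of a 16-bit
     lane, so the bf8 value 0x40 becomes the half value 2.0. */
  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
    __m128i a = _mm_set1_epi8(0x40);          /* sixteen bf8 bytes */
    __m128h w = _mm_set1_ph((_Float16)1.0);   /* pass-through elements */
    __mmask8 u = 0x0F;                        /* convert lanes 0-3 only */
    __m128h r = _mm_mask_cvtpbf8_ph(w, u, a); /* lanes 0-3 -> 2.0, lanes 4-7 -> 1.0 */
    _Float16 out[8];
    _mm_storeu_ph(out, r);
    for (int i = 0; i < 8; ++i)
      printf("%g ", (double)out[i]);
    printf("\n");
    return 0;
  }
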
>From 647d10e45fc8bf8127b9877ff07b7723f5e5b827 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Miko=C5=82aj=20Pir=C3=B3g?= <mikolajpirog at gmail.com>
Date: Sat, 21 Dec 2024 22:32:55 +0100
Subject: [PATCH 09/22] Disable clang-format

---
 clang/lib/Headers/avx10_2convertintrin.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index aa6a502de47dbb..622be0ac0b1113 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -24,6 +24,8 @@
   __attribute__((__always_inline__, __nodebug__, __target__("avx10.2-256"),    \
                  __min_vector_width__(256)))
 
+//clang-format off
+
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed
 ///    single-precision (32-bit) floating-point elements to a 128-bit vector
 ///    containing FP16 elements.
@@ -3383,6 +3385,8 @@ _mm256_maskz_cvtpbf8_ph(__mmask8 __U, __m128i __A) {
       _mm256_slli_epi16(_mm256_maskz_cvtepi8_epi16(__U, __A), 8));
 }
 
+// clang-format on
+
 #undef __DEFAULT_FN_ATTRS128
 #undef __DEFAULT_FN_ATTRS256
 

>From 6a7d5189a0309e9cfbed80ec2cf6ed182b48a346 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Miko=C5=82aj=20Pir=C3=B3g?= <mikolajpirog at gmail.com>
Date: Sun, 22 Dec 2024 19:48:53 +0100
Subject: [PATCH 10/22] Fix comment typo

---
 clang/lib/Headers/avx10_2convertintrin.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index 622be0ac0b1113..2fdc3a2a0666b1 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -24,7 +24,7 @@
   __attribute__((__always_inline__, __nodebug__, __target__("avx10.2-256"),    \
                  __min_vector_width__(256)))
 
-//clang-format off
+// clang-format off
 
 /// Convert two 128-bit vectors, \a __A and \a __B, containing packed
 ///    single-precision (32-bit) floating-point elements to a 128-bit vector

>From bf0067be8b30b7b598d18d2828c19ff325c21177 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Miko=C5=82aj=20Pir=C3=B3g?= <mikolajpirog at gmail.com>
Date: Sun, 22 Dec 2024 19:50:04 +0100
Subject: [PATCH 11/22] FP16 -> half

---
 clang/lib/Headers/avx10_2convertintrin.h | 248 +++++++++++------------
 1 file changed, 124 insertions(+), 124 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index 2fdc3a2a0666b1..dfad1147db4d54 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -51,7 +51,7 @@
 /// \param __B
 ///    A 128-bit vector of [4 x float].
 /// \returns
-///    A 128-bit vector of [8 x fp16]. Lower elements correspond to the
+///    A 128-bit vector of [8 x half]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtx2ps_ph(__m128 __A,
@@ -86,7 +86,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtx2ps_ph(__m128 __A,
 /// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
@@ -94,7 +94,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtx2ps_ph(__m128 __A,
 /// \param __B
 ///    A 128-bit vector of [4 x float].
 /// \returns
-///    A 128-bit vector of [8 x fp16]. Lower elements correspond to the
+///    A 128-bit vector of [8 x half]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
@@ -136,7 +136,7 @@ _mm_mask_cvtx2ps_ph(__m128h __W, __mmask8 __U, __m128 __A, __m128 __B) {
 /// \param __B
 ///    A 128-bit vector of [4 x float].
 /// \returns
-///    A 128-bit vector of [8 x fp16]. Lower elements correspond to the
+///    A 128-bit vector of [8 x half]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    then zero is taken instead.
@@ -171,7 +171,7 @@ _mm_maskz_cvtx2ps_ph(__mmask8 __U, __m128 __A, __m128 __B) {
 /// \param __B
 ///    A 256-bit vector of [8 x float].
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Lower elements correspond to the
+///    A 256-bit vector of [16 x half]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtx2ps_ph(__m256 __A,
@@ -207,7 +207,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtx2ps_ph(__m256 __A,
 /// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
 ///
 /// \param __W
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
@@ -215,7 +215,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtx2ps_ph(__m256 __A,
 /// \param __B
 ///    A 256-bit vector of [8 x float].
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Lower elements correspond to the
+///    A 256-bit vector of [16 x half]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
@@ -258,7 +258,7 @@ _mm256_mask_cvtx2ps_ph(__m256h __W, __mmask16 __U, __m256 __A, __m256 __B) {
 /// \param __B
 ///    A 256-bit vector of [8 x float].
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Lower elements correspond to the
+///    A 256-bit vector of [16 x half]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    then zero is taken instead.
@@ -299,7 +299,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///    _MM_FROUND_TO_NEAREST_INT, _MM_FROUND_TO_NEG_INF, _MM_FROUND_TO_POS_INF,
 ///    _MM_FROUND_TO_ZERO.
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Lower elements correspond to the
+///    A 256-bit vector of [16 x half]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
 #define _mm256_cvtx_round2ps_ph(A, B, R)                                       \
@@ -334,7 +334,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// This intrinsic corresponds to the \c VCVT2PS2PHX instruction.
 ///
 /// \param __W
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
@@ -347,7 +347,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///    _MM_FROUND_TO_NEAREST_INT, _MM_FROUND_TO_NEG_INF, _MM_FROUND_TO_POS_INF,
 ///    _MM_FROUND_TO_ZERO.
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Lower elements correspond to the
+///    A 256-bit vector of [16 x half]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
@@ -392,7 +392,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///    _MM_FROUND_TO_NEAREST_INT, _MM_FROUND_TO_NEG_INF, _MM_FROUND_TO_POS_INF,
 ///    _MM_FROUND_TO_ZERO.
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Lower elements correspond to the
+///    A 256-bit vector of [16 x half]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    then zero is taken instead.
@@ -418,7 +418,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
@@ -457,7 +457,7 @@ _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
@@ -495,7 +495,7 @@ _mm_mask_cvtbiasph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __U
 ///    A 8-bit zeroing mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
@@ -526,7 +526,7 @@ _mm_maskz_cvtbiasph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
 ///
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
@@ -566,7 +566,7 @@ _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
@@ -604,7 +604,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_pbf8(
 /// \param __U
 ///    A 16-bit zeroing mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
@@ -636,7 +636,7 @@ _mm256_maskz_cvtbiasph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2BF8 instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
@@ -675,7 +675,7 @@ _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
@@ -713,7 +713,7 @@ _mm_mask_cvtbiassph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __U
 ///    A 8-bit zeroing mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
@@ -745,7 +745,7 @@ _mm_maskz_cvtbiassph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2BF8S instruction.
 ///
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
@@ -785,7 +785,7 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
@@ -823,7 +823,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_pbf8(
 /// \param __U
 ///    A 16-bit zeroing mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
@@ -854,7 +854,7 @@ _mm256_maskz_cvtbiassph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2HF8 instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
@@ -893,7 +893,7 @@ _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
@@ -931,7 +931,7 @@ _mm_mask_cvtbiasph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __U
 ///    A 8-bit zeroing mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
@@ -960,7 +960,7 @@ _mm_maskz_cvtbiasph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2HF8 instruction.
 ///
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
@@ -998,7 +998,7 @@ _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
@@ -1034,7 +1034,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_phf8(
 /// \param __U
 ///    A 16-bit zeroing mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
@@ -1066,7 +1066,7 @@ _mm256_maskz_cvtbiasph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2HF8S instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
@@ -1105,7 +1105,7 @@ _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
@@ -1143,7 +1143,7 @@ _mm_mask_cvtbiassph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __U
 ///    A 8-bit zeroing mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
 ///    A 128-bit vector of [8 x int16].
 /// \returns
@@ -1175,7 +1175,7 @@ _mm_maskz_cvtbiassph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// This intrinsic corresponds to the \c VCVTBIASPH2HF8S instruction.
 ///
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
@@ -1215,7 +1215,7 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
@@ -1253,7 +1253,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_phf8(
 /// \param __U
 ///    A 16-bit zeroing mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
 ///    A 256-bit vector of [16 x int16].
 /// \returns
@@ -1287,9 +1287,9 @@ _mm256_maskz_cvtbiassph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1330,9 +1330,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1372,9 +1372,9 @@ _mm_mask_cvtne2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __U
 ///    A 16-bit zeroing mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1407,9 +1407,9 @@ _mm_maskz_cvtne2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2BF8 instruction.
 ///
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 256-bit vector of [32 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1450,9 +1450,9 @@ _mm256_cvtne2ph_pbf8(__m256h __A, __m256h __B) {
 /// \param __U
 ///    A 32-bit merging mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 256-bit vector of [32 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1492,9 +1492,9 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_pbf8(
 /// \param __U
 ///    A 32-bit zeroing mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 256-bit vector of [32 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1528,9 +1528,9 @@ _mm256_maskz_cvtne2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1571,9 +1571,9 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower 8 elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1613,9 +1613,9 @@ _mm_mask_cvtnes2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __U
 ///    A 16-bit zeroing mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1649,9 +1649,9 @@ _mm_maskz_cvtnes2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2BF8S instruction.
 ///
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 256-bit vector of [32 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1692,9 +1692,9 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 /// \param __U
 ///    A 32-bit merging mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 256-bit vector of [32 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1734,9 +1734,9 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_pbf8(
 /// \param __U
 ///    A 32-bit zeroing mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 256-bit vector of [32 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1769,9 +1769,9 @@ _mm256_maskz_cvtnes2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2HF8 instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1812,9 +1812,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1854,9 +1854,9 @@ _mm_mask_cvtne2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __U
 ///    A 16-bit zeroing mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1889,9 +1889,9 @@ _mm_maskz_cvtne2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2HF8 instruction.
 ///
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 256-bit vector of [32 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1932,9 +1932,9 @@ _mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
 /// \param __U
 ///    A 32-bit merging mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 256-bit vector of [32 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -1974,9 +1974,9 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_phf8(
 /// \param __U
 ///    A 32-bit zeroing mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 256-bit vector of [32 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -2010,9 +2010,9 @@ _mm256_maskz_cvtne2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -2053,9 +2053,9 @@ _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -2095,9 +2095,9 @@ _mm_mask_cvtnes2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// \param __U
 ///    A 16-bit zeroing mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -2131,9 +2131,9 @@ _mm_maskz_cvtnes2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// This intrinsic corresponds to the \c VCVTNE2PH2HF8S instruction.
 ///
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 256-bit vector of [32 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -2174,9 +2174,9 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 /// \param __U
 ///    A 32-bit merging mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 256-bit vector of [32 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -2216,9 +2216,9 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_phf8(
 /// \param __U
 ///    A 32-bit zeroing mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 256-bit vector of [32 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
@@ -2249,7 +2249,7 @@ _mm256_maskz_cvtnes2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// \param __A
 ///    A 128-bit vector of [16 x hf8].
 /// \returns
-///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
+///    A 128-bit vector of [8 x half]. Resulting elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtnehf8_ph(__m128i __A) {
   return (__m128h)__builtin_ia32_vcvthf8_2ph128_mask(
@@ -2278,13 +2278,13 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtnehf8_ph(__m128i __A) {
 /// This intrinsic corresponds to the \c VCVTHF82PH instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
 ///    A 128-bit vector of [16 x hf8].
 /// \returns
-///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
+///    A 128-bit vector of [8 x half]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
@@ -2319,7 +2319,7 @@ _mm_mask_cvtnehf8_ph(__m128h __W, __mmask8 __U, __m128i __A) {
 /// \param __A
 ///    A 128-bit vector of [16 x hf8].
 /// \returns
-///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
+///    A 128-bit vector of [8 x half]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    zero is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
@@ -2346,7 +2346,7 @@ _mm_maskz_cvtnehf8_ph(__mmask8 __U, __m128i __A) {
 /// \param __A
 ///    A 256-bit vector of [32 x hf8].
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
+///    A 256-bit vector of [16 x half]. Resulting elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
 _mm256_cvtnehf8_ph(__m128i __A) {
@@ -2376,13 +2376,13 @@ _mm256_cvtnehf8_ph(__m128i __A) {
 /// This intrinsic corresponds to the \c VCVTHF82PH instruction.
 ///
 /// \param __W
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
 ///    A 256-bit vector of [32 x hf8].
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
+///    A 256-bit vector of [16 x half]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
@@ -2417,7 +2417,7 @@ _mm256_mask_cvtnehf8_ph(__m256h __W, __mmask16 __U, __m128i __A) {
 /// \param __A
 ///    A 256-bit vector of [32 x hf8].
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
+///    A 256-bit vector of [16 x half]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    zero is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
@@ -2443,7 +2443,7 @@ _mm256_maskz_cvtnehf8_ph(__mmask16 __U, __m128i __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2BF8 instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the (converted)
 ///    elements from \a __A; upper elements are zeroed. 
@@ -2478,7 +2478,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_pbf8(__m128h __A) {
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
@@ -2513,7 +2513,7 @@ _mm_mask_cvtneph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
@@ -2540,7 +2540,7 @@ _mm_maskz_cvtneph_pbf8(__mmask8 __U, __m128h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2BF8 instruction.
 ///
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Resulting elements correspond to the (converted)
 ///    elements from \a __A.
@@ -2575,7 +2575,7 @@ _mm256_cvtneph_pbf8(__m256h __A) {
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
-///    A 256-bit vector of [8 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If
@@ -2609,7 +2609,7 @@ _mm256_mask_cvtneph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \param __U
 ///    A 16-bit zeroing mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
@@ -2637,7 +2637,7 @@ _mm256_maskz_cvtneph_pbf8(__mmask16 __U, __m256h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the (converted)
 ///    elements from \a __A; upper elements are zeroed. 
@@ -2672,7 +2672,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_pbf8(__m128h __A) {
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
@@ -2707,7 +2707,7 @@ _mm_mask_cvtnesph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \param __U
 ///    A 8-bit zeroing mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
@@ -2734,7 +2734,7 @@ _mm_maskz_cvtnesph_pbf8(__mmask8 __U, __m128h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2BF8S instruction.
 ///
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Resulting elements correspond to the (converted)
 ///    elements from \a __A.
@@ -2770,7 +2770,7 @@ _mm256_cvtnesph_pbf8(__m256h __A) {
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
-///    A 256-bit vector of [8 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If
@@ -2805,7 +2805,7 @@ _mm256_mask_cvtnesph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \param __U
 ///    A 16-bit zeroing mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
@@ -2833,7 +2833,7 @@ _mm256_maskz_cvtnesph_pbf8(__mmask16 __U, __m256h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2HF8 instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the (converted)
 ///    elements from \a __A; upper elements are zeroed. 
@@ -2868,7 +2868,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_phf8(__m128h __A) {
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
@@ -2903,7 +2903,7 @@ _mm_mask_cvtneph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \param __U
 ///    A 8-bit zeroing mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
@@ -2930,7 +2930,7 @@ _mm_maskz_cvtneph_phf8(__mmask8 __U, __m128h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2HF8 instruction.
 ///
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Resulting elements correspond to the (converted)
 ///    elements from \a __A.
@@ -2965,7 +2965,7 @@ _mm256_cvtneph_phf8(__m256h __A) {
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
-///    A 256-bit vector of [8 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If
@@ -2999,7 +2999,7 @@ _mm256_mask_cvtneph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \param __U
 ///    A 16-bit zeroing mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
@@ -3027,7 +3027,7 @@ _mm256_maskz_cvtneph_phf8(__mmask16 __U, __m256h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
 ///
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the (converted)
 ///    elements from \a __A; upper elements are zeroed. 
@@ -3062,7 +3062,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_phf8(__m128h __A) {
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
@@ -3097,7 +3097,7 @@ _mm_mask_cvtnesph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \param __U
 ///    A 8-bit zeroing mask.
 /// \param __A
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    (converted) elements from \a __A; upper elements are zeroed. If
@@ -3124,7 +3124,7 @@ _mm_maskz_cvtnesph_phf8(__mmask8 __U, __m128h __A) {
 /// This intrinsic corresponds to the \c VCVTNEPH2HF8S instruction.
 ///
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Resulting elements correspond to the (converted)
 ///    elements from \a __A.
@@ -3160,7 +3160,7 @@ _mm256_cvtnesph_phf8(__m256h __A) {
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
-///    A 256-bit vector of [8 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If
@@ -3195,7 +3195,7 @@ _mm256_mask_cvtnesph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \param __U
 ///    A 16-bit zeroing mask.
 /// \param __A
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
@@ -3222,7 +3222,7 @@ _mm256_maskz_cvtnesph_phf8(__mmask16 __U, __m256h __A) {
 /// \param __A
 ///    A 128-bit vector of [16 x hf8].
 /// \returns
-///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
+///    A 128-bit vector of [8 x half]. Resulting elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpbf8_ph(__m128i __A) {
   return _mm_castsi128_ph(_mm_slli_epi16(_mm_cvtepi8_epi16(__A), 8));
@@ -3248,13 +3248,13 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpbf8_ph(__m128i __A) {
 /// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __W
-///    A 128-bit vector of [8 x fp16].
+///    A 128-bit vector of [8 x half].
 /// \param __U
 ///    A 8-bit merging mask.
 /// \param __A
 ///    A 128-bit vector of [16 x hf8].
 /// \returns
-///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
+///    A 128-bit vector of [8 x half]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
@@ -3287,7 +3287,7 @@ _mm_mask_cvtpbf8_ph(__m128h __W, __mmask8 __U, __m128i __A) {
 /// \param __A
 ///    A 128-bit vector of [16 x hf8].
 /// \returns
-///    A 128-bit vector of [8 x fp16]. Resulting elements correspond to the
+///    A 128-bit vector of [8 x half]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    zero is taken instead.
 static __inline__ __m128h __DEFAULT_FN_ATTRS128
@@ -3311,7 +3311,7 @@ _mm_maskz_cvtpbf8_ph(__mmask8 __U, __m128i __A) {
 /// \param __A
 ///    A 256-bit vector of [32 x hf8].
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
+///    A 256-bit vector of [16 x half]. Resulting elements correspond to the
 ///    (converted) elements from \a __A.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtpbf8_ph(__m128i __A) {
   return _mm256_castsi256_ph(_mm256_slli_epi16(_mm256_cvtepi8_epi16(__A), 8));
@@ -3337,13 +3337,13 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtpbf8_ph(__m128i __A) {
 /// This intrinsic does not correspond to a single instruction.
 ///
 /// \param __W
-///    A 256-bit vector of [16 x fp16].
+///    A 256-bit vector of [16 x half].
 /// \param __U
 ///    A 16-bit merging mask.
 /// \param __A
 ///    A 256-bit vector of [32 x hf8].
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
+///    A 256-bit vector of [16 x half]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
@@ -3376,7 +3376,7 @@ _mm256_mask_cvtpbf8_ph(__m256h __W, __mmask8 __U, __m128i __A) {
 /// \param __A
 ///    A 256-bit vector of [32 x hf8].
 /// \returns
-///    A 256-bit vector of [16 x fp16]. Resulting elements correspond to the
+///    A 256-bit vector of [16 x half]. Resulting elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    zero is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256

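The _mm_cvtpbf8_ph / _mm256_cvtpbf8_ph bodies above rely on bf8 (E5M2) being
exactly the upper byte of an IEEE binary16 value, which is why a zero-fill
shift by 8 is all the widening takes. A minimal scalar sketch of that idea
(illustrative, not part of the patch):

  #include <stdint.h>

  // Widen one bf8 (E5M2) byte to the bit pattern of the corresponding
  // binary16 value: sign, exponent and the two kept mantissa bits land in
  // the top byte; the remaining mantissa bits are zero.
  static inline uint16_t bf8_to_fp16_bits(uint8_t b) {
    return (uint16_t)(b << 8);
  }
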
>From 750d07d44cdfcaf5583ae6ded7660a13e61e914a Mon Sep 17 00:00:00 2001
From: Mikołaj Piróg <mikolajpirog at gmail.com>
Date: Sun, 22 Dec 2024 19:52:36 +0100
Subject: [PATCH 12/22] Prepend argument names for defines with __

---
 clang/lib/Headers/avx10_2convertintrin.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index dfad1147db4d54..670d0ab3979156 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -302,10 +302,10 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///    A 256-bit vector of [16 x half]. Lower elements correspond to the
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A.
-#define _mm256_cvtx_round2ps_ph(A, B, R)                                       \
+#define _mm256_cvtx_round2ps_ph(__A, __B, __R)                                       \
   ((__m256h)__builtin_ia32_vcvt2ps2phx256_mask(                                \
-      (__v8sf)(A), (__v8sf)(B), (__v16hf)_mm256_undefined_ph(),                \
-      (__mmask16)(-1), (const int)(R)))
+      (__v8sf)(__A), (__v8sf)(__B), (__v16hf)_mm256_undefined_ph(),                \
+      (__mmask16)(-1), (const int)(__R)))
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed
 ///    single-precision (32-bit) floating-point elements to a 256-bit vector
@@ -351,9 +351,9 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
-#define _mm256_mask_cvtx_round2ps_ph(W, U, A, B, R)                            \
+#define _mm256_mask_cvtx_round2ps_ph(__W, __U, __A, __B, __R)                            \
   ((__m256h)__builtin_ia32_vcvt2ps2phx256_mask(                                \
-      (__v8sf)(A), (__v8sf)(B), (__v16hf)(W), (__mmask16)(U), (const int)(R)))
+      (__v8sf)(__A), (__v8sf)(__B), (__v16hf)(__W), (__mmask16)(__U), (const int)(__R)))
 
 /// Convert two 256-bit vectors, \a __A and \a __B, containing packed
 ///    single-precision (32-bit) floating-point elements to a 256-bit vector
@@ -396,10 +396,10 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///    (converted) elements from \a __B; higher order elements correspond to the
 ///    (converted) elements from \a __A. If corresponding mask bit is not set,
 ///    then zero is taken instead.
-#define _mm256_maskz_cvtx_round2ps_ph(U, A, B, R)                              \
+#define _mm256_maskz_cvtx_round2ps_ph(__U, __A, __B, __R)                              \
   ((__m256h)__builtin_ia32_vcvt2ps2phx256_mask(                                \
-      (__v8sf)(A), (__v8sf)(B), (__v16hf)(_mm256_setzero_ph()),                \
-      (__mmask16)(U), (const int)(R)))
+      (__v8sf)(__A), (__v8sf)(__B), (__v16hf)(_mm256_setzero_ph()),                \
+      (__mmask16)(__U), (const int)(__R)))
 
 /// Add two 128-bit vectors, \a __A and \a __B, containing packed FP16
 ///    floating-point elements and 8-bit integers stored in the lower half of

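For context, a usage sketch of the rounding macros renamed above; it assumes a
translation unit compiled with AVX10.2-256 enabled and the usual _MM_FROUND_*
constants from <immintrin.h> (illustrative, not part of the patch):

  #include <immintrin.h>

  static __m256h cvt_round_sketch(__m256 hi, __m256 lo, __mmask16 keep) {
    // Unmasked: all 16 fp16 lanes are produced (low 8 from 'lo', high 8 from 'hi').
    __m256h full = _mm256_cvtx_round2ps_ph(
        hi, lo, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    (void)full;
    // Zero-masked: lanes whose bit in 'keep' is clear are zeroed.
    return _mm256_maskz_cvtx_round2ps_ph(
        keep, hi, lo, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
  }
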
>From 82f633e8f5f401276c25a559e52aa559237d7456 Mon Sep 17 00:00:00 2001
From: Mikołaj Piróg <mikolajpirog at gmail.com>
Date: Sun, 22 Dec 2024 19:53:06 +0100
Subject: [PATCH 13/22] int16 -> i16

---
 clang/lib/Headers/avx10_2convertintrin.h | 48 ++++++++++++------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index 670d0ab3979156..ae0b9e965f1c50 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -420,7 +420,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 /// \param __A
 ///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x int16].
+///    A 128-bit vector of [8 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    sum of elements from \a __A and \a __B; higher order elements are zeroed.
@@ -459,7 +459,7 @@ _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
 /// \param __A
 ///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x int16].
+///    A 128-bit vector of [8 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
@@ -497,7 +497,7 @@ _mm_mask_cvtbiasph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __A
 ///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x int16].
+///    A 128-bit vector of [8 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
@@ -528,7 +528,7 @@ _mm_maskz_cvtbiasph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __A
 ///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x int16].
+///    A 256-bit vector of [16 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Elements correspond to the
 ///    sum of elements from \a __A and \a __B.
@@ -568,7 +568,7 @@ _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
 /// \param __A
 ///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x int16].
+///    A 256-bit vector of [16 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set, then
@@ -606,7 +606,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_pbf8(
 /// \param __A
 ///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x int16].
+///    A 256-bit vector of [16 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set,
@@ -638,7 +638,7 @@ _mm256_maskz_cvtbiasph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// \param __A
 ///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x int16].
+///    A 128-bit vector of [8 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the
 ///    sum of elements from \a __A and \a __B; higher order elements are zeroed.
@@ -677,7 +677,7 @@ _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
 /// \param __A
 ///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x int16].
+///    A 128-bit vector of [8 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
@@ -715,7 +715,7 @@ _mm_mask_cvtbiassph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __A
 ///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x int16].
+///    A 128-bit vector of [8 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
@@ -747,7 +747,7 @@ _mm_maskz_cvtbiassph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __A
 ///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x int16].
+///    A 256-bit vector of [16 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Elements correspond to the
 ///    sum of elements from \a __A and \a __B.
@@ -787,7 +787,7 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
 /// \param __A
 ///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x int16].
+///    A 256-bit vector of [16 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set, then
@@ -825,7 +825,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_pbf8(
 /// \param __A
 ///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x int16].
+///    A 256-bit vector of [16 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x bf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set,
@@ -856,7 +856,7 @@ _mm256_maskz_cvtbiassph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// \param __A
 ///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x int16].
+///    A 128-bit vector of [8 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    sum of elements from \a __A and \a __B; higher order elements are zeroed.
@@ -895,7 +895,7 @@ _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
 /// \param __A
 ///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x int16].
+///    A 128-bit vector of [8 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
@@ -933,7 +933,7 @@ _mm_mask_cvtbiasph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __A
 ///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x int16].
+///    A 128-bit vector of [8 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
@@ -962,7 +962,7 @@ _mm_maskz_cvtbiasph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __A
 ///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x int16].
+///    A 256-bit vector of [16 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Elements correspond to the
 ///    sum of elements from \a __A and \a __B.
@@ -1000,7 +1000,7 @@ _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
 /// \param __A
 ///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x int16].
+///    A 256-bit vector of [16 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set, then
@@ -1036,7 +1036,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_phf8(
 /// \param __A
 ///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x int16].
+///    A 256-bit vector of [16 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set,
@@ -1068,7 +1068,7 @@ _mm256_maskz_cvtbiasph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// \param __A
 ///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x int16].
+///    A 128-bit vector of [8 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the
 ///    sum of elements from \a __A and \a __B; higher order elements are zeroed.
@@ -1107,7 +1107,7 @@ _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
 /// \param __A
 ///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x int16].
+///    A 128-bit vector of [8 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
@@ -1145,7 +1145,7 @@ _mm_mask_cvtbiassph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __A
 ///    A 128-bit vector of [8 x half].
 /// \param __B
-///    A 128-bit vector of [8 x int16].
+///    A 128-bit vector of [8 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Lower elements correspond to the sum of
 ///    elements from \a __A and \a __B; higher order elements are zeroed. If
@@ -1177,7 +1177,7 @@ _mm_maskz_cvtbiassph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// \param __A
 ///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x int16].
+///    A 256-bit vector of [16 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Elements correspond to the
 ///    sum of elements from \a __A and \a __B.
@@ -1217,7 +1217,7 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
 /// \param __A
 ///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x int16].
+///    A 256-bit vector of [16 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set, then
@@ -1255,7 +1255,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_phf8(
 /// \param __A
 ///    A 256-bit vector of [16 x half].
 /// \param __B
-///    A 256-bit vector of [16 x int16].
+///    A 256-bit vector of [16 x i16].
 /// \returns
 ///    A 128-bit vector of [16 x hf8]. Elements correspond to the sum of
 ///    elements from \a __A and \a __B. If corresponding mask bit is not set,

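A usage sketch for the bias-conversion intrinsics whose parameter docs are
touched above. Per the C signatures visible in the context lines, the __m128i
operand is assumed to carry the bias bytes and the __m128h operand the FP16
source (illustrative, not part of the patch):

  #include <immintrin.h>

  static __m128i bias_convert_sketch(__m128i bias, __m128h src,
                                     __mmask8 keep, __m128i passthru) {
    // Unmasked form: the low 8 bytes of the result hold the bf8 values.
    __m128i all = _mm_cvtbiasph_pbf8(bias, src);
    (void)all;
    // Merge-masked form: lanes with a clear bit in 'keep' keep the bytes
    // from 'passthru'.
    return _mm_mask_cvtbiasph_pbf8(passthru, keep, bias, src);
  }
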
>From 67d89f2c1e9df8872f1f18b9ab5d1234e27af0a7 Mon Sep 17 00:00:00 2001
From: Mikołaj Piróg <mikolajpirog at gmail.com>
Date: Sun, 22 Dec 2024 19:55:06 +0100
Subject: [PATCH 14/22] 2i -> 2 * i

---
 clang/lib/Headers/avx10_2convertintrin.h | 48 ++++++++++++------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index ae0b9e965f1c50..d2aa33735b8a20 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -407,7 +407,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -439,7 +439,7 @@ _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -479,7 +479,7 @@ _mm_mask_cvtbiasph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -515,7 +515,7 @@ _mm_maskz_cvtbiasph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -548,7 +548,7 @@ _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -588,7 +588,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_pbf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -625,7 +625,7 @@ _mm256_maskz_cvtbiasph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -657,7 +657,7 @@ _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -697,7 +697,7 @@ _mm_mask_cvtbiassph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -734,7 +734,7 @@ _mm_maskz_cvtbiassph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -767,7 +767,7 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -807,7 +807,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_pbf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -843,7 +843,7 @@ _mm256_maskz_cvtbiassph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -875,7 +875,7 @@ _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -915,7 +915,7 @@ _mm_mask_cvtbiasph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -951,7 +951,7 @@ _mm_maskz_cvtbiasph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 /// \endcode
 ///
@@ -982,7 +982,7 @@ _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -1020,7 +1020,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_phf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -1055,7 +1055,7 @@ _mm256_maskz_cvtbiasph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -1087,7 +1087,7 @@ _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -1127,7 +1127,7 @@ _mm_mask_cvtbiassph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -1164,7 +1164,7 @@ _mm_maskz_cvtbiassph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -1197,7 +1197,7 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -1237,7 +1237,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_phf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2i])
+///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI

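To make the __B.int8[2 * i] indexing concrete: the pseudocode reads one bias
byte per FP16 element, taken from the even (low) byte of each 16-bit lane of
the integer operand. A hedged sketch of building such a bias vector (the 0x5A
value is arbitrary):

  #include <immintrin.h>

  // One bias byte per FP16 element; the odd bytes are not read by the
  // pseudocode above, so leaving them zero is harmless.
  static __m128i make_bias_sketch(void) {
    return _mm_set1_epi16(0x005A);
  }
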
>From 21bd118429af547c863b50ace78678f1491c72a0 Mon Sep 17 00:00:00 2001
From: Mikołaj Piróg <mikolajpirog at gmail.com>
Date: Sun, 22 Dec 2024 20:06:20 +0100
Subject: [PATCH 15/22] Correct name of function in cvtbias operations

---
 clang/lib/Headers/avx10_2convertintrin.h | 48 ++++++++++++------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index d2aa33735b8a20..06228b3f9ccb54 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -407,7 +407,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -439,7 +439,7 @@ _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -479,7 +479,7 @@ _mm_mask_cvtbiasph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -515,7 +515,7 @@ _mm_maskz_cvtbiasph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -548,7 +548,7 @@ _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -588,7 +588,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_pbf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -625,7 +625,7 @@ _mm256_maskz_cvtbiasph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -657,7 +657,7 @@ _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -697,7 +697,7 @@ _mm_mask_cvtbiassph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -734,7 +734,7 @@ _mm_maskz_cvtbiassph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -767,7 +767,7 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -807,7 +807,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_pbf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -843,7 +843,7 @@ _mm256_maskz_cvtbiassph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -875,7 +875,7 @@ _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -915,7 +915,7 @@ _mm_mask_cvtbiasph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -951,7 +951,7 @@ _mm_maskz_cvtbiasph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 /// \endcode
 ///
@@ -982,7 +982,7 @@ _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -1020,7 +1020,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_phf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -1055,7 +1055,7 @@ _mm256_maskz_cvtbiasph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -1087,7 +1087,7 @@ _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -1127,7 +1127,7 @@ _mm_mask_cvtbiassph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -1164,7 +1164,7 @@ _mm_maskz_cvtbiassph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -1197,7 +1197,7 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -1237,7 +1237,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_phf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_convert_fp16_to_fp8_bias(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI

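The next patch renames the pseudo-conversions used by the two-source narrowing
intrinsics; for orientation, a usage sketch assuming an AVX10.2-256 target
(illustrative, not part of the patch):

  #include <immintrin.h>

  // Pack 32 FP16 values (two __m256h) into 32 bf8 bytes. Per the pseudocode,
  // __B supplies the low half of the result and __A the high half, so lanes
  // 0..15 come from 'lo' and lanes 16..31 from 'hi'.
  static __m256i pack_two_sketch(__m256h hi, __m256h lo) {
    return _mm256_cvtne2ph_pbf8(hi, lo);
  }
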
>From 6e876aa86a9dee7c9db1ac9ebf947ff146c066d8 Mon Sep 17 00:00:00 2001
From: Mikołaj Piróg <mikolajpirog at gmail.com>
Date: Sun, 22 Dec 2024 20:54:39 +0100
Subject: [PATCH 16/22] Change fp8 -> hf8/bf8 in convert functions

---
 clang/lib/Headers/avx10_2convertintrin.h | 168 +++++++++++------------
 1 file changed, 84 insertions(+), 84 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index 06228b3f9ccb54..60494357d45be9 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -1273,9 +1273,9 @@ _mm256_maskz_cvtbiassph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF i < 8
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
 /// 	FI
 /// ENDFOR
 ///
@@ -1309,9 +1309,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
@@ -1353,9 +1353,9 @@ _mm_mask_cvtne2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8[i] := 0
@@ -1393,9 +1393,9 @@ _mm_maskz_cvtne2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 31 
 /// 	IF i < 16 
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
 /// 	FI
 /// ENDFOR
 ///
@@ -1429,9 +1429,9 @@ _mm256_cvtne2ph_pbf8(__m256h __A, __m256h __B) {
 /// FOR i := 0 to 31 
 /// 	IF __U[i]
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8[i] := __W.fp8[i]
@@ -1475,9 +1475,9 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_pbf8(
 /// 		dst.fp8[i] := 0
 /// 	ELSE
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
 /// 		FI
 /// 	FI
 /// ENDFOR
@@ -1514,9 +1514,9 @@ _mm256_maskz_cvtne2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF i < 8
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
 /// 	FI
 /// ENDFOR
 ///
@@ -1550,9 +1550,9 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
@@ -1594,9 +1594,9 @@ _mm_mask_cvtnes2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8[i] := 0
@@ -1635,9 +1635,9 @@ _mm_maskz_cvtnes2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 31
 /// 	IF i < 16 
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
 /// 	FI
 /// ENDFOR
 ///
@@ -1671,9 +1671,9 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 /// FOR i := 0 to 31
 /// 	IF __U[i]
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8[i] := __W.fp8[i]
@@ -1717,9 +1717,9 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_pbf8(
 /// 		dst.fp8[i] := 0
 /// 	ELSE
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
 /// 		FI
 /// 	FI
 /// ENDFOR
@@ -1755,9 +1755,9 @@ _mm256_maskz_cvtnes2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF i < 8
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
 /// 	FI
 /// ENDFOR
 ///
@@ -1791,9 +1791,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
@@ -1835,9 +1835,9 @@ _mm_mask_cvtne2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8[i] := 0
@@ -1875,9 +1875,9 @@ _mm_maskz_cvtne2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 31
 /// 	IF i < 16 
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
 /// 	FI
 /// ENDFOR
 ///
@@ -1911,9 +1911,9 @@ _mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
 /// FOR i := 0 to 31
 /// 	IF __U[i]
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8[i] := __W.fp8[i]
@@ -1957,9 +1957,9 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_phf8(
 /// 		dst.fp8[i] := 0
 /// 	ELSE
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
 /// 		FI
 /// 	FI
 /// ENDFOR
@@ -1996,9 +1996,9 @@ _mm256_maskz_cvtne2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF i < 8
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
 /// 	FI
 /// ENDFOR
 ///
@@ -2032,9 +2032,9 @@ _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
@@ -2076,9 +2076,9 @@ _mm_mask_cvtnes2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 8])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8[i] := 0
@@ -2117,9 +2117,9 @@ _mm_maskz_cvtnes2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF i < 16
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
 /// 	FI
 /// ENDFOR
 ///
@@ -2153,9 +2153,9 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 /// FOR i := 0 to 31
 /// 	IF __U[i]
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8[i] := __W.fp8[i]
@@ -2199,9 +2199,9 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_phf8(
 /// 		dst.fp8[i] := 0
 /// 	ELSE
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i - 16])
+/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
 /// 		FI
 /// 	FI
 /// ENDFOR
@@ -2236,7 +2236,7 @@ _mm256_maskz_cvtnes2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 	dst.fp16[i] := convert_hf8_to_fp16(__A.fp8[i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -2264,7 +2264,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtnehf8_ph(__m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__A.fp8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
@@ -2301,7 +2301,7 @@ _mm_mask_cvtnehf8_ph(__m128h __W, __mmask8 __U, __m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__A.fp8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := 0
 /// 	FI
@@ -2333,7 +2333,7 @@ _mm_maskz_cvtnehf8_ph(__mmask8 __U, __m128i __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 	dst.fp16[i] := convert_hf8_to_fp16(__A.fp8[i])
 /// ENDFOR
 ///
 /// dst[MAX:256] := 0
@@ -2362,7 +2362,7 @@ _mm256_cvtnehf8_ph(__m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_fp8_to_fp16(__A.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__A.fp8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
@@ -2399,7 +2399,7 @@ _mm256_mask_cvtnehf8_ph(__m256h __W, __mmask16 __U, __m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.fp8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := 0
 /// 	FI
@@ -2432,7 +2432,7 @@ _mm256_maskz_cvtnehf8_ph(__mmask16 __U, __m128i __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -2460,7 +2460,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_pbf8(__m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
 /// 	FI
@@ -2499,7 +2499,7 @@ _mm_mask_cvtneph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// 	IF __U[i]
 /// 		dst.fp8[i] := 0
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// 	FI
 /// ENDFOR
 ///
@@ -2529,7 +2529,7 @@ _mm_maskz_cvtneph_pbf8(__mmask8 __U, __m128h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -2557,7 +2557,7 @@ _mm256_cvtneph_pbf8(__m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
 /// 	FI
@@ -2593,7 +2593,7 @@ _mm256_mask_cvtneph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8[i] := 0
 /// 	FI
@@ -2626,7 +2626,7 @@ _mm256_maskz_cvtneph_pbf8(__mmask16 __U, __m256h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -2654,7 +2654,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_pbf8(__m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
 /// 	FI
@@ -2691,7 +2691,7 @@ _mm_mask_cvtnesph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8[i] := 0
 /// 	FI
@@ -2723,7 +2723,7 @@ _mm_maskz_cvtnesph_pbf8(__mmask8 __U, __m128h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -2752,7 +2752,7 @@ _mm256_cvtnesph_pbf8(__m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
 /// 	FI
@@ -2789,7 +2789,7 @@ _mm256_mask_cvtnesph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8[i] := 0
 /// 	FI
@@ -2822,7 +2822,7 @@ _mm256_maskz_cvtnesph_pbf8(__mmask16 __U, __m256h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -2850,7 +2850,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_phf8(__m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
 /// 	FI
@@ -2887,7 +2887,7 @@ _mm_mask_cvtneph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8[i] := 0
 /// 	FI
@@ -2919,7 +2919,7 @@ _mm_maskz_cvtneph_phf8(__mmask8 __U, __m128h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -2947,7 +2947,7 @@ _mm256_cvtneph_phf8(__m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
 /// 	FI
@@ -2983,7 +2983,7 @@ _mm256_mask_cvtneph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8[i] := 0
 /// 	FI
@@ -3016,7 +3016,7 @@ _mm256_maskz_cvtneph_phf8(__mmask16 __U, __m256h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -3044,7 +3044,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_phf8(__m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
 /// 	FI
@@ -3081,7 +3081,7 @@ _mm_mask_cvtnesph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8[i] := 0
 /// 	FI
@@ -3113,7 +3113,7 @@ _mm_maskz_cvtnesph_phf8(__mmask8 __U, __m128h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 	dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -3142,7 +3142,7 @@ _mm256_cvtnesph_phf8(__m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
 /// 	FI
@@ -3179,7 +3179,7 @@ _mm256_mask_cvtnesph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_fp8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8[i] := 0
 /// 	FI
@@ -3211,7 +3211,7 @@ _mm256_maskz_cvtnesph_phf8(__mmask16 __U, __m256h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
+/// 	dst.fp16[i] := convert_hf8_to_fp16(__B.fp8[i])
 /// ENDFOR
 /// \endcode
 ///
@@ -3236,7 +3236,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpbf8_ph(__m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.fp8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
@@ -3271,7 +3271,7 @@ _mm_mask_cvtpbf8_ph(__m128h __W, __mmask8 __U, __m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.fp8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := 0
 /// 	FI
@@ -3300,7 +3300,7 @@ _mm_maskz_cvtpbf8_ph(__mmask8 __U, __m128i __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
+/// 	dst.fp16[i] := convert_hf8_to_fp16(__B.fp8[i])
 /// ENDFOR
 /// \endcode
 ///
@@ -3325,7 +3325,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtpbf8_ph(__m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.fp8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
@@ -3360,7 +3360,7 @@ _mm256_mask_cvtpbf8_ph(__m256h __W, __mmask8 __U, __m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_fp8_to_fp16(__B.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.fp8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := 0
 /// 	FI

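A small round-trip sketch tying the narrowing and widening halves of this
header together, assuming an AVX10.2-256 target (illustrative, not part of the
patch):

  #include <immintrin.h>

  // Narrow eight FP16 values to bf8 and widen them back. The widening step
  // (_mm_cvtpbf8_ph) is exact for bf8; precision is lost only when narrowing.
  static __m128h roundtrip_sketch(__m128h src) {
    __m128i bf8 = _mm_cvtneph_pbf8(src); // low 8 bytes hold the bf8 values
    return _mm_cvtpbf8_ph(bf8);
  }
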
>From 487339926351c3830a2bb9a82cb22d61d8e37892 Mon Sep 17 00:00:00 2001
From: Mikołaj Piróg <mikolajpirog at gmail.com>
Date: Sun, 22 Dec 2024 21:46:53 +0100
Subject: [PATCH 17/22] Add saturation info to functions

---
 clang/lib/Headers/avx10_2convertintrin.h | 92 ++++++++++++------------
 1 file changed, 46 insertions(+), 46 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index 60494357d45be9..ca6fa434eb4f95 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -625,7 +625,7 @@ _mm256_maskz_cvtbiasph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -657,7 +657,7 @@ _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.fp8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -697,7 +697,7 @@ _mm_mask_cvtbiassph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -734,7 +734,7 @@ _mm_maskz_cvtbiassph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -767,7 +767,7 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.fp8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -807,7 +807,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_pbf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -1055,7 +1055,7 @@ _mm256_maskz_cvtbiasph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -1087,7 +1087,7 @@ _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.fp8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -1127,7 +1127,7 @@ _mm_mask_cvtbiassph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -1164,7 +1164,7 @@ _mm_maskz_cvtbiassph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -1197,7 +1197,7 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.fp8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
@@ -1237,7 +1237,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_phf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
 ///	 	dst.fp8[i] := 0
 ///	 FI
@@ -1514,9 +1514,9 @@ _mm256_maskz_cvtne2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF i < 8
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 8])
 /// 	FI
 /// ENDFOR
 ///
@@ -1550,9 +1550,9 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
+/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
@@ -1594,9 +1594,9 @@ _mm_mask_cvtnes2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
+/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8[i] := 0
@@ -1635,9 +1635,9 @@ _mm_maskz_cvtnes2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 31
 /// 	IF i < 16 
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 16])
 /// 	FI
 /// ENDFOR
 ///
@@ -1671,9 +1671,9 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 /// FOR i := 0 to 31
 /// 	IF __U[i]
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
+/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 16])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8[i] := __W.fp8[i]
@@ -1717,9 +1717,9 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_pbf8(
 /// 		dst.fp8[i] := 0
 /// 	ELSE
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
+/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 16])
 /// 		FI
 /// 	FI
 /// ENDFOR
@@ -1996,9 +1996,9 @@ _mm256_maskz_cvtne2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF i < 8
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 8])
 /// 	FI
 /// ENDFOR
 ///
@@ -2076,9 +2076,9 @@ _mm_mask_cvtnes2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
+/// 			dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8[i] := 0
@@ -2117,9 +2117,9 @@ _mm_maskz_cvtnes2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 31
 /// 	IF i < 16
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 16])
 /// 	FI
 /// ENDFOR
 ///
@@ -2153,9 +2153,9 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 /// FOR i := 0 to 31
 /// 	IF __U[i]
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
+/// 			dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 16])
 /// 		FI
 /// 	ELSE
 /// 		dst.fp8[i] := __W.fp8[i]
@@ -2199,9 +2199,9 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_phf8(
 /// 		dst.fp8[i] := 0
 /// 	ELSE
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
+/// 			dst.fp8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
+/// 			dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 16])
 /// 		FI
 /// 	FI
 /// ENDFOR
@@ -2626,7 +2626,7 @@ _mm256_maskz_cvtneph_pbf8(__mmask16 __U, __m256h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
+/// 	dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -2654,7 +2654,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_pbf8(__m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
 /// 	FI
@@ -2691,7 +2691,7 @@ _mm_mask_cvtnesph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8[i] := 0
 /// 	FI
@@ -2723,7 +2723,7 @@ _mm_maskz_cvtnesph_pbf8(__mmask8 __U, __m128h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
+/// 	dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -2752,7 +2752,7 @@ _mm256_cvtnesph_pbf8(__m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
 /// 	FI
@@ -2789,7 +2789,7 @@ _mm256_mask_cvtnesph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8[i] := 0
 /// 	FI
@@ -3016,7 +3016,7 @@ _mm256_maskz_cvtneph_phf8(__mmask16 __U, __m256h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
+/// 	dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -3044,7 +3044,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_phf8(__m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
 /// 	FI
@@ -3081,7 +3081,7 @@ _mm_mask_cvtnesph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8[i] := 0
 /// 	FI
@@ -3113,7 +3113,7 @@ _mm_maskz_cvtnesph_phf8(__mmask8 __U, __m128h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
+/// 	dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -3142,7 +3142,7 @@ _mm256_cvtnesph_phf8(__m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8 := __W.fp8[i]
 /// 	FI
@@ -3179,7 +3179,7 @@ _mm256_mask_cvtnesph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
+/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
 /// 		dst.fp8[i] := 0
 /// 	FI
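Since the distinction added in this patch only shows up for out-of-range inputs, here is a hedged sketch of the difference between the plain and saturating FP16 -> BF8 forms (AVX10.2-256 support assumed; the printed encodings are what I would expect, not verified output):

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

/* 65504.0 is the largest finite FP16 value and overflows BF8's range:
   the plain conversion is expected to round it to +Inf, while the
   saturating one clamps to the largest finite BF8 value. */
int main(void) {
  __m128h big = _mm_set1_ph((_Float16)65504.0f);
  uint8_t plain[16], sat[16];
  _mm_storeu_si128((__m128i *)plain, _mm_cvtneph_pbf8(big));
  _mm_storeu_si128((__m128i *)sat, _mm_cvtnesph_pbf8(big));
  printf("plain=0x%02x sat=0x%02x\n", plain[0], sat[0]);
  return 0;
}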

>From d59fdd043dfa4ce6246502088a3ed04d7a55581b Mon Sep 17 00:00:00 2001
From: Mikołaj Piróg <mikolajpirog at gmail.com>
Date: Mon, 23 Dec 2024 19:23:49 +0100
Subject: [PATCH 18/22] Reviewer suggestions

---
 clang/lib/Headers/avx10_2convertintrin.h | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index ca6fa434eb4f95..77e0a5411e04db 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -38,8 +38,9 @@
 /// 		dst.fp16[i] := convert_fp32_to_fp16(__A.fp32[i - 4])
 /// 	FI
 ///
-/// dst[MAX:127] := 0
 /// ENDFOR
+///
+/// dst[MAX:127] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -76,9 +77,9 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtx2ps_ph(__m128 __A,
 /// 	ELSE
 /// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
+/// ENDFOR
 ///
 /// dst[MAX:127] := 0
-/// ENDFOR
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -953,6 +954,8 @@ _mm_maskz_cvtbiasph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 /// FOR i := 0 to 15
 /// 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -987,6 +990,8 @@ _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
 /// 		dst.fp8[i] := _W[i]
 /// 	FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -1025,6 +1030,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_phf8(
 ///	 	dst.fp8[i] := 0
 ///	 FI
 /// ENDFOR
+///
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -3347,7 +3354,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtpbf8_ph(__m128i __A) {
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    element from \a __W is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtpbf8_ph(__m256h __W, __mmask8 __U, __m128i __A) {
+_mm256_mask_cvtpbf8_ph(__m256h __W, __mmask16 __U, __m128i __A) {
   return _mm256_castsi256_ph(
       _mm256_mask_slli_epi16((__m256i)__W, __U, _mm256_cvtepi8_epi16(__A), 8));
 }
@@ -3380,7 +3387,7 @@ _mm256_mask_cvtpbf8_ph(__m256h __W, __mmask8 __U, __m128i __A) {
 ///    (converted) elements from \a __A. If corresponding mask bit is not set, then
 ///    zero is taken instead.
 static __inline__ __m256h __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtpbf8_ph(__mmask8 __U, __m128i __A) {
+_mm256_maskz_cvtpbf8_ph(__mmask16 __U, __m128i __A) {
   return _mm256_castsi256_ph(
       _mm256_slli_epi16(_mm256_maskz_cvtepi8_epi16(__U, __A), 8));
 }
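The __mmask16 change above matters because the 256-bit destination holds 16 FP16 elements, one mask bit per element. A minimal sketch (AVX10.2-256 support and this header revision's intrinsic names assumed; values illustrative):

#include <immintrin.h>
#include <stdio.h>

/* Widen 16 BF8 values to FP16, zeroing the odd lanes via the 16-bit mask. */
int main(void) {
  __m128i bf8 = _mm256_cvtneph_pbf8(_mm256_set1_ph((_Float16)2.0f)); /* 16 BF8 bytes */
  __m256h ph = _mm256_maskz_cvtpbf8_ph((__mmask16)0x5555, bf8);      /* keep even lanes only */
  _Float16 out[16];
  _mm256_storeu_ph(out, ph);
  printf("%f %f\n", (double)out[0], (double)out[1]); /* expected: 2.000000 0.000000 */
  return 0;
}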

>From dbffb089ddcc799253e61da771e84ad588106f88 Mon Sep 17 00:00:00 2001
From: "Pirog, Mikolaj Maciej" <mikolaj.maciej.pirog at intel.com>
Date: Thu, 2 Jan 2025 08:48:56 -0800
Subject: [PATCH 19/22] Fix typos

---
 clang/lib/Headers/avx10_2convertintrin.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index 77e0a5411e04db..fb89ae759f3977 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -40,7 +40,7 @@
 ///
 /// ENDFOR
 ///
-/// dst[MAX:127] := 0
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -79,7 +79,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtx2ps_ph(__m128 __A,
 /// 	FI
 /// ENDFOR
 ///
-/// dst[MAX:127] := 0
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
@@ -123,7 +123,7 @@ _mm_mask_cvtx2ps_ph(__m128h __W, __mmask8 __U, __m128 __A, __m128 __B) {
 /// 	FI
 /// ENDFOR
 ///
-/// dst[MAX:127] := 0
+/// dst[MAX:128] := 0
 /// \endcode
 ///
 /// \headerfile <immintrin.h>
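The corrected dst[MAX:128] := 0 lines follow the usual convention that everything above the written result is zeroed; the conversions with 64-bit results use the same convention (dst[MAX:64] := 0), which a hedged sketch like the following would observe (AVX10.2-256 support assumed, output is an expectation only):

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

/* Only the low 64 bits of the 128-bit result carry HF8 data;
   bytes 8..15 are documented to come back as zero. */
int main(void) {
  __m128i hf8 = _mm_cvtneph_phf8(_mm_set1_ph((_Float16)1.0f));
  uint8_t bytes[16];
  _mm_storeu_si128((__m128i *)bytes, hf8);
  printf("byte0=0x%02x byte8=0x%02x\n", bytes[0], bytes[8]); /* byte8 expected 0x00 */
  return 0;
}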

>From 637545e0393ca9da5712cd00096039ee56477d75 Mon Sep 17 00:00:00 2001
From: "Pirog, Mikolaj Maciej" <mikolaj.maciej.pirog at intel.com>
Date: Thu, 2 Jan 2025 09:30:30 -0800
Subject: [PATCH 20/22] Replace dst.fp8 with dst.hf8/bf8

---
 clang/lib/Headers/avx10_2convertintrin.h | 288 +++++++++++------------
 1 file changed, 144 insertions(+), 144 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index fb89ae759f3977..39d150c69792e2 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -408,7 +408,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.bf8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -440,9 +440,9 @@ _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.bf8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
-/// 		dst.fp8[i] := _W[i]
+/// 		dst.bf8[i] := _W[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -480,9 +480,9 @@ _mm_mask_cvtbiasph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.bf8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
-///	 	dst.fp8[i] := 0
+///	 	dst.bf8[i] := 0
 ///	 FI
 /// ENDFOR
 ///
@@ -516,7 +516,7 @@ _mm_maskz_cvtbiasph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.bf8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -549,9 +549,9 @@ _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.bf8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
-/// 		dst.fp8[i] := _W[i]
+/// 		dst.bf8[i] := _W[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -589,9 +589,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_pbf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.bf8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
-///	 	dst.fp8[i] := 0
+///	 	dst.bf8[i] := 0
 ///	 FI
 /// ENDFOR
 ///
@@ -626,7 +626,7 @@ _mm256_maskz_cvtbiasph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.bf8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -658,9 +658,9 @@ _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.bf8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
-/// 		dst.fp8[i] := _W[i]
+/// 		dst.bf8[i] := _W[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -698,9 +698,9 @@ _mm_mask_cvtbiassph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.bf8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
-///	 	dst.fp8[i] := 0
+///	 	dst.bf8[i] := 0
 ///	 FI
 /// ENDFOR
 ///
@@ -735,7 +735,7 @@ _mm_maskz_cvtbiassph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.bf8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -768,9 +768,9 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.bf8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
-/// 		dst.fp8[i] := _W[i]
+/// 		dst.bf8[i] := _W[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -808,9 +808,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_pbf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.bf8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
-///	 	dst.fp8[i] := 0
+///	 	dst.bf8[i] := 0
 ///	 FI
 /// ENDFOR
 ///
@@ -844,7 +844,7 @@ _mm256_maskz_cvtbiassph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.hf8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -876,9 +876,9 @@ _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.hf8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
-/// 		dst.fp8[i] := _W[i]
+/// 		dst.hf8[i] := _W[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -916,9 +916,9 @@ _mm_mask_cvtbiasph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.hf8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
-///	 	dst.fp8[i] := 0
+///	 	dst.hf8[i] := 0
 ///	 FI
 /// ENDFOR
 ///
@@ -952,7 +952,7 @@ _mm_maskz_cvtbiasph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.hf8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -985,9 +985,9 @@ _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.hf8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
-/// 		dst.fp8[i] := _W[i]
+/// 		dst.hf8[i] := _W[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1025,9 +1025,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_phf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.hf8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
-///	 	dst.fp8[i] := 0
+///	 	dst.hf8[i] := 0
 ///	 FI
 /// ENDFOR
 ///
@@ -1062,7 +1062,7 @@ _mm256_maskz_cvtbiasph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.hf8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -1094,9 +1094,9 @@ _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.hf8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
-/// 		dst.fp8[i] := _W[i]
+/// 		dst.hf8[i] := _W[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1134,9 +1134,9 @@ _mm_mask_cvtbiassph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.hf8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
-///	 	dst.fp8[i] := 0
+///	 	dst.hf8[i] := 0
 ///	 FI
 /// ENDFOR
 ///
@@ -1171,7 +1171,7 @@ _mm_maskz_cvtbiassph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.hf8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -1204,9 +1204,9 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.hf8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
 /// 	ELSE
-/// 		dst.fp8[i] := _W[i]
+/// 		dst.hf8[i] := _W[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1244,9 +1244,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_phf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.fp8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.hf8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
 ///	 ELSE
-///	 	dst.fp8[i] := 0
+///	 	dst.hf8[i] := 0
 ///	 FI
 /// ENDFOR
 ///
@@ -1280,9 +1280,9 @@ _mm256_maskz_cvtbiassph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF i < 8
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
+/// 		dst.bf8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
+/// 		dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
 /// 	FI
 /// ENDFOR
 ///
@@ -1316,12 +1316,12 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
+/// 			dst.bf8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
+/// 			dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
-/// 		dst.fp8 := __W.fp8[i]
+/// 		dst.bf8 := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1360,12 +1360,12 @@ _mm_mask_cvtne2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
+/// 			dst.bf8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
+/// 			dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
-/// 		dst.fp8[i] := 0
+/// 		dst.bf8[i] := 0
 /// 	FI
 /// ENDFOR
 ///
@@ -1400,9 +1400,9 @@ _mm_maskz_cvtne2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 31 
 /// 	IF i < 16 
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
+/// 		dst.bf8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
+/// 		dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
 /// 	FI
 /// ENDFOR
 ///
@@ -1436,12 +1436,12 @@ _mm256_cvtne2ph_pbf8(__m256h __A, __m256h __B) {
 /// FOR i := 0 to 31 
 /// 	IF __U[i]
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
+/// 			dst.bf8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
+/// 			dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
 /// 		FI
 /// 	ELSE
-/// 		dst.fp8[i] := __W.fp8[i]
+/// 		dst.bf8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1479,12 +1479,12 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_pbf8(
 /// \code{.operation}
 /// FOR i := 0 to 31 
 /// 	IF __U[i]
-/// 		dst.fp8[i] := 0
+/// 		dst.bf8[i] := 0
 /// 	ELSE
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__B.fp16[i])
+/// 			dst.bf8[i] := convert_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
+/// 			dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
 /// 		FI
 /// 	FI
 /// ENDFOR
@@ -1521,9 +1521,9 @@ _mm256_maskz_cvtne2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF i < 8
-/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
+/// 		dst.bf8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 8])
+/// 		dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 8])
 /// 	FI
 /// ENDFOR
 ///
@@ -1557,12 +1557,12 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
+/// 			dst.bf8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 8])
+/// 			dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
-/// 		dst.fp8 := __W.fp8[i]
+/// 		dst.bf8 := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1601,12 +1601,12 @@ _mm_mask_cvtnes2ph_pbf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
+/// 			dst.bf8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 8])
+/// 			dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
-/// 		dst.fp8[i] := 0
+/// 		dst.bf8[i] := 0
 /// 	FI
 /// ENDFOR
 ///
@@ -1642,9 +1642,9 @@ _mm_maskz_cvtnes2ph_pbf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 31
 /// 	IF i < 16 
-/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
+/// 		dst.bf8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 16])
+/// 		dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 16])
 /// 	FI
 /// ENDFOR
 ///
@@ -1678,12 +1678,12 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 /// FOR i := 0 to 31
 /// 	IF __U[i]
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
+/// 			dst.bf8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 16])
+/// 			dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 16])
 /// 		FI
 /// 	ELSE
-/// 		dst.fp8[i] := __W.fp8[i]
+/// 		dst.bf8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1721,12 +1721,12 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_pbf8(
 /// \code{.operation}
 /// FOR i := 0 to 31
 /// 	IF __U[i]
-/// 		dst.fp8[i] := 0
+/// 		dst.bf8[i] := 0
 /// 	ELSE
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
+/// 			dst.bf8[i] := convert_saturate_fp16_to_bf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 16])
+/// 			dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 16])
 /// 		FI
 /// 	FI
 /// ENDFOR
@@ -1762,9 +1762,9 @@ _mm256_maskz_cvtnes2ph_pbf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF i < 8
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
+/// 		dst.hf8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
+/// 		dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
 /// 	FI
 /// ENDFOR
 ///
@@ -1798,12 +1798,12 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
+/// 			dst.hf8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
+/// 			dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
-/// 		dst.fp8 := __W.fp8[i]
+/// 		dst.hf8 := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1842,12 +1842,12 @@ _mm_mask_cvtne2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
+/// 			dst.hf8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
+/// 			dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
-/// 		dst.fp8[i] := 0
+/// 		dst.hf8[i] := 0
 /// 	FI
 /// ENDFOR
 ///
@@ -1882,9 +1882,9 @@ _mm_maskz_cvtne2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 31
 /// 	IF i < 16 
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
+/// 		dst.hf8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
+/// 		dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
 /// 	FI
 /// ENDFOR
 ///
@@ -1918,12 +1918,12 @@ _mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
 /// FOR i := 0 to 31
 /// 	IF __U[i]
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
+/// 			dst.hf8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
+/// 			dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
 /// 		FI
 /// 	ELSE
-/// 		dst.fp8[i] := __W.fp8[i]
+/// 		dst.hf8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1961,12 +1961,12 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtne2ph_phf8(
 /// \code{.operation}
 /// FOR i := 0 to 31
 /// 	IF __U[i]
-/// 		dst.fp8[i] := 0
+/// 		dst.hf8[i] := 0
 /// 	ELSE
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
+/// 			dst.hf8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
+/// 			dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
 /// 		FI
 /// 	FI
 /// ENDFOR
@@ -2003,9 +2003,9 @@ _mm256_maskz_cvtne2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF i < 8
-/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
+/// 		dst.hf8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 8])
+/// 		dst.hf8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 8])
 /// 	FI
 /// ENDFOR
 ///
@@ -2039,12 +2039,12 @@ _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__B.fp16[i])
+/// 			dst.hf8[i] := convert_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
+/// 			dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
-/// 		dst.fp8 := __W.fp8[i]
+/// 		dst.hf8 := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2083,12 +2083,12 @@ _mm_mask_cvtnes2ph_phf8(__m128i __W, __mmask16 __U, __m128h __A, __m128h __B) {
 /// FOR i := 0 to 15
 /// 	IF __U[i]
 /// 		IF i < 8
-/// 			dst.fp8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
+/// 			dst.hf8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 8])
+/// 			dst.hf8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
-/// 		dst.fp8[i] := 0
+/// 		dst.hf8[i] := 0
 /// 	FI
 /// ENDFOR
 ///
@@ -2124,9 +2124,9 @@ _mm_maskz_cvtnes2ph_phf8(__mmask16 __U, __m128h __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 31
 /// 	IF i < 16
-/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
+/// 		dst.hf8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 16])
+/// 		dst.hf8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 16])
 /// 	FI
 /// ENDFOR
 ///
@@ -2160,12 +2160,12 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 /// FOR i := 0 to 31
 /// 	IF __U[i]
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
+/// 			dst.hf8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 16])
+/// 			dst.hf8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 16])
 /// 		FI
 /// 	ELSE
-/// 		dst.fp8[i] := __W.fp8[i]
+/// 		dst.hf8[i] := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2203,12 +2203,12 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtnes2ph_phf8(
 /// \code{.operation}
 /// FOR i := 0 to 31 
 /// 	IF __U[i]
-/// 		dst.fp8[i] := 0
+/// 		dst.hf8[i] := 0
 /// 	ELSE
 /// 		IF i < 16 
-/// 			dst.fp8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
+/// 			dst.hf8[i] := convert_saturate_fp16_to_hf8(__B.fp16[i])
 /// 		ELSE
-/// 			dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 16])
+/// 			dst.hf8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 16])
 /// 		FI
 /// 	FI
 /// ENDFOR
@@ -2439,7 +2439,7 @@ _mm256_maskz_cvtnehf8_ph(__mmask16 __U, __m128i __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
+/// 	dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -2467,9 +2467,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_pbf8(__m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
+/// 		dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8 := __W.fp8[i]
+/// 		dst.bf8 := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2504,9 +2504,9 @@ _mm_mask_cvtneph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := 0
+/// 		dst.bf8[i] := 0
 /// 	ELSE
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
+/// 		dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// 	FI
 /// ENDFOR
 ///
@@ -2536,7 +2536,7 @@ _mm_maskz_cvtneph_pbf8(__mmask8 __U, __m128h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
+/// 	dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -2564,9 +2564,9 @@ _mm256_cvtneph_pbf8(__m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
+/// 		dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8 := __W.fp8[i]
+/// 		dst.bf8 := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2600,9 +2600,9 @@ _mm256_mask_cvtneph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_bf8(__A.fp16[i])
+/// 		dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := 0
+/// 		dst.bf8[i] := 0
 /// 	FI
 /// ENDFOR
 ///
@@ -2633,7 +2633,7 @@ _mm256_maskz_cvtneph_pbf8(__mmask16 __U, __m256h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
+/// 	dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -2661,9 +2661,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_pbf8(__m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
+/// 		dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8 := __W.fp8[i]
+/// 		dst.bf8 := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2698,9 +2698,9 @@ _mm_mask_cvtnesph_pbf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
+/// 		dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := 0
+/// 		dst.bf8[i] := 0
 /// 	FI
 /// ENDFOR
 ///
@@ -2730,7 +2730,7 @@ _mm_maskz_cvtnesph_pbf8(__mmask8 __U, __m128h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
+/// 	dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -2759,9 +2759,9 @@ _mm256_cvtnesph_pbf8(__m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
+/// 		dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8 := __W.fp8[i]
+/// 		dst.bf8 := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2796,9 +2796,9 @@ _mm256_mask_cvtnesph_pbf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
+/// 		dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := 0
+/// 		dst.bf8[i] := 0
 /// 	FI
 /// ENDFOR
 ///
@@ -2829,7 +2829,7 @@ _mm256_maskz_cvtnesph_pbf8(__mmask16 __U, __m256h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
+/// 	dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -2857,9 +2857,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_phf8(__m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
+/// 		dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8 := __W.fp8[i]
+/// 		dst.hf8 := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2894,9 +2894,9 @@ _mm_mask_cvtneph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
+/// 		dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := 0
+/// 		dst.hf8[i] := 0
 /// 	FI
 /// ENDFOR
 ///
@@ -2926,7 +2926,7 @@ _mm_maskz_cvtneph_phf8(__mmask8 __U, __m128h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
+/// 	dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -2954,9 +2954,9 @@ _mm256_cvtneph_phf8(__m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
+/// 		dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8 := __W.fp8[i]
+/// 		dst.hf8 := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2990,9 +2990,9 @@ _mm256_mask_cvtneph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_fp16_to_hf8(__A.fp16[i])
+/// 		dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := 0
+/// 		dst.hf8[i] := 0
 /// 	FI
 /// ENDFOR
 ///
@@ -3023,7 +3023,7 @@ _mm256_maskz_cvtneph_phf8(__mmask16 __U, __m256h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
+/// 	dst.hf8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -3051,9 +3051,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_phf8(__m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
+/// 		dst.hf8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8 := __W.fp8[i]
+/// 		dst.hf8 := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -3088,9 +3088,9 @@ _mm_mask_cvtnesph_phf8(__m128i __W, __mmask8 __U, __m128h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
+/// 		dst.hf8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := 0
+/// 		dst.hf8[i] := 0
 /// 	FI
 /// ENDFOR
 ///
@@ -3120,7 +3120,7 @@ _mm_maskz_cvtnesph_phf8(__mmask8 __U, __m128h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
+/// 	dst.hf8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -3149,9 +3149,9 @@ _mm256_cvtnesph_phf8(__m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
+/// 		dst.hf8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8 := __W.fp8[i]
+/// 		dst.hf8 := __W.fp8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -3186,9 +3186,9 @@ _mm256_mask_cvtnesph_phf8(__m128i __W, __mmask16 __U, __m256h __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
+/// 		dst.hf8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.fp8[i] := 0
+/// 		dst.hf8[i] := 0
 /// 	FI
 /// ENDFOR
 ///
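The dst.bf8/dst.hf8 naming also makes it clearer that the two 8-bit formats encode the same value differently. A small sketch (AVX10.2-256 support assumed; the encodings printed are expectations only):

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

/* Convert the same FP16 value to BF8 (E5M2) and HF8 (E4M3) and compare
   the resulting byte encodings; they differ even though both represent 1.5. */
int main(void) {
  __m128h x = _mm_set1_ph((_Float16)1.5f);
  uint8_t bf8[16], hf8[16];
  _mm_storeu_si128((__m128i *)bf8, _mm_cvtneph_pbf8(x));
  _mm_storeu_si128((__m128i *)hf8, _mm_cvtneph_phf8(x));
  printf("bf8=0x%02x hf8=0x%02x\n", bf8[0], hf8[0]);
  return 0;
}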

>From 9f2e8f4c031d1c8088c0716cd2814c1df627e885 Mon Sep 17 00:00:00 2001
From: "Pirog, Mikolaj Maciej" <mikolaj.maciej.pirog at intel.com>
Date: Fri, 3 Jan 2025 08:26:17 -0800
Subject: [PATCH 21/22] Replace all fp8 with either bf8 or hf8

---
 clang/lib/Headers/avx10_2convertintrin.h | 56 ++++++++++++------------
 1 file changed, 28 insertions(+), 28 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index 39d150c69792e2..f81691b3801435 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -1321,7 +1321,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_pbf8(__m128h __A,
 /// 			dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
-/// 		dst.bf8 := __W.fp8[i]
+/// 		dst.bf8 := __W.bf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1441,7 +1441,7 @@ _mm256_cvtne2ph_pbf8(__m256h __A, __m256h __B) {
 /// 			dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i - 16])
 /// 		FI
 /// 	ELSE
-/// 		dst.bf8[i] := __W.fp8[i]
+/// 		dst.bf8[i] := __W.bf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1562,7 +1562,7 @@ _mm_cvtnes2ph_pbf8(__m128h __A, __m128h __B) {
 /// 			dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
-/// 		dst.bf8 := __W.fp8[i]
+/// 		dst.bf8 := __W.bf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1683,7 +1683,7 @@ _mm256_cvtnes2ph_pbf8(__m256h __A, __m256h __B) {
 /// 			dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i - 16])
 /// 		FI
 /// 	ELSE
-/// 		dst.bf8[i] := __W.fp8[i]
+/// 		dst.bf8[i] := __W.bf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1803,7 +1803,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtne2ph_phf8(__m128h __A,
 /// 			dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
-/// 		dst.hf8 := __W.fp8[i]
+/// 		dst.hf8 := __W.hf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -1923,7 +1923,7 @@ _mm256_cvtne2ph_phf8(__m256h __A, __m256h __B) {
 /// 			dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i - 16])
 /// 		FI
 /// 	ELSE
-/// 		dst.hf8[i] := __W.fp8[i]
+/// 		dst.hf8[i] := __W.hf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2044,7 +2044,7 @@ _mm_cvtnes2ph_phf8(__m128h __A, __m128h __B) {
 /// 			dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i - 8])
 /// 		FI
 /// 	ELSE
-/// 		dst.hf8 := __W.fp8[i]
+/// 		dst.hf8 := __W.hf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2165,7 +2165,7 @@ _mm256_cvtnes2ph_phf8(__m256h __A, __m256h __B) {
 /// 			dst.hf8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i - 16])
 /// 		FI
 /// 	ELSE
-/// 		dst.hf8[i] := __W.fp8[i]
+/// 		dst.hf8[i] := __W.hf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2243,7 +2243,7 @@ _mm256_maskz_cvtnes2ph_phf8(__mmask32 __U, __m256h __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp16[i] := convert_hf8_to_fp16(__A.fp8[i])
+/// 	dst.fp16[i] := convert_hf8_to_fp16(__A.hf8[i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -2271,7 +2271,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtnehf8_ph(__m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_hf8_to_fp16(__A.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__A.hf8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
@@ -2308,7 +2308,7 @@ _mm_mask_cvtnehf8_ph(__m128h __W, __mmask8 __U, __m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_hf8_to_fp16(__A.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__A.hf8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := 0
 /// 	FI
@@ -2340,7 +2340,7 @@ _mm_maskz_cvtnehf8_ph(__mmask8 __U, __m128i __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp16[i] := convert_hf8_to_fp16(__A.fp8[i])
+/// 	dst.fp16[i] := convert_hf8_to_fp16(__A.hf8[i])
 /// ENDFOR
 ///
 /// dst[MAX:256] := 0
@@ -2369,7 +2369,7 @@ _mm256_cvtnehf8_ph(__m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_hf8_to_fp16(__A.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__A.hf8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
@@ -2406,7 +2406,7 @@ _mm256_mask_cvtnehf8_ph(__m256h __W, __mmask16 __U, __m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.hf8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := 0
 /// 	FI
@@ -2469,7 +2469,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_pbf8(__m128h __A) {
 /// 	IF __U[i]
 /// 		dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.bf8 := __W.fp8[i]
+/// 		dst.bf8 := __W.bf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2566,7 +2566,7 @@ _mm256_cvtneph_pbf8(__m256h __A) {
 /// 	IF __U[i]
 /// 		dst.bf8[i] := convert_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.bf8 := __W.fp8[i]
+/// 		dst.bf8 := __W.bf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2663,7 +2663,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_pbf8(__m128h __A) {
 /// 	IF __U[i]
 /// 		dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.bf8 := __W.fp8[i]
+/// 		dst.bf8 := __W.bf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2761,7 +2761,7 @@ _mm256_cvtnesph_pbf8(__m256h __A) {
 /// 	IF __U[i]
 /// 		dst.bf8[i] := convert_saturate_fp16_to_bf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.bf8 := __W.fp8[i]
+/// 		dst.bf8 := __W.bf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2859,7 +2859,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtneph_phf8(__m128h __A) {
 /// 	IF __U[i]
 /// 		dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.hf8 := __W.fp8[i]
+/// 		dst.hf8 := __W.hf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -2956,7 +2956,7 @@ _mm256_cvtneph_phf8(__m256h __A) {
 /// 	IF __U[i]
 /// 		dst.hf8[i] := convert_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.hf8 := __W.fp8[i]
+/// 		dst.hf8 := __W.hf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -3053,7 +3053,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_cvtnesph_phf8(__m128h __A) {
 /// 	IF __U[i]
 /// 		dst.hf8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.hf8 := __W.fp8[i]
+/// 		dst.hf8 := __W.hf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -3151,7 +3151,7 @@ _mm256_cvtnesph_phf8(__m256h __A) {
 /// 	IF __U[i]
 /// 		dst.hf8[i] := convert_saturate_fp16_to_hf8(__A.fp16[i])
 /// 	ELSE
-/// 		dst.hf8 := __W.fp8[i]
+/// 		dst.hf8 := __W.hf8[i]
 /// 	FI
 /// ENDFOR
 ///
@@ -3218,7 +3218,7 @@ _mm256_maskz_cvtnesph_phf8(__mmask16 __U, __m256h __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.fp16[i] := convert_hf8_to_fp16(__B.fp8[i])
+/// 	dst.fp16[i] := convert_hf8_to_fp16(__B.hf8[i])
 /// ENDFOR
 /// \endcode
 ///
@@ -3243,7 +3243,7 @@ static __inline__ __m128h __DEFAULT_FN_ATTRS128 _mm_cvtpbf8_ph(__m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.hf8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
@@ -3278,7 +3278,7 @@ _mm_mask_cvtpbf8_ph(__m128h __W, __mmask8 __U, __m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.hf8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := 0
 /// 	FI
@@ -3307,7 +3307,7 @@ _mm_maskz_cvtpbf8_ph(__mmask8 __U, __m128i __A) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.fp16[i] := convert_hf8_to_fp16(__B.fp8[i])
+/// 	dst.fp16[i] := convert_hf8_to_fp16(__B.hf8[i])
 /// ENDFOR
 /// \endcode
 ///
@@ -3332,7 +3332,7 @@ static __inline__ __m256h __DEFAULT_FN_ATTRS256 _mm256_cvtpbf8_ph(__m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.hf8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := __W.fp16[i]
 /// 	FI
@@ -3367,7 +3367,7 @@ _mm256_mask_cvtpbf8_ph(__m256h __W, __mmask16 __U, __m128i __A) {
 /// \code{.operation}
 /// FOR i := 0 to 15 
 /// 	IF __U[i]
-/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.fp8[i])
+/// 		dst.fp16[i] := convert_hf8_to_fp16(__B.hf8[i])
 /// 	ELSE
 /// 		dst.fp16[i] := 0
 /// 	FI
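To tie the corrected __W.hf8[i] wording to behaviour, here is a hedged sketch of merge masking, where the unselected lanes keep the bytes from __W (AVX10.2-256 support assumed; values illustrative):

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

/* Convert only the low four FP16 lanes; lanes 4..7 of the result are
   merged from __W, and the upper 64 bits are zeroed as documented. */
int main(void) {
  __m128i w = _mm_set1_epi8((char)0xAA);                   /* pre-existing HF8 bytes */
  __m128h x = _mm_set1_ph((_Float16)1.0f);
  __m128i r = _mm_mask_cvtneph_phf8(w, (__mmask8)0x0F, x);
  uint8_t out[16];
  _mm_storeu_si128((__m128i *)out, r);
  printf("lane0=0x%02x lane4=0x%02x\n", out[0], out[4]);   /* lane4 expected 0xaa */
  return 0;
}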

>From d1b6208b66c1d83fce6bec2be8747127bba4298f Mon Sep 17 00:00:00 2001
From: "Pirog, Mikolaj Maciej" <mikolaj.maciej.pirog at intel.com>
Date: Fri, 3 Jan 2025 08:40:54 -0800
Subject: [PATCH 22/22] int8 -> uint8

---
 clang/lib/Headers/avx10_2convertintrin.h | 48 ++++++++++++------------
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/clang/lib/Headers/avx10_2convertintrin.h b/clang/lib/Headers/avx10_2convertintrin.h
index f81691b3801435..7fc255f5474c84 100644
--- a/clang/lib/Headers/avx10_2convertintrin.h
+++ b/clang/lib/Headers/avx10_2convertintrin.h
@@ -408,7 +408,7 @@ _mm256_maskz_cvtx2ps_ph(__mmask16 __U, __m256 __A, __m256 __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.bf8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.bf8[i] := add_fp16_uint8_convert_to_bf8(__A.fp16[i], __B.uint8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -440,7 +440,7 @@ _mm_cvtbiasph_pbf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.bf8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.bf8[i] := add_fp16_uint8_convert_to_bf8(__A.fp16[i], __B.uint8[2 * i])
 /// 	ELSE
 /// 		dst.bf8[i] := _W[i]
 /// 	FI
@@ -480,7 +480,7 @@ _mm_mask_cvtbiasph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.bf8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.bf8[i] := add_fp16_uint8_convert_to_bf8(__A.fp16[i], __B.uint8[2 * i])
 ///	 ELSE
 ///	 	dst.bf8[i] := 0
 ///	 FI
@@ -516,7 +516,7 @@ _mm_maskz_cvtbiasph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.bf8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.bf8[i] := add_fp16_uint8_convert_to_bf8(__A.fp16[i], __B.uint8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -549,7 +549,7 @@ _mm256_cvtbiasph_pbf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.bf8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.bf8[i] := add_fp16_uint8_convert_to_bf8(__A.fp16[i], __B.uint8[2 * i])
 /// 	ELSE
 /// 		dst.bf8[i] := _W[i]
 /// 	FI
@@ -589,7 +589,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_pbf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.bf8[i] := add_fp16_int8_convert_to_bf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.bf8[i] := add_fp16_uint8_convert_to_bf8(__A.fp16[i], __B.uint8[2 * i])
 ///	 ELSE
 ///	 	dst.bf8[i] := 0
 ///	 FI
@@ -626,7 +626,7 @@ _mm256_maskz_cvtbiasph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.bf8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.bf8[i] := add_fp16_uint8_convert_saturate_to_bf8(__A.fp16[i], __B.uint8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -658,7 +658,7 @@ _mm_cvtbiassph_pbf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.bf8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.bf8[i] := add_fp16_uint8_convert_saturate_to_bf8(__A.fp16[i], __B.uint8[2 * i])
 /// 	ELSE
 /// 		dst.bf8[i] := _W[i]
 /// 	FI
@@ -698,7 +698,7 @@ _mm_mask_cvtbiassph_pbf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.bf8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.bf8[i] := add_fp16_uint8_convert_saturate_to_bf8(__A.fp16[i], __B.uint8[2 * i])
 ///	 ELSE
 ///	 	dst.bf8[i] := 0
 ///	 FI
@@ -735,7 +735,7 @@ _mm_maskz_cvtbiassph_pbf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.bf8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.bf8[i] := add_fp16_uint8_convert_saturate_to_bf8(__A.fp16[i], __B.uint8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -768,7 +768,7 @@ _mm256_cvtbiassph_pbf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.bf8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.bf8[i] := add_fp16_uint8_convert_saturate_to_bf8(__B.fp16[i], __A.uint8[2 * i])
 /// 	ELSE
 /// 		dst.bf8[i] := __W[i]
 /// 	FI
@@ -808,7 +808,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_pbf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.bf8[i] := add_fp16_int8_convert_saturate_to_bf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.bf8[i] := add_fp16_uint8_convert_saturate_to_bf8(__B.fp16[i], __A.uint8[2 * i])
 ///	 ELSE
 ///	 	dst.bf8[i] := 0
 ///	 FI
@@ -844,7 +844,7 @@ _mm256_maskz_cvtbiassph_pbf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.hf8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.hf8[i] := add_fp16_uint8_convert_to_hf8(__B.fp16[i], __A.uint8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
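The _phf8 family mirrors the _pbf8 family but targets the HF8 (E4M3) format rather than BF8 (E5M2); that is my understanding of the naming, not something these hunks state. Sketch under the same assumptions as above:

#include <immintrin.h>

static __m128i convert_to_hf8(__m128i bias, __m128h values) {
  return _mm_cvtbiasph_phf8(bias, values);
}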
@@ -876,7 +876,7 @@ _mm_cvtbiasph_phf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.hf8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.hf8[i] := add_fp16_uint8_convert_to_hf8(__B.fp16[i], __A.uint8[2 * i])
 /// 	ELSE
 /// 		dst.hf8[i] := __W[i]
 /// 	FI
@@ -916,7 +916,7 @@ _mm_mask_cvtbiasph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.hf8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.hf8[i] := add_fp16_uint8_convert_to_hf8(__B.fp16[i], __A.uint8[2 * i])
 ///	 ELSE
 ///	 	dst.hf8[i] := 0
 ///	 FI
@@ -952,7 +952,7 @@ _mm_maskz_cvtbiasph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.hf8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.hf8[i] := add_fp16_uint8_convert_to_hf8(__B.fp16[i], __A.uint8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -985,7 +985,7 @@ _mm256_cvtbiasph_phf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.hf8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.hf8[i] := add_fp16_uint8_convert_to_hf8(__B.fp16[i], __A.uint8[2 * i])
 /// 	ELSE
 /// 		dst.hf8[i] := __W[i]
 /// 	FI
@@ -1025,7 +1025,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiasph_phf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.hf8[i] := add_fp16_int8_convert_to_hf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.hf8[i] := add_fp16_uint8_convert_to_hf8(__B.fp16[i], __A.uint8[2 * i])
 ///	 ELSE
 ///	 	dst.hf8[i] := 0
 ///	 FI
@@ -1062,7 +1062,7 @@ _mm256_maskz_cvtbiasph_phf8(__mmask16 __U, __m256i __A, __m256h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 7
-/// 	dst.hf8[i] := add_fp16_int8_convert_to_saturate_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.hf8[i] := add_fp16_uint8_convert_saturate_to_hf8(__B.fp16[i], __A.uint8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:64] := 0
@@ -1094,7 +1094,7 @@ _mm_cvtbiassph_phf8(__m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-/// 		dst.hf8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.hf8[i] := add_fp16_uint8_convert_saturate_to_hf8(__B.fp16[i], __A.uint8[2 * i])
 /// 	ELSE
 /// 		dst.hf8[i] := __W[i]
 /// 	FI
@@ -1134,7 +1134,7 @@ _mm_mask_cvtbiassph_phf8(__m128i __W, __mmask8 __U, __m128i __A, __m128h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 7
 /// 	IF __U[i]
-///	 	dst.hf8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.hf8[i] := add_fp16_uint8_convert_saturate_to_hf8(__B.fp16[i], __A.uint8[2 * i])
 ///	 ELSE
 ///	 	dst.hf8[i] := 0
 ///	 FI
@@ -1171,7 +1171,7 @@ _mm_maskz_cvtbiassph_phf8(__mmask8 __U, __m128i __A, __m128h __B) {
 ///
 /// \code{.operation}
 /// FOR i := 0 to 15
-/// 	dst.hf8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 	dst.hf8[i] := add_fp16_uint8_convert_saturate_to_hf8(__B.fp16[i], __A.uint8[2 * i])
 /// ENDFOR
 ///
 /// dst[MAX:128] := 0
@@ -1204,7 +1204,7 @@ _mm256_cvtbiassph_phf8(__m256i __A, __m256h __B) {
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-/// 		dst.hf8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
+/// 		dst.hf8[i] := add_fp16_uint8_convert_saturate_to_hf8(__B.fp16[i], __A.uint8[2 * i])
 /// 	ELSE
 /// 		dst.hf8[i] := __W[i]
 /// 	FI
@@ -1244,7 +1244,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS256 _mm256_mask_cvtbiassph_phf8(
 /// \code{.operation}
 /// FOR i := 0 to 15
 /// 	IF __U[i]
-///	 	dst.hf8[i] := add_fp16_int8_convert_saturate_to_hf8(__A.fp16[i], __B.int8[2 * i])
+///	 	dst.hf8[i] := add_fp16_uint8_convert_saturate_to_hf8(__B.fp16[i], __A.uint8[2 * i])
 ///	 ELSE
 ///	 	dst.hf8[i] := 0
 ///	 FI


