[clang] 68a8880 - [X86] Add reduce_*_ep[i|u]8/16 series intrinsics.
Freddy Ye via cfe-commits
cfe-commits at lists.llvm.org
Thu Dec 22 22:55:04 PST 2022
Author: Freddy Ye
Date: 2022-12-23T14:54:53+08:00
New Revision: 68a888012b12f9846131baae2533927617a58584
URL: https://github.com/llvm/llvm-project/commit/68a888012b12f9846131baae2533927617a58584
DIFF: https://github.com/llvm/llvm-project/commit/68a888012b12f9846131baae2533927617a58584.diff
LOG: [X86] Add reduce_*_ep[i|u]8/16 series intrinsics.
Reviewed By: pengfei, skan
Differential Revision: https://reviews.llvm.org/D140531
Added:
clang/test/CodeGen/X86/avx512vlbw-reduceIntrin.c
Modified:
clang/docs/ReleaseNotes.rst
clang/lib/Headers/avx512vlbwintrin.h
Removed:
################################################################################
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 4ba28f5df5dc1..8c288a98eec99 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -803,6 +803,8 @@ X86 Support in Clang
- ``-march=raptorlake`` and ``-march=meteorlake`` are now supported.
- ``-march=sierraforest``, ``-march=graniterapids`` and ``-march=grandridge`` are now supported.
- Lift _BitInt() supported max width from 128 to 8388608.
+- Support intrinsics of ``_mm(256)_reduce_(add|mul|or|and)_epi8/16``.
+- Support intrinsics of ``_mm(256)_reduce_(max|min)_ep[i|u]8/16``.
WebAssembly Support in Clang
----------------------------
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index 521ccab27e043..a40ee824ef210 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -2803,6 +2803,358 @@ _mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
(__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
(__v16hi)_mm256_setzero_si256()))
+/* Horizontal reductions over the 8 signed 16-bit elements of a 128-bit
+   vector.  Each wraps a generic __builtin_reduce_* builtin, which lowers to
+   the corresponding llvm.vector.reduce.* LLVM intrinsic. */
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_add_epi16(__m128i __W) {
+ return __builtin_reduce_add((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_mul_epi16(__m128i __W) {
+ return __builtin_reduce_mul((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_and_epi16(__m128i __W) {
+ return __builtin_reduce_and((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_or_epi16(__m128i __W) {
+ return __builtin_reduce_or((__v8hi)__W);
+}
+
+/* Masked variants: lanes whose mask bit is clear are first replaced with the
+   operation's identity element (0 for add/or via maskz_mov, 1 for mul,
+   all-ones for and), so inactive lanes cannot affect the reduction. */
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_add_epi16( __mmask8 __M, __m128i __W) {
+ __W = _mm_maskz_mov_epi16(__M, __W);
+ return __builtin_reduce_add((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_mul_epi16( __mmask8 __M, __m128i __W) {
+ __W = _mm_mask_mov_epi16(_mm_set1_epi16(1), __M, __W);
+ return __builtin_reduce_mul((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_and_epi16( __mmask8 __M, __m128i __W) {
+ __W = _mm_mask_mov_epi16(_mm_set1_epi16(-1), __M, __W);
+ return __builtin_reduce_and((__v8hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_or_epi16(__mmask8 __M, __m128i __W) {
+ __W = _mm_maskz_mov_epi16(__M, __W);
+ return __builtin_reduce_or((__v8hi)__W);
+}
+
+/* Unmasked signed (epi16) and unsigned (epu16) min/max reductions over the
+   8 x 16-bit elements; signedness is selected by the vector cast type. */
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_max_epi16(__m128i __V) {
+ return __builtin_reduce_max((__v8hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+_mm_reduce_max_epu16(__m128i __V) {
+ return __builtin_reduce_max((__v8hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_reduce_min_epi16(__m128i __V) {
+ return __builtin_reduce_min((__v8hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+_mm_reduce_min_epu16(__m128i __V) {
+ return __builtin_reduce_min((__v8hu)__V);
+}
+
+/* Masked min/max reductions over 8 x 16-bit elements.  Masked-off lanes are
+   seeded with the comparison's identity value (INT16_MIN for signed max, 0
+   for unsigned max, INT16_MAX for signed min, UINT16_MAX for unsigned min)
+   so they cannot win the reduction.  An 8-element vector needs only an
+   8-bit mask, so the mask parameter is __mmask8 — matching the masked
+   add/mul/and/or epi16 intrinsics above and Intel's declared signatures
+   (the previous __mmask16 was an inconsistency; only the low 8 bits are
+   meaningful, so this change is backward compatible). */
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_max_epi16(__mmask8 __M, __m128i __V) {
+ __V = _mm_mask_mov_epi16(_mm_set1_epi16(-32767-1), __M, __V);
+ return __builtin_reduce_max((__v8hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_max_epu16(__mmask8 __M, __m128i __V) {
+ __V = _mm_maskz_mov_epi16(__M, __V);
+ return __builtin_reduce_max((__v8hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_min_epi16(__mmask8 __M, __m128i __V) {
+ __V = _mm_mask_mov_epi16(_mm_set1_epi16(32767), __M, __V);
+ return __builtin_reduce_min((__v8hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_min_epu16(__mmask8 __M, __m128i __V) {
+ __V = _mm_mask_mov_epi16(_mm_set1_epi16(-1), __M, __V);
+ return __builtin_reduce_min((__v8hu)__V);
+}
+
+/* 256-bit counterparts of the epi16 reductions above: horizontal
+   add/mul/and/or over 16 signed 16-bit elements. */
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_add_epi16(__m256i __W) {
+ return __builtin_reduce_add((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_mul_epi16(__m256i __W) {
+ return __builtin_reduce_mul((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_and_epi16(__m256i __W) {
+ return __builtin_reduce_and((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_or_epi16(__m256i __W) {
+ return __builtin_reduce_or((__v16hi)__W);
+}
+
+/* Masked variants: masked-off lanes are replaced with the operation's
+   identity element (0 for add/or, 1 for mul, all-ones for and) before the
+   reduction, so they do not influence the result. */
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_add_epi16( __mmask16 __M, __m256i __W) {
+ __W = _mm256_maskz_mov_epi16(__M, __W);
+ return __builtin_reduce_add((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_mul_epi16( __mmask16 __M, __m256i __W) {
+ __W = _mm256_mask_mov_epi16(_mm256_set1_epi16(1), __M, __W);
+ return __builtin_reduce_mul((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_and_epi16( __mmask16 __M, __m256i __W) {
+ __W = _mm256_mask_mov_epi16(_mm256_set1_epi16(-1), __M, __W);
+ return __builtin_reduce_and((__v16hi)__W);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_or_epi16(__mmask16 __M, __m256i __W) {
+ __W = _mm256_maskz_mov_epi16(__M, __W);
+ return __builtin_reduce_or((__v16hi)__W);
+}
+
+/* Unmasked signed/unsigned min/max reductions over 16 x 16-bit elements. */
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_max_epi16(__m256i __V) {
+ return __builtin_reduce_max((__v16hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+_mm256_reduce_max_epu16(__m256i __V) {
+ return __builtin_reduce_max((__v16hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_reduce_min_epi16(__m256i __V) {
+ return __builtin_reduce_min((__v16hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+_mm256_reduce_min_epu16(__m256i __V) {
+ return __builtin_reduce_min((__v16hu)__V);
+}
+
+/* Masked min/max: masked-off lanes are seeded with the comparison's
+   identity (INT16_MIN / 0 for max, INT16_MAX / UINT16_MAX for min). */
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_max_epi16(__mmask16 __M, __m256i __V) {
+ __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(-32767-1), __M, __V);
+ return __builtin_reduce_max((__v16hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_max_epu16(__mmask16 __M, __m256i __V) {
+ __V = _mm256_maskz_mov_epi16(__M, __V);
+ return __builtin_reduce_max((__v16hu)__V);
+}
+
+static __inline__ short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_min_epi16(__mmask16 __M, __m256i __V) {
+ __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(32767), __M, __V);
+ return __builtin_reduce_min((__v16hi)__V);
+}
+
+static __inline__ unsigned short __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_min_epu16(__mmask16 __M, __m256i __V) {
+ __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(-1), __M, __V);
+ return __builtin_reduce_min((__v16hu)__V);
+}
+
+/* 8-bit element variants: horizontal add/mul/and/or over the 16 signed
+   bytes of a 128-bit vector.  A 16-element vector takes a __mmask16. */
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_add_epi8(__m128i __W) {
+ return __builtin_reduce_add((__v16qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_mul_epi8(__m128i __W) {
+ return __builtin_reduce_mul((__v16qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_and_epi8(__m128i __W) {
+ return __builtin_reduce_and((__v16qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_or_epi8(__m128i __W) {
+ return __builtin_reduce_or((__v16qs)__W);
+}
+
+/* Masked variants: masked-off lanes are replaced with the operation's
+   identity element (0 for add/or, 1 for mul, all-ones for and). */
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_add_epi8( __mmask16 __M, __m128i __W) {
+ __W = _mm_maskz_mov_epi8(__M, __W);
+ return __builtin_reduce_add((__v16qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_mul_epi8( __mmask16 __M, __m128i __W) {
+ __W = _mm_mask_mov_epi8(_mm_set1_epi8(1), __M, __W);
+ return __builtin_reduce_mul((__v16qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_and_epi8( __mmask16 __M, __m128i __W) {
+ __W = _mm_mask_mov_epi8(_mm_set1_epi8(-1), __M, __W);
+ return __builtin_reduce_and((__v16qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_or_epi8(__mmask16 __M, __m128i __W) {
+ __W = _mm_maskz_mov_epi8(__M, __W);
+ return __builtin_reduce_or((__v16qs)__W);
+}
+
+/* Unmasked signed/unsigned min/max reductions over 16 x 8-bit elements. */
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_max_epi8(__m128i __V) {
+ return __builtin_reduce_max((__v16qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+_mm_reduce_max_epu8(__m128i __V) {
+ return __builtin_reduce_max((__v16qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_reduce_min_epi8(__m128i __V) {
+ return __builtin_reduce_min((__v16qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+_mm_reduce_min_epu8(__m128i __V) {
+ return __builtin_reduce_min((__v16qu)__V);
+}
+
+/* Masked min/max: masked-off lanes are seeded with the comparison's
+   identity (INT8_MIN / 0 for max, INT8_MAX / UINT8_MAX for min). */
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_max_epi8(__mmask16 __M, __m128i __V) {
+ __V = _mm_mask_mov_epi8(_mm_set1_epi8(-127-1), __M, __V);
+ return __builtin_reduce_max((__v16qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_max_epu8(__mmask16 __M, __m128i __V) {
+ __V = _mm_maskz_mov_epi8(__M, __V);
+ return __builtin_reduce_max((__v16qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_min_epi8(__mmask16 __M, __m128i __V) {
+ __V = _mm_mask_mov_epi8(_mm_set1_epi8(127), __M, __V);
+ return __builtin_reduce_min((__v16qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS128
+_mm_mask_reduce_min_epu8(__mmask16 __M, __m128i __V) {
+ __V = _mm_mask_mov_epi8(_mm_set1_epi8(-1), __M, __V);
+ return __builtin_reduce_min((__v16qu)__V);
+}
+
+/* 256-bit 8-bit element variants: reductions over 32 signed bytes.  A
+   32-element vector takes a __mmask32. */
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_add_epi8(__m256i __W) {
+ return __builtin_reduce_add((__v32qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_mul_epi8(__m256i __W) {
+ return __builtin_reduce_mul((__v32qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_and_epi8(__m256i __W) {
+ return __builtin_reduce_and((__v32qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_or_epi8(__m256i __W) {
+ return __builtin_reduce_or((__v32qs)__W);
+}
+
+/* Masked variants: masked-off lanes are replaced with the operation's
+   identity element (0 for add/or, 1 for mul, all-ones for and). */
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_add_epi8( __mmask32 __M, __m256i __W) {
+ __W = _mm256_maskz_mov_epi8(__M, __W);
+ return __builtin_reduce_add((__v32qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_mul_epi8( __mmask32 __M, __m256i __W) {
+ __W = _mm256_mask_mov_epi8(_mm256_set1_epi8(1), __M, __W);
+ return __builtin_reduce_mul((__v32qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_and_epi8( __mmask32 __M, __m256i __W) {
+ __W = _mm256_mask_mov_epi8(_mm256_set1_epi8(-1), __M, __W);
+ return __builtin_reduce_and((__v32qs)__W);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_or_epi8(__mmask32 __M, __m256i __W) {
+ __W = _mm256_maskz_mov_epi8(__M, __W);
+ return __builtin_reduce_or((__v32qs)__W);
+}
+
+/* Unmasked signed/unsigned min/max reductions over 32 x 8-bit elements. */
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_max_epi8(__m256i __V) {
+ return __builtin_reduce_max((__v32qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+_mm256_reduce_max_epu8(__m256i __V) {
+ return __builtin_reduce_max((__v32qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_reduce_min_epi8(__m256i __V) {
+ return __builtin_reduce_min((__v32qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+_mm256_reduce_min_epu8(__m256i __V) {
+ return __builtin_reduce_min((__v32qu)__V);
+}
+
+/* Masked min/max: masked-off lanes are seeded with the comparison's
+   identity (INT8_MIN / 0 for max, INT8_MAX / UINT8_MAX for min). */
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_max_epi8(__mmask32 __M, __m256i __V) {
+ __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(-127-1), __M, __V);
+ return __builtin_reduce_max((__v32qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_max_epu8(__mmask32 __M, __m256i __V) {
+ __V = _mm256_maskz_mov_epi8(__M, __V);
+ return __builtin_reduce_max((__v32qu)__V);
+}
+
+static __inline__ signed char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_min_epi8(__mmask32 __M, __m256i __V) {
+ __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(127), __M, __V);
+ return __builtin_reduce_min((__v32qs)__V);
+}
+
+static __inline__ unsigned char __DEFAULT_FN_ATTRS256
+_mm256_mask_reduce_min_epu8(__mmask32 __M, __m256i __V) {
+ __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(-1), __M, __V);
+ return __builtin_reduce_min((__v32qu)__V);
+}
+
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
diff --git a/clang/test/CodeGen/X86/avx512vlbw-reduceIntrin.c b/clang/test/CodeGen/X86/avx512vlbw-reduceIntrin.c
new file mode 100644
index 0000000000000..0a1692001efa6
--- /dev/null
+++ b/clang/test/CodeGen/X86/avx512vlbw-reduceIntrin.c
@@ -0,0 +1,420 @@
+// RUN: %clang_cc1 -ffreestanding %s -O0 -triple=x86_64 -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+// RUN: %clang_cc1 -ffreestanding %s -O0 -triple=i386 -target-feature +avx512bw -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s
+
+#include <immintrin.h>
+
+short test_mm_reduce_add_epi16(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_add_epi16(
+// CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %{{.*}})
+ return _mm_reduce_add_epi16(__W);
+}
+
+short test_mm_reduce_mul_epi16(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_mul_epi16(
+// CHECK: call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %{{.*}})
+ return _mm_reduce_mul_epi16(__W);
+}
+
+short test_mm_reduce_or_epi16(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_or_epi16(
+// CHECK: call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %{{.*}})
+ return _mm_reduce_or_epi16(__W);
+}
+
+short test_mm_reduce_and_epi16(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_and_epi16(
+// CHECK: call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %{{.*}})
+ return _mm_reduce_and_epi16(__W);
+}
+
+short test_mm_mask_reduce_add_epi16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_add_epi16(
+// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> %{{.*}})
+ return _mm_mask_reduce_add_epi16(__M, __W);
+}
+
+short test_mm_mask_reduce_mul_epi16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_mul_epi16(
+// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.mul.v8i16(<8 x i16> %{{.*}})
+ return _mm_mask_reduce_mul_epi16(__M, __W);
+}
+
+short test_mm_mask_reduce_and_epi16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_and_epi16(
+// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.and.v8i16(<8 x i16> %{{.*}}
+ return _mm_mask_reduce_and_epi16(__M, __W);
+}
+
+short test_mm_mask_reduce_or_epi16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_or_epi16(
+// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.or.v8i16(<8 x i16> %{{.*}})
+ return _mm_mask_reduce_or_epi16(__M, __W);
+}
+
+short test_mm256_reduce_add_epi16(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_add_epi16(
+// CHECK: call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %{{.*}})
+ return _mm256_reduce_add_epi16(__W);
+}
+
+short test_mm256_reduce_mul_epi16(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_mul_epi16(
+// CHECK: call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %{{.*}})
+ return _mm256_reduce_mul_epi16(__W);
+}
+
+short test_mm256_reduce_or_epi16(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_or_epi16(
+// CHECK: call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %{{.*}})
+ return _mm256_reduce_or_epi16(__W);
+}
+
+short test_mm256_reduce_and_epi16(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_and_epi16(
+// CHECK: call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %{{.*}})
+ return _mm256_reduce_and_epi16(__W);
+}
+
+short test_mm256_mask_reduce_add_epi16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_add_epi16(
+// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> %{{.*}})
+ return _mm256_mask_reduce_add_epi16(__M, __W);
+}
+
+short test_mm256_mask_reduce_mul_epi16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_mul_epi16(
+// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.mul.v16i16(<16 x i16> %{{.*}})
+ return _mm256_mask_reduce_mul_epi16(__M, __W);
+}
+
+short test_mm256_mask_reduce_and_epi16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_and_epi16(
+// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.and.v16i16(<16 x i16> %{{.*}})
+ return _mm256_mask_reduce_and_epi16(__M, __W);
+}
+
+short test_mm256_mask_reduce_or_epi16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_or_epi16(
+// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.or.v16i16(<16 x i16> %{{.*}})
+ return _mm256_mask_reduce_or_epi16(__M, __W);
+}
+
+signed char test_mm_reduce_add_epi8(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_add_epi8(
+// CHECK: call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %{{.*}})
+ return _mm_reduce_add_epi8(__W);
+}
+
+signed char test_mm_reduce_mul_epi8(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_mul_epi8(
+// CHECK: call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %{{.*}})
+ return _mm_reduce_mul_epi8(__W);
+}
+
+signed char test_mm_reduce_and_epi8(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_and_epi8(
+// CHECK: call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %{{.*}})
+ return _mm_reduce_and_epi8(__W);
+}
+
+signed char test_mm_reduce_or_epi8(__m128i __W){
+// CHECK-LABEL: @test_mm_reduce_or_epi8(
+// CHECK: call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %{{.*}})
+ return _mm_reduce_or_epi8(__W);
+}
+
+signed char test_mm_mask_reduce_add_epi8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_add_epi8(
+// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> %{{.*}})
+ return _mm_mask_reduce_add_epi8(__M, __W);
+}
+
+signed char test_mm_mask_reduce_mul_epi8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_mul_epi8(
+// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.mul.v16i8(<16 x i8> %{{.*}})
+ return _mm_mask_reduce_mul_epi8(__M, __W);
+}
+
+signed char test_mm_mask_reduce_and_epi8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_and_epi8(
+// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.and.v16i8(<16 x i8> %{{.*}})
+ return _mm_mask_reduce_and_epi8(__M, __W);
+}
+
+signed char test_mm_mask_reduce_or_epi8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: @test_mm_mask_reduce_or_epi8(
+// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.or.v16i8(<16 x i8> %{{.*}})
+ return _mm_mask_reduce_or_epi8(__M, __W);
+}
+
+signed char test_mm256_reduce_add_epi8(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_add_epi8(
+// CHECK: call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %{{.*}})
+ return _mm256_reduce_add_epi8(__W);
+}
+
+signed char test_mm256_reduce_mul_epi8(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_mul_epi8(
+// CHECK: call i8 @llvm.vector.reduce.mul.v32i8(<32 x i8> %{{.*}})
+ return _mm256_reduce_mul_epi8(__W);
+}
+
+signed char test_mm256_reduce_and_epi8(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_and_epi8(
+// CHECK: call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %{{.*}})
+ return _mm256_reduce_and_epi8(__W);
+}
+
+signed char test_mm256_reduce_or_epi8(__m256i __W){
+// CHECK-LABEL: @test_mm256_reduce_or_epi8(
+// CHECK: call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %{{.*}})
+ return _mm256_reduce_or_epi8(__W);
+}
+
+signed char test_mm256_mask_reduce_add_epi8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_add_epi8(
+// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.add.v32i8(<32 x i8> %{{.*}})
+ return _mm256_mask_reduce_add_epi8(__M, __W);
+}
+
+signed char test_mm256_mask_reduce_mul_epi8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_mul_epi8(
+// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.mul.v32i8(<32 x i8> %{{.*}})
+ return _mm256_mask_reduce_mul_epi8(__M, __W);
+}
+
+signed char test_mm256_mask_reduce_and_epi8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_and_epi8(
+// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.and.v32i8(<32 x i8> %{{.*}})
+ return _mm256_mask_reduce_and_epi8(__M, __W);
+}
+
+signed char test_mm256_mask_reduce_or_epi8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: @test_mm256_mask_reduce_or_epi8(
+// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.or.v32i8(<32 x i8> %{{.*}})
+ return _mm256_mask_reduce_or_epi8(__M, __W);
+}
+
+short test_mm_reduce_max_epi16(__m128i __W){
+// CHECK-LABEL: test_mm_reduce_max_epi16
+// CHECK: call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %{{.*}})
+ return _mm_reduce_max_epi16(__W);
+}
+
+short test_mm_reduce_min_epi16(__m128i __W){
+// CHECK-LABEL: test_mm_reduce_min_epi16
+// CHECK: call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %{{.*}})
+ return _mm_reduce_min_epi16(__W);
+}
+
+unsigned short test_mm_reduce_max_epu16(__m128i __W){
+// CHECK-LABEL: test_mm_reduce_max_epu16
+// CHECK: call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %{{.*}})
+ return _mm_reduce_max_epu16(__W);
+}
+
+unsigned short test_mm_reduce_min_epu16(__m128i __W){
+// CHECK-LABEL: test_mm_reduce_min_epu16
+// CHECK: call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %{{.*}})
+ return _mm_reduce_min_epu16(__W);
+}
+
+short test_mm_mask_reduce_max_epi16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: test_mm_mask_reduce_max_epi16
+// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %{{.*}})
+ return _mm_mask_reduce_max_epi16(__M, __W);
+}
+
+short test_mm_mask_reduce_min_epi16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: test_mm_mask_reduce_min_epi16
+// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %{{.*}})
+ return _mm_mask_reduce_min_epi16(__M, __W);
+}
+
+unsigned short test_mm_mask_reduce_max_epu16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: test_mm_mask_reduce_max_epu16
+// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %{{.*}})
+ return _mm_mask_reduce_max_epu16(__M, __W);
+}
+
+unsigned short test_mm_mask_reduce_min_epu16(__mmask8 __M, __m128i __W){
+// CHECK-LABEL: test_mm_mask_reduce_min_epu16
+// CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %{{.*}})
+ return _mm_mask_reduce_min_epu16(__M, __W);
+}
+
+short test_mm256_reduce_max_epi16(__m256i __W){
+// CHECK-LABEL: test_mm256_reduce_max_epi16
+// CHECK: call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %{{.*}})
+ return _mm256_reduce_max_epi16(__W);
+}
+
+short test_mm256_reduce_min_epi16(__m256i __W){
+// CHECK-LABEL: test_mm256_reduce_min_epi16
+// CHECK: call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %{{.*}})
+ return _mm256_reduce_min_epi16(__W);
+}
+
+unsigned short test_mm256_reduce_max_epu16(__m256i __W){
+// CHECK-LABEL: test_mm256_reduce_max_epu16
+// CHECK: call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %{{.*}})
+ return _mm256_reduce_max_epu16(__W);
+}
+
+unsigned short test_mm256_reduce_min_epu16(__m256i __W){
+// CHECK-LABEL: test_mm256_reduce_min_epu16
+// CHECK: call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %{{.*}})
+ return _mm256_reduce_min_epu16(__W);
+}
+
+short test_mm256_mask_reduce_max_epi16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: test_mm256_mask_reduce_max_epi16
+// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %{{.*}})
+ return _mm256_mask_reduce_max_epi16(__M, __W);
+}
+
+short test_mm256_mask_reduce_min_epi16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: test_mm256_mask_reduce_min_epi16
+// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %{{.*}})
+ return _mm256_mask_reduce_min_epi16(__M, __W);
+}
+
+unsigned short test_mm256_mask_reduce_max_epu16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: test_mm256_mask_reduce_max_epu16
+// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %{{.*}})
+ return _mm256_mask_reduce_max_epu16(__M, __W);
+}
+
+unsigned short test_mm256_mask_reduce_min_epu16(__mmask16 __M, __m256i __W){
+// CHECK-LABEL: test_mm256_mask_reduce_min_epu16
+// CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
+// CHECK: call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %{{.*}})
+ return _mm256_mask_reduce_min_epu16(__M, __W);
+}
+
+signed char test_mm_reduce_max_epi8(__m128i __W){
+// CHECK-LABEL: test_mm_reduce_max_epi8
+// CHECK: call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %{{.*}})
+ return _mm_reduce_max_epi8(__W);
+}
+
+signed char test_mm_reduce_min_epi8(__m128i __W){
+// CHECK-LABEL: test_mm_reduce_min_epi8
+// CHECK: call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %{{.*}})
+ return _mm_reduce_min_epi8(__W);
+}
+
+unsigned char test_mm_reduce_max_epu8(__m128i __W){
+// CHECK-LABEL: test_mm_reduce_max_epu8
+// CHECK: call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %{{.*}})
+ return _mm_reduce_max_epu8(__W);
+}
+
+unsigned char test_mm_reduce_min_epu8(__m128i __W){
+// CHECK-LABEL: test_mm_reduce_min_epu8
+// CHECK: call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %{{.*}})
+ return _mm_reduce_min_epu8(__W);
+}
+
+signed char test_mm_mask_reduce_max_epi8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: test_mm_mask_reduce_max_epi8
+// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %{{.*}})
+ return _mm_mask_reduce_max_epi8(__M, __W);
+}
+
+signed char test_mm_mask_reduce_min_epi8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: test_mm_mask_reduce_min_epi8
+// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %{{.*}})
+ return _mm_mask_reduce_min_epi8(__M, __W);
+}
+
+unsigned char test_mm_mask_reduce_max_epu8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: test_mm_mask_reduce_max_epu8
+// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %{{.*}})
+ return _mm_mask_reduce_max_epu8(__M, __W);
+}
+
+unsigned char test_mm_mask_reduce_min_epu8(__mmask16 __M, __m128i __W){
+// CHECK-LABEL: test_mm_mask_reduce_min_epu8
+// CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %{{.*}})
+ return _mm_mask_reduce_min_epu8(__M, __W);
+}
+
+signed char test_mm256_reduce_max_epi8(__m256i __W){
+// CHECK-LABEL: test_mm256_reduce_max_epi8
+// CHECK: call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %{{.*}})
+ return _mm256_reduce_max_epi8(__W);
+}
+
+signed char test_mm256_reduce_min_epi8(__m256i __W){
+// CHECK-LABEL: test_mm256_reduce_min_epi8
+// CHECK: call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %{{.*}})
+ return _mm256_reduce_min_epi8(__W);
+}
+
+unsigned char test_mm256_reduce_max_epu8(__m256i __W){
+// CHECK-LABEL: test_mm256_reduce_max_epu8
+// CHECK: call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %{{.*}})
+ return _mm256_reduce_max_epu8(__W);
+}
+
+unsigned char test_mm256_reduce_min_epu8(__m256i __W){
+// CHECK-LABEL: test_mm256_reduce_min_epu8
+// CHECK: call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %{{.*}})
+ return _mm256_reduce_min_epu8(__W);
+}
+
+signed char test_mm256_mask_reduce_max_epi8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: test_mm256_mask_reduce_max_epi8
+// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %{{.*}})
+ return _mm256_mask_reduce_max_epi8(__M, __W);
+}
+
+signed char test_mm256_mask_reduce_min_epi8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: test_mm256_mask_reduce_min_epi8
+// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %{{.*}})
+ return _mm256_mask_reduce_min_epi8(__M, __W);
+}
+
+unsigned char test_mm256_mask_reduce_max_epu8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: test_mm256_mask_reduce_max_epu8
+// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %{{.*}})
+ return _mm256_mask_reduce_max_epu8(__M, __W);
+}
+
+unsigned char test_mm256_mask_reduce_min_epu8(__mmask32 __M, __m256i __W){
+// CHECK-LABEL: test_mm256_mask_reduce_min_epu8
+// CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
+// CHECK: call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %{{.*}})
+ return _mm256_mask_reduce_min_epu8(__M, __W);
+}
More information about the cfe-commits
mailing list