[clang] 5945e97 - [clang][BFloat] Add reinterpret cast intrinsics
Ties Stuij via cfe-commits
cfe-commits at lists.llvm.org
Sun Jun 7 06:32:49 PDT 2020
Author: Ties Stuij
Date: 2020-06-07T14:32:37+01:00
New Revision: 5945e9799e77c30baffd0da4a9b735262cda3361
URL: https://github.com/llvm/llvm-project/commit/5945e9799e77c30baffd0da4a9b735262cda3361
DIFF: https://github.com/llvm/llvm-project/commit/5945e9799e77c30baffd0da4a9b735262cda3361.diff
LOG: [clang][BFloat] Add reinterpret cast intrinsics
Summary:
This patch is part of a series implementing the Bfloat16 extension of the
Armv8.6-a architecture, as detailed here:
https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/arm-architecture-developments-armv8-6-a
The bfloat type and its properties are specified in the Arm C Language
Extensions specification:
https://developer.arm.com/docs/ihi0055/d/procedure-call-standard-for-the-arm-64-bit-architecture
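As a quick, hypothetical illustration (not part of the patch itself), the new
intrinsics let C code reinterpret the raw bits of one NEON vector type as
another without any value conversion; this assumes a target with the NEON and
bf16 features enabled:

  #include <arm_neon.h>

  // Bitcast a 128-bit float vector to a bfloat16 vector and back.
  // No bits change; both types occupy a single Q register.
  bfloat16x8_t as_bf16(float32x4_t v) { return vreinterpretq_bf16_f32(v); }
  float32x4_t  as_f32(bfloat16x8_t v) { return vreinterpretq_f32_bf16(v); }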
Subscribers: kristof.beyls, ilya-biryukov, MaskRay, jkorous, arphaman, kadircet, usaxena95, cfe-commits
Tags: #clang
Differential Revision: https://reviews.llvm.org/D79869
The following people contributed to this patch:
- Luke Cheeseman
- Alexandros Lamprineas
- Luke Geeson
- Ties Stuij
Added:
clang/test/CodeGen/aarch64-bf16-reinterpret-intrinsics.c
clang/test/CodeGen/arm-bf16-reinterpret-intrinsics.c
Modified:
clang/include/clang/Basic/arm_neon.td
clang/include/clang/Basic/arm_neon_incl.td
clang/utils/TableGen/NeonEmitter.cpp
Removed:
################################################################################
diff --git a/clang/include/clang/Basic/arm_neon.td b/clang/include/clang/Basic/arm_neon.td
index 98fda8b13142..12481cfb145d 100644
--- a/clang/include/clang/Basic/arm_neon.td
+++ b/clang/include/clang/Basic/arm_neon.td
@@ -635,11 +635,23 @@ def VZIP : WInst<"vzip", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
def VUZP : WInst<"vuzp", "2..", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
////////////////////////////////////////////////////////////////////////////////
+
+class REINTERPRET_CROSS_SELF<string Types> :
+ NoTestOpInst<"vreinterpret", "..", Types, OP_REINT> {
+ let CartesianProductWith = Types;
+}
+
+multiclass REINTERPRET_CROSS_TYPES<string TypesA, string TypesB> {
+ def AXB: NoTestOpInst<"vreinterpret", "..", TypesA, OP_REINT> {
+ let CartesianProductWith = TypesB;
+ }
+ def BXA: NoTestOpInst<"vreinterpret", "..", TypesB, OP_REINT> {
+ let CartesianProductWith = TypesA;
+ }
+}
+
// E.3.31 Vector reinterpret cast operations
-def VREINTERPRET
- : NoTestOpInst<"vreinterpret", "..",
- "csilUcUsUiUlhfPcPsQcQsQiQlQUcQUsQUiQUlQhQfQPcQPs", OP_REINT> {
- let CartesianProductOfTypes = 1;
+def VREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfPcPsQcQsQiQlQUcQUsQUiQUlQhQfQPcQPs"> {
let ArchGuard = "!defined(__aarch64__)";
let BigEndianSafe = 1;
}
@@ -1188,12 +1200,9 @@ def VQTBX4_A64 : WInst<"vqtbx4", "..(4Q)U", "UccPcQUcQcQPc">;
// NeonEmitter implicitly takes the cartesian product of the type string with
// itself during generation so, unlike all other intrinsics, this one should
// include *all* types, not just additional ones.
-def VVREINTERPRET
- : NoTestOpInst<"vreinterpret", "..",
- "csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk", OP_REINT> {
- let CartesianProductOfTypes = 1;
- let BigEndianSafe = 1;
+def VVREINTERPRET : REINTERPRET_CROSS_SELF<"csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk"> {
let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__)";
+ let BigEndianSafe = 1;
}
////////////////////////////////////////////////////////////////////////////////
@@ -1891,3 +1900,17 @@ let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarc
let isLaneQ = 1;
}
}
+
+let ArchGuard = "defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)" in {
+ let BigEndianSafe = 1 in {
+ defm VREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
+ "csilUcUsUiUlhfPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQPcQPsQPl", "bQb">;
+ }
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_BF16) && defined(__aarch64__)" in {
+ let BigEndianSafe = 1 in {
+ defm VVREINTERPRET_BF : REINTERPRET_CROSS_TYPES<
+ "csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk", "bQb">;
+ }
+}
diff --git a/clang/include/clang/Basic/arm_neon_incl.td b/clang/include/clang/Basic/arm_neon_incl.td
index a1031fe4ad4f..dd20b70433ef 100644
--- a/clang/include/clang/Basic/arm_neon_incl.td
+++ b/clang/include/clang/Basic/arm_neon_incl.td
@@ -267,7 +267,6 @@ class Inst <string n, string p, string t, Operation o> {
string ArchGuard = "";
Operation Operation = o;
- bit CartesianProductOfTypes = 0;
bit BigEndianSafe = 0;
bit isShift = 0;
bit isScalarShift = 0;
@@ -289,6 +288,8 @@ class Inst <string n, string p, string t, Operation o> {
// this. Ex: vset_lane which outputs vmov instructions.
bit isHiddenWInst = 0;
bit isHiddenLInst = 0;
+
+ string CartesianProductWith = "";
}
// The following instruction classes are implemented via builtins.
diff --git a/clang/test/CodeGen/aarch64-bf16-reinterpret-intrinsics.c b/clang/test/CodeGen/aarch64-bf16-reinterpret-intrinsics.c
new file mode 100644
index 000000000000..3c91b8daf130
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-bf16-reinterpret-intrinsics.c
@@ -0,0 +1,333 @@
+// RUN: %clang_cc1 -triple aarch64-arm-none-eabi -target-feature +neon -target-feature +bf16 \
+// RUN: -disable-O0-optnone -S -emit-llvm -o - %s \
+// RUN: | opt -S -mem2reg \
+// RUN: | FileCheck %s
+
+// REQUIRES: aarch64-registered-target
+
+#include <arm_neon.h>
+
+// CHECK-LABEL: @test_vreinterpret_bf16_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_s8(int8x8_t a) { return vreinterpret_bf16_s8(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_s16(int16x4_t a) { return vreinterpret_bf16_s16(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_s32(int32x2_t a) { return vreinterpret_bf16_s32(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_f32(float32x2_t a) { return vreinterpret_bf16_f32(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_u8(uint8x8_t a) { return vreinterpret_bf16_u8(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_u16(uint16x4_t a) { return vreinterpret_bf16_u16(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_u32(uint32x2_t a) { return vreinterpret_bf16_u32(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_p8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_p8(poly8x8_t a) { return vreinterpret_bf16_p8(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_p16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_p16(poly16x4_t a) { return vreinterpret_bf16_p16(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_u64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_u64(uint64x1_t a) { return vreinterpret_bf16_u64(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_s64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_s64(int64x1_t a) { return vreinterpret_bf16_s64(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i8> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_s8(int8x16_t a) { return vreinterpretq_bf16_s8(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_s16(int16x8_t a) { return vreinterpretq_bf16_s16(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_s32(int32x4_t a) { return vreinterpretq_bf16_s32(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_f32(float32x4_t a) { return vreinterpretq_bf16_f32(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i8> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_u8(uint8x16_t a) { return vreinterpretq_bf16_u8(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_u16(uint16x8_t a) { return vreinterpretq_bf16_u16(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_u32(uint32x4_t a) { return vreinterpretq_bf16_u32(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_p8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i8> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_p8(poly8x16_t a) { return vreinterpretq_bf16_p8(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_p16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_p16(poly16x8_t a) { return vreinterpretq_bf16_p16(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_u64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_u64(uint64x2_t a) { return vreinterpretq_bf16_u64(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_s64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_s64(int64x2_t a) { return vreinterpretq_bf16_s64(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_p64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_p64(poly64x1_t a) { return vreinterpret_bf16_p64(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_p64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_p64(poly64x2_t a) { return vreinterpretq_bf16_p64(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_p128(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast i128 [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_p128(poly128_t a) { return vreinterpretq_bf16_p128(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_f64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x double> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_f64(float64x1_t a) { return vreinterpret_bf16_f64(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_f64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x double> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_f64(float64x2_t a) { return vreinterpretq_bf16_f64(a); }
+// CHECK-LABEL: @test_vreinterpret_s8_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT: ret <8 x i8> [[TMP0]]
+//
+int8x8_t test_vreinterpret_s8_bf16(bfloat16x4_t a) { return vreinterpret_s8_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_s16_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <4 x i16>
+// CHECK-NEXT: ret <4 x i16> [[TMP0]]
+//
+int16x4_t test_vreinterpret_s16_bf16(bfloat16x4_t a) { return vreinterpret_s16_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_s32_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <2 x i32>
+// CHECK-NEXT: ret <2 x i32> [[TMP0]]
+//
+int32x2_t test_vreinterpret_s32_bf16(bfloat16x4_t a) { return vreinterpret_s32_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_f32_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <2 x float>
+// CHECK-NEXT: ret <2 x float> [[TMP0]]
+//
+float32x2_t test_vreinterpret_f32_bf16(bfloat16x4_t a) { return vreinterpret_f32_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_u8_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT: ret <8 x i8> [[TMP0]]
+//
+uint8x8_t test_vreinterpret_u8_bf16(bfloat16x4_t a) { return vreinterpret_u8_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_u16_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <4 x i16>
+// CHECK-NEXT: ret <4 x i16> [[TMP0]]
+//
+uint16x4_t test_vreinterpret_u16_bf16(bfloat16x4_t a) { return vreinterpret_u16_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_u32_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <2 x i32>
+// CHECK-NEXT: ret <2 x i32> [[TMP0]]
+//
+uint32x2_t test_vreinterpret_u32_bf16(bfloat16x4_t a) { return vreinterpret_u32_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_p8_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT: ret <8 x i8> [[TMP0]]
+//
+poly8x8_t test_vreinterpret_p8_bf16(bfloat16x4_t a) { return vreinterpret_p8_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_p16_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <4 x i16>
+// CHECK-NEXT: ret <4 x i16> [[TMP0]]
+//
+poly16x4_t test_vreinterpret_p16_bf16(bfloat16x4_t a) { return vreinterpret_p16_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_u64_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <1 x i64>
+// CHECK-NEXT: ret <1 x i64> [[TMP0]]
+//
+uint64x1_t test_vreinterpret_u64_bf16(bfloat16x4_t a) { return vreinterpret_u64_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_s64_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <1 x i64>
+// CHECK-NEXT: ret <1 x i64> [[TMP0]]
+//
+int64x1_t test_vreinterpret_s64_bf16(bfloat16x4_t a) { return vreinterpret_s64_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_p64_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <1 x i64>
+// CHECK-NEXT: ret <1 x i64> [[TMP0]]
+//
+poly64x1_t test_vreinterpret_p64_bf16(bfloat16x4_t a) { return vreinterpret_p64_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_s8_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vreinterpretq_s8_bf16(bfloat16x8_t a) { return vreinterpretq_s8_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_s16_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <8 x i16>
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vreinterpretq_s16_bf16(bfloat16x8_t a) { return vreinterpretq_s16_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_s32_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vreinterpretq_s32_bf16(bfloat16x8_t a) { return vreinterpretq_s32_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_f32_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <4 x float>
+// CHECK-NEXT: ret <4 x float> [[TMP0]]
+//
+float32x4_t test_vreinterpretq_f32_bf16(bfloat16x8_t a) { return vreinterpretq_f32_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_u8_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_vreinterpretq_u8_bf16(bfloat16x8_t a) { return vreinterpretq_u8_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_u16_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <8 x i16>
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+uint16x8_t test_vreinterpretq_u16_bf16(bfloat16x8_t a) { return vreinterpretq_u16_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_u32_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP0]]
+//
+uint32x4_t test_vreinterpretq_u32_bf16(bfloat16x8_t a) { return vreinterpretq_u32_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_p8_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+poly8x16_t test_vreinterpretq_p8_bf16(bfloat16x8_t a) { return vreinterpretq_p8_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_p16_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <8 x i16>
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+poly16x8_t test_vreinterpretq_p16_bf16(bfloat16x8_t a) { return vreinterpretq_p16_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_u64_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <2 x i64>
+// CHECK-NEXT: ret <2 x i64> [[TMP0]]
+//
+uint64x2_t test_vreinterpretq_u64_bf16(bfloat16x8_t a) { return vreinterpretq_u64_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_s64_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <2 x i64>
+// CHECK-NEXT: ret <2 x i64> [[TMP0]]
+//
+int64x2_t test_vreinterpretq_s64_bf16(bfloat16x8_t a) { return vreinterpretq_s64_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_p64_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <2 x i64>
+// CHECK-NEXT: ret <2 x i64> [[TMP0]]
+//
+poly64x2_t test_vreinterpretq_p64_bf16(bfloat16x8_t a) { return vreinterpretq_p64_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_p128_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to i128
+// CHECK-NEXT: ret i128 [[TMP0]]
+//
+poly128_t test_vreinterpretq_p128_bf16(bfloat16x8_t a) { return vreinterpretq_p128_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_f64_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <1 x double>
+// CHECK-NEXT: ret <1 x double> [[TMP0]]
+//
+float64x1_t test_vreinterpret_f64_bf16(bfloat16x4_t a) { return vreinterpret_f64_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_f64_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <2 x double>
+// CHECK-NEXT: ret <2 x double> [[TMP0]]
+//
+float64x2_t test_vreinterpretq_f64_bf16(bfloat16x8_t a) { return vreinterpretq_f64_bf16(a); }
diff --git a/clang/test/CodeGen/arm-bf16-reinterpret-intrinsics.c b/clang/test/CodeGen/arm-bf16-reinterpret-intrinsics.c
new file mode 100644
index 000000000000..cde043a59a50
--- /dev/null
+++ b/clang/test/CodeGen/arm-bf16-reinterpret-intrinsics.c
@@ -0,0 +1,314 @@
+// RUN: %clang_cc1 -triple armv8.2a-arm-none-eabi -target-feature +neon -target-feature +bf16 -mfloat-abi hard \
+// RUN: -disable-O0-optnone -S -emit-llvm -o - %s \
+// RUN: | opt -S -instcombine \
+// RUN: | FileCheck %s
+
+// REQUIRES: arm-registered-target
+
+#include <arm_neon.h>
+
+// CHECK-LABEL: @test_vreinterpret_bf16_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_s8(int8x8_t a) { return vreinterpret_bf16_s8(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_s16(int16x4_t a) { return vreinterpret_bf16_s16(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_s32(int32x2_t a) { return vreinterpret_bf16_s32(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_f32(float32x2_t a) { return vreinterpret_bf16_f32(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_u8(uint8x8_t a) { return vreinterpret_bf16_u8(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_u16(uint16x4_t a) { return vreinterpret_bf16_u16(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i32> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_u32(uint32x2_t a) { return vreinterpret_bf16_u32(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_p8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_p8(poly8x8_t a) { return vreinterpret_bf16_p8(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_p16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i16> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_p16(poly16x4_t a) { return vreinterpret_bf16_p16(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_u64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_u64(uint64x1_t a) { return vreinterpret_bf16_u64(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_s64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_s64(int64x1_t a) { return vreinterpret_bf16_s64(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_s8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i8> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_s8(int8x16_t a) { return vreinterpretq_bf16_s8(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_s16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_s16(int16x8_t a) { return vreinterpretq_bf16_s16(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_s32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_s32(int32x4_t a) { return vreinterpretq_bf16_s32(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_f32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_f32(float32x4_t a) { return vreinterpretq_bf16_f32(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_u8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i8> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_u8(uint8x16_t a) { return vreinterpretq_bf16_u8(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_u16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_u16(uint16x8_t a) { return vreinterpretq_bf16_u16(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_u32(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_u32(uint32x4_t a) { return vreinterpretq_bf16_u32(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_p8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <16 x i8> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_p8(poly8x16_t a) { return vreinterpretq_bf16_p8(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_p16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i16> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_p16(poly16x8_t a) { return vreinterpretq_bf16_p16(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_u64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_u64(uint64x2_t a) { return vreinterpretq_bf16_u64(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_s64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_s64(int64x2_t a) { return vreinterpretq_bf16_s64(a); }
+// CHECK-LABEL: @test_vreinterpret_bf16_p64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <1 x i64> [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT: ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vreinterpret_bf16_p64(poly64x1_t a) { return vreinterpret_bf16_p64(a); }
+// CHECK-LABEL: @test_vreinterpretq_bf16_p64(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x i64> [[A:%.*]] to <8 x bfloat>
+// CHECK-NEXT: ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vreinterpretq_bf16_p64(poly64x2_t a) { return vreinterpretq_bf16_p64(a); }
+
+// TODO: poly128_t not implemented on aarch32
+// CHCK-LABEL: @test_vreinterpretq_bf16_p128(
+// CHCK-NEXT: entry:
+// CHCK-NEXT: [[TMP0:%.*]] = bitcast i128 [[A:%.*]] to <4 x i32>
+// CHCK-NEXT: ret <4 x i32> [[TMP0]]
+//
+//bfloat16x8_t test_vreinterpretq_bf16_p128(poly128_t a) { return vreinterpretq_bf16_p128(a); }
+
+// CHECK-LABEL: @test_vreinterpret_s8_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT: ret <8 x i8> [[TMP0]]
+//
+int8x8_t test_vreinterpret_s8_bf16(bfloat16x4_t a) { return vreinterpret_s8_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_s16_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <4 x i16>
+// CHECK-NEXT: ret <4 x i16> [[TMP0]]
+//
+int16x4_t test_vreinterpret_s16_bf16(bfloat16x4_t a) { return vreinterpret_s16_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_s32_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <2 x i32>
+// CHECK-NEXT: ret <2 x i32> [[TMP0]]
+//
+int32x2_t test_vreinterpret_s32_bf16(bfloat16x4_t a) { return vreinterpret_s32_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_f32_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <2 x float>
+// CHECK-NEXT: ret <2 x float> [[TMP0]]
+//
+float32x2_t test_vreinterpret_f32_bf16(bfloat16x4_t a) { return vreinterpret_f32_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_u8_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT: ret <8 x i8> [[TMP0]]
+//
+uint8x8_t test_vreinterpret_u8_bf16(bfloat16x4_t a) { return vreinterpret_u8_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_u16_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <4 x i16>
+// CHECK-NEXT: ret <4 x i16> [[TMP0]]
+//
+uint16x4_t test_vreinterpret_u16_bf16(bfloat16x4_t a) { return vreinterpret_u16_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_u32_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <2 x i32>
+// CHECK-NEXT: ret <2 x i32> [[TMP0]]
+//
+uint32x2_t test_vreinterpret_u32_bf16(bfloat16x4_t a) { return vreinterpret_u32_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_p8_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <8 x i8>
+// CHECK-NEXT: ret <8 x i8> [[TMP0]]
+//
+poly8x8_t test_vreinterpret_p8_bf16(bfloat16x4_t a) { return vreinterpret_p8_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_p16_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <4 x i16>
+// CHECK-NEXT: ret <4 x i16> [[TMP0]]
+//
+poly16x4_t test_vreinterpret_p16_bf16(bfloat16x4_t a) { return vreinterpret_p16_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_u64_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <1 x i64>
+// CHECK-NEXT: ret <1 x i64> [[TMP0]]
+//
+uint64x1_t test_vreinterpret_u64_bf16(bfloat16x4_t a) { return vreinterpret_u64_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_s64_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <1 x i64>
+// CHECK-NEXT: ret <1 x i64> [[TMP0]]
+//
+int64x1_t test_vreinterpret_s64_bf16(bfloat16x4_t a) { return vreinterpret_s64_bf16(a); }
+// CHECK-LABEL: @test_vreinterpret_p64_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <1 x i64>
+// CHECK-NEXT: ret <1 x i64> [[TMP0]]
+//
+poly64x1_t test_vreinterpret_p64_bf16(bfloat16x4_t a) { return vreinterpret_p64_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_s8_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+int8x16_t test_vreinterpretq_s8_bf16(bfloat16x8_t a) { return vreinterpretq_s8_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_s16_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <8 x i16>
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+int16x8_t test_vreinterpretq_s16_bf16(bfloat16x8_t a) { return vreinterpretq_s16_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_s32_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP0]]
+//
+int32x4_t test_vreinterpretq_s32_bf16(bfloat16x8_t a) { return vreinterpretq_s32_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_f32_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <4 x float>
+// CHECK-NEXT: ret <4 x float> [[TMP0]]
+//
+float32x4_t test_vreinterpretq_f32_bf16(bfloat16x8_t a) { return vreinterpretq_f32_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_u8_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+uint8x16_t test_vreinterpretq_u8_bf16(bfloat16x8_t a) { return vreinterpretq_u8_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_u16_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <8 x i16>
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+uint16x8_t test_vreinterpretq_u16_bf16(bfloat16x8_t a) { return vreinterpretq_u16_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_u32_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <4 x i32>
+// CHECK-NEXT: ret <4 x i32> [[TMP0]]
+//
+uint32x4_t test_vreinterpretq_u32_bf16(bfloat16x8_t a) { return vreinterpretq_u32_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_p8_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
+// CHECK-NEXT: ret <16 x i8> [[TMP0]]
+//
+poly8x16_t test_vreinterpretq_p8_bf16(bfloat16x8_t a) { return vreinterpretq_p8_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_p16_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <8 x i16>
+// CHECK-NEXT: ret <8 x i16> [[TMP0]]
+//
+poly16x8_t test_vreinterpretq_p16_bf16(bfloat16x8_t a) { return vreinterpretq_p16_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_u64_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <2 x i64>
+// CHECK-NEXT: ret <2 x i64> [[TMP0]]
+//
+uint64x2_t test_vreinterpretq_u64_bf16(bfloat16x8_t a) { return vreinterpretq_u64_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_s64_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <2 x i64>
+// CHECK-NEXT: ret <2 x i64> [[TMP0]]
+//
+int64x2_t test_vreinterpretq_s64_bf16(bfloat16x8_t a) { return vreinterpretq_s64_bf16(a); }
+// CHECK-LABEL: @test_vreinterpretq_p64_bf16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <2 x i64>
+// CHECK-NEXT: ret <2 x i64> [[TMP0]]
+//
+poly64x2_t test_vreinterpretq_p64_bf16(bfloat16x8_t a) { return vreinterpretq_p64_bf16(a); }
+
+// TODO: poly128_t not implemented on aarch32
+// CHCK-LABEL: @test_vreinterpretq_p128_bf16(
+// CHCK-NEXT: entry:
+// CHCK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[A:%.*]] to i128
+// CHCK-NEXT: ret i128 [[TMP0]]
+//
+//poly128_t test_vreinterpretq_p128_bf16(bfloat16x8_t a) { return vreinterpretq_p128_bf16(a); }
diff --git a/clang/utils/TableGen/NeonEmitter.cpp b/clang/utils/TableGen/NeonEmitter.cpp
index f94166590bd3..7494f05c85fd 100644
--- a/clang/utils/TableGen/NeonEmitter.cpp
+++ b/clang/utils/TableGen/NeonEmitter.cpp
@@ -311,7 +311,7 @@ class Intrinsic {
/// The unmangled name.
std::string Name;
/// The input and output typespecs. InTS == OutTS except when
- /// CartesianProductOfTypes is 1 - this is the case for vreinterpret.
+ /// CartesianProductWith is non-empty - this is the case for vreinterpret.
TypeSpec OutTS, InTS;
/// The base class kind. Most intrinsics use ClassS, which has full type
/// info for integers (s32/u32). Some use ClassI, which doesn't care about
@@ -344,7 +344,7 @@ class Intrinsic {
/// The set of intrinsics that this intrinsic uses/requires.
std::set<Intrinsic *> Dependencies;
/// The "base type", which is Type('d', OutTS). InBaseType is only
- /// different if CartesianProductOfTypes = 1 (for vreinterpret).
+ /// different if CartesianProductWith is non-empty (for vreinterpret).
Type BaseType, InBaseType;
/// The return variable.
Variable RetVar;
@@ -1936,10 +1936,10 @@ void NeonEmitter::createIntrinsic(Record *R,
std::string Proto = std::string(R->getValueAsString("Prototype"));
std::string Types = std::string(R->getValueAsString("Types"));
Record *OperationRec = R->getValueAsDef("Operation");
- bool CartesianProductOfTypes = R->getValueAsBit("CartesianProductOfTypes");
bool BigEndianSafe = R->getValueAsBit("BigEndianSafe");
std::string Guard = std::string(R->getValueAsString("ArchGuard"));
bool IsUnavailable = OperationRec->getValueAsBit("Unavailable");
+ std::string CartesianProductWith = std::string(R->getValueAsString("CartesianProductWith"));
// Set the global current record. This allows assert_with_loc to produce
// decent location information even when highly nested.
@@ -1954,17 +1954,20 @@ void NeonEmitter::createIntrinsic(Record *R,
CK = ClassMap[R->getSuperClasses()[1].first];
std::vector<std::pair<TypeSpec, TypeSpec>> NewTypeSpecs;
- for (auto TS : TypeSpecs) {
- if (CartesianProductOfTypes) {
+ if (!CartesianProductWith.empty()) {
+ std::vector<TypeSpec> ProductTypeSpecs = TypeSpec::fromTypeSpecs(CartesianProductWith);
+ for (auto TS : TypeSpecs) {
Type DefaultT(TS, ".");
- for (auto SrcTS : TypeSpecs) {
+ for (auto SrcTS : ProductTypeSpecs) {
Type DefaultSrcT(SrcTS, ".");
if (TS == SrcTS ||
DefaultSrcT.getSizeInBits() != DefaultT.getSizeInBits())
continue;
NewTypeSpecs.push_back(std::make_pair(TS, SrcTS));
}
- } else {
+ }
+ } else {
+ for (auto TS : TypeSpecs) {
NewTypeSpecs.push_back(std::make_pair(TS, TS));
}
}
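For readers new to NeonEmitter, the rewritten loop pairs each type in the
intrinsic's own type string with each type in its CartesianProductWith string,
keeping only pairs of distinct types with matching bit widths. A simplified,
hypothetical sketch of that pairing rule (the real code works on TypeSpec
objects and Type::getSizeInBits()):

  #include <string>
  #include <utility>
  #include <vector>

  // Stand-in for the emitter's pairing loop: TypesA is the intrinsic's own
  // type list, TypesB the CartesianProductWith list, and sizeInBits a
  // caller-supplied stand-in for Type::getSizeInBits().
  std::vector<std::pair<std::string, std::string>>
  crossTypes(const std::vector<std::string> &TypesA,
             const std::vector<std::string> &TypesB,
             unsigned (*sizeInBits)(const std::string &)) {
    std::vector<std::pair<std::string, std::string>> Pairs;
    for (const auto &TS : TypesA)
      for (const auto &SrcTS : TypesB)
        // Skip self-casts and width mismatches, exactly as the emitter
        // does for vreinterpret.
        if (TS != SrcTS && sizeInBits(TS) == sizeInBits(SrcTS))
          Pairs.emplace_back(TS, SrcTS);
    return Pairs;
  }

Note that REINTERPRET_CROSS_SELF keeps the old self-product behaviour
(CartesianProductWith set to the type string itself), while
REINTERPRET_CROSS_TYPES instantiates both directions, A-to-B and B-to-A,
which is how the bfloat casts are wired up against all other vector types.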