[clang] 8b137a4 - [clang][BFloat] Add create/set/get/dup intrinsics

Ties Stuij via cfe-commits cfe-commits at lists.llvm.org
Fri Jun 5 06:35:21 PDT 2020


Author: Ties Stuij
Date: 2020-06-05T14:35:10+01:00
New Revision: 8b137a430636c6626fcc6ef93b05eb69d6183e57

URL: https://github.com/llvm/llvm-project/commit/8b137a430636c6626fcc6ef93b05eb69d6183e57
DIFF: https://github.com/llvm/llvm-project/commit/8b137a430636c6626fcc6ef93b05eb69d6183e57.diff

LOG: [clang][BFloat] Add create/set/get/dup intrinsics

Summary:
This patch is part of a series that adds support for the Bfloat16 extension of
the Armv8.6-a architecture, as detailed here:

https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/arm-architecture-developments-armv8-6-a

The bfloat type and its properties are specified in the Arm Architecture
Reference Manual:

https://developer.arm.com/docs/ddi0487/latest/arm-architecture-reference-manual-armv8-for-armv8-a-architecture-profile

The following people contributed to this patch:
- Luke Cheeseman
- Momchil Velikov
- Luke Geeson
- Ties Stuij
- Mikhail Maltsev

Reviewers: t.p.northover, sdesmalen, fpetrogalli, LukeGeeson, stuij, labrinea

Reviewed By: labrinea

Subscribers: miyuki, dmgreen, labrinea, kristof.beyls, ilya-biryukov, MaskRay, jkorous, arphaman, usaxena95, cfe-commits

Tags: #clang

Differential Revision: https://reviews.llvm.org/D79710

Added: 
    clang/test/CodeGen/aarch64-bf16-getset-intrinsics.c
    clang/test/CodeGen/arm-bf16-getset-intrinsics.c

Modified: 
    clang/include/clang/Basic/arm_neon.td
    clang/lib/CodeGen/CGBuiltin.cpp

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/arm_neon.td b/clang/include/clang/Basic/arm_neon.td
index 82e44aaec69b..98fda8b13142 100644
--- a/clang/include/clang/Basic/arm_neon.td
+++ b/clang/include/clang/Basic/arm_neon.td
@@ -190,20 +190,28 @@ def OP_SCALAR_QRDMLAH_LN : Op<(call "vqadd", $p0, (call "vqrdmulh", $p1,
 def OP_SCALAR_QRDMLSH_LN : Op<(call "vqsub", $p0, (call "vqrdmulh", $p1,
                               (call "vget_lane", $p2, $p3)))>;
 
-def OP_SCALAR_HALF_GET_LN : Op<(bitcast "float16_t",
-                                   (call "vget_lane",
-                                         (bitcast "int16x4_t", $p0), $p1))>;
-def OP_SCALAR_HALF_GET_LNQ : Op<(bitcast "float16_t",
-                                    (call "vget_lane",
-                                          (bitcast "int16x8_t", $p0), $p1))>;
-def OP_SCALAR_HALF_SET_LN : Op<(bitcast "float16x4_t",
-                                   (call "vset_lane",
-                                         (bitcast "int16_t", $p0),
-                                         (bitcast "int16x4_t", $p1), $p2))>;
-def OP_SCALAR_HALF_SET_LNQ : Op<(bitcast "float16x8_t",
-                                    (call "vset_lane",
-                                          (bitcast "int16_t", $p0),
-                                          (bitcast "int16x8_t", $p1), $p2))>;
+multiclass ScalarGetSetLaneOpsF16<string scalarTy,
+                                  string vectorTy4, string vectorTy8> {
+  def _GET_LN  : Op<(bitcast scalarTy,
+                        (call "vget_lane",
+                            (bitcast "int16x4_t", $p0), $p1))>;
+  def _GET_LNQ : Op<(bitcast scalarTy,
+                        (call "vget_lane",
+                            (bitcast "int16x8_t", $p0), $p1))>;
+  def _SET_LN  : Op<(bitcast vectorTy4,
+                        (call "vset_lane",
+                            (bitcast "int16_t", $p0),
+                            (bitcast "int16x4_t", $p1), $p2))>;
+  def _SET_LNQ : Op<(bitcast vectorTy8,
+                        (call "vset_lane",
+                            (bitcast "int16_t", $p0),
+                            (bitcast "int16x8_t", $p1), $p2))>;
+}
+
+defm OP_SCALAR_HALF: ScalarGetSetLaneOpsF16<"float16_t",
+                                            "float16x4_t", "float16x8_t">;
+defm OP_SCALAR_BF16: ScalarGetSetLaneOpsF16<"bfloat16_t",
+                                            "bfloat16x4_t", "bfloat16x8_t">;
 
 def OP_DOT_LN
     : Op<(call "vdot", $p0, $p1,
@@ -247,6 +255,12 @@ def SPLATQ : WInst<"splat_laneq", ".(!Q)I",
                    "UcUsUicsilPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUlhdQhQdPlQPl"> {
   let isLaneQ = 1;
 }
+let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)" in {
+  def SPLAT_BF  : WInst<"splat_lane", ".(!q)I", "bQb">;
+  def SPLATQ_BF : WInst<"splat_laneq", ".(!Q)I", "bQb"> {
+    let isLaneQ = 1;
+  }
+}
 
 //===----------------------------------------------------------------------===//
 // Intrinsics
@@ -1841,3 +1855,39 @@ let ArchGuard = "defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__)" in {
   def VCADDQ_ROT90_FP64  : SInst<"vcaddq_rot90", "QQQ", "d">;
   def VCADDQ_ROT270_FP64 : SInst<"vcaddq_rot270", "QQQ", "d">;
 }
+
+// V8.2-A BFloat intrinsics
+let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)" in {
+  def VCREATE_BF : NoTestOpInst<"vcreate", ".(IU>)", "b", OP_CAST> {
+    let BigEndianSafe = 1;
+  }
+
+  def VDUP_N_BF    : WOpInst<"vdup_n", ".1", "bQb", OP_DUP>;
+
+  def VDUP_LANE_BF : WOpInst<"vdup_lane", ".qI", "bQb", OP_DUP_LN>;
+  def VDUP_LANEQ_BF: WOpInst<"vdup_laneq", ".QI", "bQb", OP_DUP_LN> {
+    let isLaneQ = 1;
+  }
+
+  def VCOMBINE_BF  : NoTestOpInst<"vcombine", "Q..", "b", OP_CONC>;
+
+  def VGET_HIGH_BF : NoTestOpInst<"vget_high", ".Q", "b", OP_HI>;
+  def VGET_LOW_BF  : NoTestOpInst<"vget_low", ".Q", "b", OP_LO>;
+
+  def VGET_LANE_BF : IOpInst<"vget_lane", "1.I", "b", OP_SCALAR_BF16_GET_LN>;
+  def VSET_LANE_BF : IOpInst<"vset_lane", ".1.I", "b", OP_SCALAR_BF16_SET_LN>;
+  def VGET_LANEQ_BF : IOpInst<"vget_lane", "1.I", "Qb", OP_SCALAR_BF16_GET_LNQ>;
+  def VSET_LANEQ_BF : IOpInst<"vset_lane", ".1.I", "Qb", OP_SCALAR_BF16_SET_LNQ>;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__)" in {
+  def SCALAR_VDUP_LANE_BF_A32 : IOpInst<"vduph_lane", "1.I", "b", OP_SCALAR_BF16_GET_LN>;
+  def SCALAR_VDUP_LANEQ_BF_A32 : IOpInst<"vduph_laneq", "1.I", "Hb", OP_SCALAR_BF16_GET_LNQ>;
+}
+
+let ArchGuard = "defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__)" in {
+  def SCALAR_VDUP_LANE_BF_A64 : IInst<"vdup_lane", "1.I", "Sb">;
+  def SCALAR_VDUP_LANEQ_BF_A64 : IInst<"vdup_laneq", "1QI", "Sb"> {
+    let isLaneQ = 1;
+  }
+}

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 6b7863a16f1c..2acb5a3d8422 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -9368,10 +9368,12 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                           : Intrinsic::aarch64_neon_sqsub;
     return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
   }
+  case NEON::BI__builtin_neon_vduph_lane_bf16:
   case NEON::BI__builtin_neon_vduph_lane_f16: {
     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                         "vget_lane");
   }
+  case NEON::BI__builtin_neon_vduph_laneq_bf16:
   case NEON::BI__builtin_neon_vduph_laneq_f16: {
     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                         "vgetq_lane");

diff --git a/clang/test/CodeGen/aarch64-bf16-getset-intrinsics.c b/clang/test/CodeGen/aarch64-bf16-getset-intrinsics.c
new file mode 100644
index 000000000000..27b3b345544c
--- /dev/null
+++ b/clang/test/CodeGen/aarch64-bf16-getset-intrinsics.c
@@ -0,0 +1,151 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple aarch64-arm-none-eabi -target-feature +neon -target-feature +bf16 \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg -instcombine | FileCheck %s
+
+#include <arm_neon.h>
+
+// CHECK-LABEL: @test_vcreate_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast i64 [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vcreate_bf16(uint64_t a) {
+  return vcreate_bf16(a);
+}
+
+// CHECK-LABEL: @test_vdup_n_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VECINIT_I:%.*]] = insertelement <4 x bfloat> undef, bfloat [[V:%.*]], i32 0
+// CHECK-NEXT:    [[VECINIT3_I:%.*]] = shufflevector <4 x bfloat> [[VECINIT_I]], <4 x bfloat> undef, <4 x i32> zeroinitializer
+// CHECK-NEXT:    ret <4 x bfloat> [[VECINIT3_I]]
+//
+bfloat16x4_t test_vdup_n_bf16(bfloat16_t v) {
+  return vdup_n_bf16(v);
+}
+
+// CHECK-LABEL: @test_vdupq_n_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VECINIT_I:%.*]] = insertelement <8 x bfloat> undef, bfloat [[V:%.*]], i32 0
+// CHECK-NEXT:    [[VECINIT7_I:%.*]] = shufflevector <8 x bfloat> [[VECINIT_I]], <8 x bfloat> undef, <8 x i32> zeroinitializer
+// CHECK-NEXT:    ret <8 x bfloat> [[VECINIT7_I]]
+//
+bfloat16x8_t test_vdupq_n_bf16(bfloat16_t v) {
+  return vdupq_n_bf16(v);
+}
+
+// CHECK-LABEL: @test_vdup_lane_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[LANE:%.*]] = shufflevector <4 x bfloat> [[V:%.*]], <4 x bfloat> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+// CHECK-NEXT:    ret <4 x bfloat> [[LANE]]
+//
+bfloat16x4_t test_vdup_lane_bf16(bfloat16x4_t v) {
+  return vdup_lane_bf16(v, 1);
+}
+
+// CHECK-LABEL: @test_vdupq_lane_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[LANE:%.*]] = shufflevector <4 x bfloat> [[V:%.*]], <4 x bfloat> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+// CHECK-NEXT:    ret <8 x bfloat> [[LANE]]
+//
+bfloat16x8_t test_vdupq_lane_bf16(bfloat16x4_t v) {
+  return vdupq_lane_bf16(v, 1);
+}
+
+// CHECK-LABEL: @test_vdup_laneq_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[LANE:%.*]] = shufflevector <8 x bfloat> [[V:%.*]], <8 x bfloat> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+// CHECK-NEXT:    ret <4 x bfloat> [[LANE]]
+//
+bfloat16x4_t test_vdup_laneq_bf16(bfloat16x8_t v) {
+  return vdup_laneq_bf16(v, 7);
+}
+
+// CHECK-LABEL: @test_vdupq_laneq_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[LANE:%.*]] = shufflevector <8 x bfloat> [[V:%.*]], <8 x bfloat> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+// CHECK-NEXT:    ret <8 x bfloat> [[LANE]]
+//
+bfloat16x8_t test_vdupq_laneq_bf16(bfloat16x8_t v) {
+  return vdupq_laneq_bf16(v, 7);
+}
+
+// CHECK-LABEL: @test_vcombine_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <4 x bfloat> [[LOW:%.*]], <4 x bfloat> [[HIGH:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+// CHECK-NEXT:    ret <8 x bfloat> [[SHUFFLE_I]]
+//
+bfloat16x8_t test_vcombine_bf16(bfloat16x4_t low, bfloat16x4_t high) {
+  return vcombine_bf16(low, high);
+}
+
+// CHECK-LABEL: @test_vget_high_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <8 x bfloat> [[A:%.*]], <8 x bfloat> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+// CHECK-NEXT:    ret <4 x bfloat> [[SHUFFLE_I]]
+//
+bfloat16x4_t test_vget_high_bf16(bfloat16x8_t a) {
+  return vget_high_bf16(a);
+}
+
+// CHECK-LABEL: @test_vget_low_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <8 x bfloat> [[A:%.*]], <8 x bfloat> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+// CHECK-NEXT:    ret <4 x bfloat> [[SHUFFLE_I]]
+//
+bfloat16x4_t test_vget_low_bf16(bfloat16x8_t a) {
+  return vget_low_bf16(a);
+}
+
+// CHECK-LABEL: @test_vget_lane_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTCAST1:%.*]] = extractelement <4 x bfloat> [[V:%.*]], i32 1
+// CHECK-NEXT:    ret bfloat [[DOTCAST1]]
+//
+bfloat16_t test_vget_lane_bf16(bfloat16x4_t v) {
+  return vget_lane_bf16(v, 1);
+}
+
+// CHECK-LABEL: @test_vgetq_lane_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTCAST1:%.*]] = extractelement <8 x bfloat> [[V:%.*]], i32 7
+// CHECK-NEXT:    ret bfloat [[DOTCAST1]]
+//
+bfloat16_t test_vgetq_lane_bf16(bfloat16x8_t v) {
+  return vgetq_lane_bf16(v, 7);
+}
+
+// CHECK-LABEL: @test_vset_lane_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = insertelement <4 x bfloat> [[V:%.*]], bfloat [[A:%.*]], i32 1
+// CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vset_lane_bf16(bfloat16_t a, bfloat16x4_t v) {
+  return vset_lane_bf16(a, v, 1);
+}
+
+// CHECK-LABEL: @test_vsetq_lane_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = insertelement <8 x bfloat> [[V:%.*]], bfloat [[A:%.*]], i32 7
+// CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vsetq_lane_bf16(bfloat16_t a, bfloat16x8_t v) {
+  return vsetq_lane_bf16(a, v, 7);
+}
+
+// CHECK-LABEL: @test_vduph_lane_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VGET_LANE:%.*]] = extractelement <4 x bfloat> [[V:%.*]], i32 1
+// CHECK-NEXT:    ret bfloat [[VGET_LANE]]
+//
+bfloat16_t test_vduph_lane_bf16(bfloat16x4_t v) {
+  return vduph_lane_bf16(v, 1);
+}
+
+// CHECK-LABEL: @test_vduph_laneq_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VGETQ_LANE:%.*]] = extractelement <8 x bfloat> [[V:%.*]], i32 7
+// CHECK-NEXT:    ret bfloat [[VGETQ_LANE]]
+//
+bfloat16_t test_vduph_laneq_bf16(bfloat16x8_t v) {
+  return vduph_laneq_bf16(v, 7);
+}

diff --git a/clang/test/CodeGen/arm-bf16-getset-intrinsics.c b/clang/test/CodeGen/arm-bf16-getset-intrinsics.c
new file mode 100644
index 000000000000..ed35f810d18f
--- /dev/null
+++ b/clang/test/CodeGen/arm-bf16-getset-intrinsics.c
@@ -0,0 +1,151 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple armv8.6a-arm-none-eabi -target-feature +neon -target-feature +bf16 -mfloat-abi hard \
+// RUN:  -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg -instcombine | FileCheck %s
+
+#include <arm_neon.h>
+
+// CHECK-LABEL: @test_vcreate_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast i64 [[A:%.*]] to <4 x bfloat>
+// CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vcreate_bf16(uint64_t a) {
+  return vcreate_bf16(a);
+}
+
+// CHECK-LABEL: @test_vdup_n_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VECINIT_I:%.*]] = insertelement <4 x bfloat> undef, bfloat [[V:%.*]], i32 0
+// CHECK-NEXT:    [[VECINIT3_I:%.*]] = shufflevector <4 x bfloat> [[VECINIT_I]], <4 x bfloat> undef, <4 x i32> zeroinitializer
+// CHECK-NEXT:    ret <4 x bfloat> [[VECINIT3_I]]
+//
+bfloat16x4_t test_vdup_n_bf16(bfloat16_t v) {
+  return vdup_n_bf16(v);
+}
+
+// CHECK-LABEL: @test_vdupq_n_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VECINIT_I:%.*]] = insertelement <8 x bfloat> undef, bfloat [[V:%.*]], i32 0
+// CHECK-NEXT:    [[VECINIT7_I:%.*]] = shufflevector <8 x bfloat> [[VECINIT_I]], <8 x bfloat> undef, <8 x i32> zeroinitializer
+// CHECK-NEXT:    ret <8 x bfloat> [[VECINIT7_I]]
+//
+bfloat16x8_t test_vdupq_n_bf16(bfloat16_t v) {
+  return vdupq_n_bf16(v);
+}
+
+// CHECK-LABEL: @test_vdup_lane_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[LANE:%.*]] = shufflevector <4 x bfloat> [[V:%.*]], <4 x bfloat> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+// CHECK-NEXT:    ret <4 x bfloat> [[LANE]]
+//
+bfloat16x4_t test_vdup_lane_bf16(bfloat16x4_t v) {
+  return vdup_lane_bf16(v, 1);
+}
+
+// CHECK-LABEL: @test_vdupq_lane_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[LANE:%.*]] = shufflevector <4 x bfloat> [[V:%.*]], <4 x bfloat> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+// CHECK-NEXT:    ret <8 x bfloat> [[LANE]]
+//
+bfloat16x8_t test_vdupq_lane_bf16(bfloat16x4_t v) {
+  return vdupq_lane_bf16(v, 1);
+}
+
+// CHECK-LABEL: @test_vdup_laneq_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[LANE:%.*]] = shufflevector <8 x bfloat> [[V:%.*]], <8 x bfloat> undef, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
+// CHECK-NEXT:    ret <4 x bfloat> [[LANE]]
+//
+bfloat16x4_t test_vdup_laneq_bf16(bfloat16x8_t v) {
+  return vdup_laneq_bf16(v, 7);
+}
+
+// CHECK-LABEL: @test_vdupq_laneq_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[LANE:%.*]] = shufflevector <8 x bfloat> [[V:%.*]], <8 x bfloat> undef, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
+// CHECK-NEXT:    ret <8 x bfloat> [[LANE]]
+//
+bfloat16x8_t test_vdupq_laneq_bf16(bfloat16x8_t v) {
+  return vdupq_laneq_bf16(v, 7);
+}
+
+// CHECK-LABEL: @test_vcombine_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <4 x bfloat> [[LOW:%.*]], <4 x bfloat> [[HIGH:%.*]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+// CHECK-NEXT:    ret <8 x bfloat> [[SHUFFLE_I]]
+//
+bfloat16x8_t test_vcombine_bf16(bfloat16x4_t low, bfloat16x4_t high) {
+  return vcombine_bf16(low, high);
+}
+
+// CHECK-LABEL: @test_vget_high_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <8 x bfloat> [[A:%.*]], <8 x bfloat> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+// CHECK-NEXT:    ret <4 x bfloat> [[SHUFFLE_I]]
+//
+bfloat16x4_t test_vget_high_bf16(bfloat16x8_t a) {
+  return vget_high_bf16(a);
+}
+
+// CHECK-LABEL: @test_vget_low_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[SHUFFLE_I:%.*]] = shufflevector <8 x bfloat> [[A:%.*]], <8 x bfloat> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+// CHECK-NEXT:    ret <4 x bfloat> [[SHUFFLE_I]]
+//
+bfloat16x4_t test_vget_low_bf16(bfloat16x8_t a) {
+  return vget_low_bf16(a);
+}
+
+// CHECK-LABEL: @test_vget_lane_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTCAST1:%.*]] = extractelement <4 x bfloat> [[V:%.*]], i32 1
+// CHECK-NEXT:    ret bfloat [[DOTCAST1]]
+//
+bfloat16_t test_vget_lane_bf16(bfloat16x4_t v) {
+  return vget_lane_bf16(v, 1);
+}
+
+// CHECK-LABEL: @test_vgetq_lane_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTCAST1:%.*]] = extractelement <8 x bfloat> [[V:%.*]], i32 7
+// CHECK-NEXT:    ret bfloat [[DOTCAST1]]
+//
+bfloat16_t test_vgetq_lane_bf16(bfloat16x8_t v) {
+  return vgetq_lane_bf16(v, 7);
+}
+
+// CHECK-LABEL: @test_vset_lane_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = insertelement <4 x bfloat> [[V:%.*]], bfloat [[A:%.*]], i32 1
+// CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
+//
+bfloat16x4_t test_vset_lane_bf16(bfloat16_t a, bfloat16x4_t v) {
+  return vset_lane_bf16(a, v, 1);
+}
+
+// CHECK-LABEL: @test_vsetq_lane_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = insertelement <8 x bfloat> [[V:%.*]], bfloat [[A:%.*]], i32 7
+// CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
+//
+bfloat16x8_t test_vsetq_lane_bf16(bfloat16_t a, bfloat16x8_t v) {
+  return vsetq_lane_bf16(a, v, 7);
+}
+
+// CHECK-LABEL: @test_vduph_lane_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VGET_LANE:%.*]] = extractelement <4 x bfloat> [[V:%.*]], i32 1
+// CHECK-NEXT:    ret bfloat [[VGET_LANE]]
+//
+bfloat16_t test_vduph_lane_bf16(bfloat16x4_t v) {
+  return vduph_lane_bf16(v, 1);
+}
+
+// CHECK-LABEL: @test_vduph_laneq_bf16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[VGETQ_LANE:%.*]] = extractelement <8 x bfloat> [[V:%.*]], i32 7
+// CHECK-NEXT:    ret bfloat [[VGETQ_LANE]]
+//
+bfloat16_t test_vduph_laneq_bf16(bfloat16x8_t v) {
+  return vduph_laneq_bf16(v, 7);
+}


        


More information about the cfe-commits mailing list