[clang] c883f82 - [PowerPC][CLANG] DMF VSX Vector float GER 2x (rank-2 update) (#147383)

via cfe-commits cfe-commits at lists.llvm.org
Mon Sep 8 12:29:36 PDT 2025


Author: Lei Huang
Date: 2025-09-08T15:29:32-04:00
New Revision: c883f82ce8dca5dec51d3226a68451548eaa2e56

URL: https://github.com/llvm/llvm-project/commit/c883f82ce8dca5dec51d3226a68451548eaa2e56
DIFF: https://github.com/llvm/llvm-project/commit/c883f82ce8dca5dec51d3226a68451548eaa2e56.diff

LOG: [PowerPC][CLANG] DMF VSX Vector float GER 2x (rank-2 update) (#147383)

Add Clang builtins for the DMF VSX Vector 16-bit floating-point and bfloat16 GER 2x (rank-2 update) operations:

```
void __builtin_mma_dmxvf16gerx2 (__dmr1024 *, __vector_pair, vec_t);
void __builtin_mma_dmxvf16gerx2nn (__dmr1024 *, __vector_pair, vec_t);
void __builtin_mma_dmxvf16gerx2np (__dmr1024 *, __vector_pair, vec_t);
void __builtin_mma_dmxvf16gerx2pn (__dmr1024 *, __vector_pair, vec_t);
void __builtin_mma_dmxvf16gerx2pp (__dmr1024 *, __vector_pair, vec_t);
void __builtin_mma_pmdmxvf16gerx2 (__dmr1024 *, __vector_pair, vec_t, uint8, uint4, uint2);
void __builtin_mma_pmdmxvf16gerx2nn (__dmr1024 *, __vector_pair, vec_t, uint8, uint4, uint2);
void __builtin_mma_pmdmxvf16gerx2np (__dmr1024 *, __vector_pair, vec_t, uint8, uint4, uint2);
void __builtin_mma_pmdmxvf16gerx2pn (__dmr1024 *, __vector_pair, vec_t, uint8, uint4, uint2);
void __builtin_mma_pmdmxvf16gerx2pp (__dmr1024 *, __vector_pair, vec_t, uint8, uint4, uint2);

void __builtin_mma_dmxvbf16gerx2 (__dmr1024 *, __vector_pair, vec_t);
void __builtin_mma_dmxvbf16gerx2nn (__dmr1024 *, __vector_pair, vec_t);
void __builtin_mma_dmxvbf16gerx2np (__dmr1024 *, __vector_pair, vec_t);
void __builtin_mma_dmxvbf16gerx2pn (__dmr1024 *, __vector_pair, vec_t);
void __builtin_mma_dmxvbf16gerx2pp (__dmr1024 *, __vector_pair, vec_t);
void __builtin_mma_pmdmxvbf16gerx2 (__dmr1024 *, __vector_pair, vec_t, uint8, uint4, uint2);
void __builtin_mma_pmdmxvbf16gerx2nn (__dmr1024 *, __vector_pair, vec_t, uint8, uint4, uint2);
void __builtin_mma_pmdmxvbf16gerx2np (__dmr1024 *, __vector_pair, vec_t, uint8, uint4, uint2);
void __builtin_mma_pmdmxvbf16gerx2pn (__dmr1024 *, __vector_pair, vec_t, uint8, uint4, uint2);
void __builtin_mma_pmdmxvbf16gerx2pp (__dmr1024 *, __vector_pair, vec_t, uint8, uint4, uint2);
```
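
A minimal usage sketch, modeled on the new CodeGen test added below; the wrapper name `f16_rank2_update` and the non-zero mask values are illustrative assumptions, and it assumes a compiler targeting `-mcpu=future` with the `mma` and `isa-future-instructions` features enabled:

```
// Sketch only: mirrors the pattern used in
// clang/test/CodeGen/PowerPC/builtins-dmf-vsx-vector-float.c (added below).
// The function name f16_rank2_update is hypothetical.
void f16_rank2_update(unsigned char *accp, unsigned char *vpp,
                      vector unsigned char vc, unsigned char *resp) {
  __dmr1024 acc = *((__dmr1024 *)accp);        // 1024-bit dense-math accumulator
  __vector_pair vp = *((__vector_pair *)vpp);  // 256-bit VSX vector pair input

  // Rank-2 update, positive-multiply/positive-accumulate ("pp") variant.
  __builtin_mma_dmxvf16gerx2pp(&acc, vp, vc);

  // Prefixed masked form: the three trailing immediates must be compile-time
  // constants within the uint8/uint4/uint2 ranges listed above.
  __builtin_mma_pmdmxvf16gerx2pp(&acc, vp, vc, 255, 15, 3);

  *((__dmr1024 *)resp) = acc;
}
```

As with the existing MMA GER builtins, the `pp`/`pn`/`np`/`nn` suffixes select the signs applied to the product and the accumulated value, while the unsuffixed forms write the target DMR without accumulating.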

Added: 
    clang/test/CodeGen/PowerPC/builtins-dmf-vsx-vector-float.c

Modified: 
    clang/include/clang/Basic/BuiltinsPPC.def
    clang/test/CodeGen/PowerPC/ppc-dmf-mma-builtin-err.c

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/BuiltinsPPC.def b/clang/include/clang/Basic/BuiltinsPPC.def
index 22926b6a7d095..db71efc238386 100644
--- a/clang/include/clang/Basic/BuiltinsPPC.def
+++ b/clang/include/clang/Basic/BuiltinsPPC.def
@@ -1122,6 +1122,14 @@ UNALIASED_CUSTOM_MMA_BUILTIN(mma_xvbf16ger2, "vW512*VV",
                              "mma,paired-vector-memops")
 UNALIASED_CUSTOM_MMA_BUILTIN(mma_pmxvbf16ger2, "vW512*VVi15i15i3",
                              "mma,paired-vector-memops")
+UNALIASED_CUSTOM_MMA_BUILTIN(mma_dmxvbf16gerx2, "vW1024*W256V",
+                             "mma,isa-future-instructions")
+UNALIASED_CUSTOM_MMA_BUILTIN(mma_pmdmxvbf16gerx2, "vW1024*W256Vi255i15i3",
+                             "mma,isa-future-instructions")
+UNALIASED_CUSTOM_MMA_BUILTIN(mma_dmxvf16gerx2, "vW1024*W256V",
+                             "mma,isa-future-instructions")
+UNALIASED_CUSTOM_MMA_BUILTIN(mma_pmdmxvf16gerx2, "vW1024*W256Vi255i15i3",
+                             "mma,isa-future-instructions")
 
 // FIXME: Obviously incomplete.
 

diff --git a/clang/test/CodeGen/PowerPC/builtins-dmf-vsx-vector-float.c b/clang/test/CodeGen/PowerPC/builtins-dmf-vsx-vector-float.c
new file mode 100644
index 0000000000000..8fc9a68a5a613
--- /dev/null
+++ b/clang/test/CodeGen/PowerPC/builtins-dmf-vsx-vector-float.c
@@ -0,0 +1,309 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+//       Updates were then applied manually to commonize the checks for AIX and Linux on Power (LoP).
+// RUN: %clang_cc1 -O3 -triple powerpc64le-unknown-unknown -target-cpu future \
+// RUN:            -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -O3 -triple powerpc64-ibm-aix -target-cpu future \
+// RUN:             -emit-llvm %s -o - | FileCheck %s
+
+// CHECK-LABEL: void @test_dmxvbf16gerx2(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxvbf16gerx2(<256 x i1> [[TMP0]], <16 x i8> [[VC:%.*]])
+// CHECK-NEXT:    store <1024 x i1> [[TMP1]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-NEXT:    ret void
+//
+void test_dmxvbf16gerx2(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_dmxvbf16gerx2(&vdmr, vp, vc);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_dmxvbf16gerx2nn(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxvbf16gerx2nn(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]])
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_dmxvbf16gerx2nn(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_dmxvbf16gerx2nn(&vdmr, vp, vc);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_dmxvbf16gerx2np(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxvbf16gerx2np(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]])
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_dmxvbf16gerx2np(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_dmxvbf16gerx2np(&vdmr, vp, vc);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_dmxvbf16gerx2pn(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxvbf16gerx2pn(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]])
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_dmxvbf16gerx2pn(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_dmxvbf16gerx2pn(&vdmr, vp, vc);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_dmxvbf16gerx2pp(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxvbf16gerx2pp(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]])
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_dmxvbf16gerx2pp(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_dmxvbf16gerx2pp(&vdmr, vp, vc);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_pmdmxvbf16gerx2(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.pmdmxvbf16gerx2(<256 x i1> [[TMP0]], <16 x i8> [[VC:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT:    store <1024 x i1> [[TMP1]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_pmdmxvbf16gerx2(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_pmdmxvbf16gerx2(&vdmr, vp, vc, 0, 0, 0);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_pmdmxvbf16gerx2nn(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.pmdmxvbf16gerx2nn(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_pmdmxvbf16gerx2nn(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_pmdmxvbf16gerx2nn(&vdmr, vp, vc, 0, 0, 0);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_pmdmxvbf16gerx2np(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.pmdmxvbf16gerx2np(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_pmdmxvbf16gerx2np(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_pmdmxvbf16gerx2np(&vdmr, vp, vc, 0, 0, 0);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_pmdmxvbf16gerx2pn(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.pmdmxvbf16gerx2pn(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_pmdmxvbf16gerx2pn(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_pmdmxvbf16gerx2pn(&vdmr, vp, vc, 0, 0, 0);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_pmdmxvbf16gerx2pp(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.pmdmxvbf16gerx2pp(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_pmdmxvbf16gerx2pp(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_pmdmxvbf16gerx2pp(&vdmr, vp, vc, 0, 0, 0);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_dmxvf16gerx2(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxvf16gerx2(<256 x i1> [[TMP0]], <16 x i8> [[VC:%.*]])
+// CHECK-NEXT:    store <1024 x i1> [[TMP1]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-NEXT:    ret void
+//
+void test_dmxvf16gerx2(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_dmxvf16gerx2(&vdmr, vp, vc);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_dmxvf16gerx2nn(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxvf16gerx2nn(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]])
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_dmxvf16gerx2nn(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_dmxvf16gerx2nn(&vdmr, vp, vc);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_dmxvf16gerx2np(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxvf16gerx2np(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]])
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_dmxvf16gerx2np(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_dmxvf16gerx2np(&vdmr, vp, vc);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_dmxvf16gerx2pn(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxvf16gerx2pn(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]])
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_dmxvf16gerx2pn(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_dmxvf16gerx2pn(&vdmr, vp, vc);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_dmxvf16gerx2pp(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.dmxvf16gerx2pp(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]])
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_dmxvf16gerx2pp(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_dmxvf16gerx2pp(&vdmr, vp, vc);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_pmdmxvf16gerx2(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.pmdmxvf16gerx2(<256 x i1> [[TMP0]], <16 x i8> [[VC:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT:    store <1024 x i1> [[TMP1]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_pmdmxvf16gerx2(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_pmdmxvf16gerx2(&vdmr, vp, vc, 0, 0, 0);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_pmdmxvf16gerx2nn(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.pmdmxvf16gerx2nn(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_pmdmxvf16gerx2nn(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_pmdmxvf16gerx2nn(&vdmr, vp, vc, 0, 0, 0);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_pmdmxvf16gerx2np(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.pmdmxvf16gerx2np(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_pmdmxvf16gerx2np(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_pmdmxvf16gerx2np(&vdmr, vp, vc, 0, 0, 0);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_pmdmxvf16gerx2pn(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.pmdmxvf16gerx2pn(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_pmdmxvf16gerx2pn(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_pmdmxvf16gerx2pn(&vdmr, vp, vc, 0, 0, 0);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK-LABEL: void @test_pmdmxvf16gerx2pp(
+// CHECK-NEXT:  [[ENTRY:.*:]]
+// CHECK-NEXT:    [[TMP0:%.*]] = load <1024 x i1>, ptr [[VDMRP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <256 x i1>, ptr [[VPP:%.*]], align 32, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = tail call <1024 x i1> @llvm.ppc.mma.pmdmxvf16gerx2pp(<1024 x i1> [[TMP0]], <256 x i1> [[TMP1]], <16 x i8> [[VC:%.*]], i32 0, i32 0, i32 0)
+// CHECK-NEXT:    store <1024 x i1> [[TMP2]], ptr [[RESP:%.*]], align 128, !tbaa [[TBAA6]]
+// CHECK-NEXT:    ret void
+//
+void test_pmdmxvf16gerx2pp(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc, unsigned char *resp) {
+  __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
+  __vector_pair vp = *((__vector_pair *)vpp);
+  __builtin_mma_pmdmxvf16gerx2pp(&vdmr, vp, vc, 0, 0, 0);
+  *((__dmr1024 *)resp) = vdmr;
+}
+
+// CHECK: [[TBAA2]] = !{[[META3:![0-9]+]], [[META3]], i64 0}
+// CHECK: [[META3]] = !{!"__vector_pair", [[META4:![0-9]+]], i64 0}
+// CHECK: [[META4]] = !{!"omnipotent char", [[META5:![0-9]+]], i64 0}
+// CHECK: [[META5]] = !{!"Simple C/C++ TBAA"}
+// CHECK: [[TBAA6]] = !{[[META7:![0-9]+]], [[META7]], i64 0}
+// CHECK: [[META7]] = !{!"__dmr1024", [[META4]], i64 0}

diff --git a/clang/test/CodeGen/PowerPC/ppc-dmf-mma-builtin-err.c b/clang/test/CodeGen/PowerPC/ppc-dmf-mma-builtin-err.c
index ea2b99b0e5b20..06497555b840f 100644
--- a/clang/test/CodeGen/PowerPC/ppc-dmf-mma-builtin-err.c
+++ b/clang/test/CodeGen/PowerPC/ppc-dmf-mma-builtin-err.c
@@ -1,9 +1,14 @@
-// RUN: not %clang_cc1 -triple powerpc64le-unknown-linux-gnu -target-cpu pwr10 \
-// RUN:   %s -emit-llvm-only 2>&1 | FileCheck %s
-// RUN: not %clang_cc1 -triple powerpc64le-unknown-linux-gnu -target-cpu future \
-// RUN:   %s -emit-llvm-only 2>&1 | FileCheck %s
+// RUN: not %clang_cc1 -triple powerpc64le-unknown-linux-gnu -target-feature -mma \
+// RUN:   -target-cpu pwr10 %s -emit-llvm-only 2>&1 | \
+// RUN:   FileCheck --check-prefixes=CHECK,ISA_FUTURE  %s
+// RUN: not %clang_cc1 -triple powerpc64le-unknown-linux-gnu -target-feature -mma \
+// RUN:   -target-cpu future %s -emit-llvm-only 2>&1 | \
+// RUN:   FileCheck --check-prefixes=CHECK,ISA_FUTURE  %s
+// RUN: not %clang_cc1 -triple powerpc64le-unknown-linux-gnu -target-feature \
+// RUN:   -isa-future-instructions -target-cpu future %s -emit-llvm-only 2>&1 | \
+// RUN:   FileCheck --check-prefix=ISA_FUTURE %s
 
-__attribute__((target("no-mma")))
+//__attribute__((target("no-mma")))
 void test_mma(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc) {
   __dmr1024 vdmr = *((__dmr1024 *)vdmrp);
   __vector_pair vp = *((__vector_pair *)vpp);
@@ -25,9 +30,57 @@ void test_mma(unsigned char *vdmrp, unsigned char *vpp, vector unsigned char vc)
 // CHECK: error: '__builtin_mma_pmdmxvi8gerx4pp' needs target feature mma,paired-vector-memops
 // CHECK: error: '__builtin_mma_dmxvi8gerx4spp' needs target feature mma,paired-vector-memops
 // CHECK: error: '__builtin_mma_pmdmxvi8gerx4spp' needs target feature mma,paired-vector-memops
-// CHECK: error: '__builtin_mma_dmsetdmrz' needs target feature mma,isa-future-instructions
-// CHECK: error: '__builtin_mma_dmmr' needs target feature mma,isa-future-instructions
-// CHECK: error: '__builtin_mma_dmxor' needs target feature mma,isa-future-instructions
-// CHECK: error: '__builtin_mma_build_dmr' needs target feature mma,isa-future-instructions
-// CHECK: error: '__builtin_mma_disassemble_dmr' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_dmsetdmrz' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_dmmr' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_dmxor' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_build_dmr' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_disassemble_dmr' needs target feature mma,isa-future-instructions
+
+  // DMF VSX Vector bfloat16 GER 2x builtins.
+
+  __builtin_mma_dmxvbf16gerx2(&vdmr, vp, vc);
+  __builtin_mma_dmxvbf16gerx2nn(&vdmr, vp, vc);
+  __builtin_mma_dmxvbf16gerx2np(&vdmr, vp, vc);
+  __builtin_mma_dmxvbf16gerx2pn(&vdmr, vp, vc);
+  __builtin_mma_dmxvbf16gerx2pp(&vdmr, vp, vc);
+  __builtin_mma_pmdmxvbf16gerx2(&vdmr, vp, vc, 0, 0, 0);
+  __builtin_mma_pmdmxvbf16gerx2nn(&vdmr, vp, vc, 0, 0, 0);
+  __builtin_mma_pmdmxvbf16gerx2np(&vdmr, vp, vc, 0, 0, 0);
+  __builtin_mma_pmdmxvbf16gerx2pn(&vdmr, vp, vc, 0, 0, 0);
+  __builtin_mma_pmdmxvbf16gerx2pp(&vdmr, vp, vc, 0, 0, 0);
+
+// ISA_FUTURE: error: '__builtin_mma_dmxvbf16gerx2' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_dmxvbf16gerx2nn' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_dmxvbf16gerx2np' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_dmxvbf16gerx2pn' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_dmxvbf16gerx2pp' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_pmdmxvbf16gerx2' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_pmdmxvbf16gerx2nn' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_pmdmxvbf16gerx2np' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_pmdmxvbf16gerx2pn' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_pmdmxvbf16gerx2pp' needs target feature mma,isa-future-instructions
+
+  // DMF VSX Vector 16-bit floating-point GER 2x builtins.
+
+  __builtin_mma_dmxvf16gerx2(&vdmr, vp, vc);
+  __builtin_mma_dmxvf16gerx2nn(&vdmr, vp, vc);
+  __builtin_mma_dmxvf16gerx2np(&vdmr, vp, vc);
+  __builtin_mma_dmxvf16gerx2pn(&vdmr, vp, vc);
+  __builtin_mma_dmxvf16gerx2pp(&vdmr, vp, vc);
+  __builtin_mma_pmdmxvf16gerx2(&vdmr, vp, vc, 0, 0, 0);
+  __builtin_mma_pmdmxvf16gerx2nn(&vdmr, vp, vc, 0, 0, 0);
+  __builtin_mma_pmdmxvf16gerx2np(&vdmr, vp, vc, 0, 0, 0);
+  __builtin_mma_pmdmxvf16gerx2pn(&vdmr, vp, vc, 0, 0, 0);
+  __builtin_mma_pmdmxvf16gerx2pp(&vdmr, vp, vc, 0, 0, 0);
+
+// ISA_FUTURE: error: '__builtin_mma_dmxvf16gerx2' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_dmxvf16gerx2nn' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_dmxvf16gerx2np' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_dmxvf16gerx2pn' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_dmxvf16gerx2pp' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_pmdmxvf16gerx2' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_pmdmxvf16gerx2nn' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_pmdmxvf16gerx2np' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_pmdmxvf16gerx2pn' needs target feature mma,isa-future-instructions
+// ISA_FUTURE: error: '__builtin_mma_pmdmxvf16gerx2pp' needs target feature mma,isa-future-instructions
 }


        

