[clang] 9810035 - [SVE] Ensure proper mangling of ACLE tuple types

Sander de Smalen via cfe-commits cfe-commits at lists.llvm.org
Sun Jun 14 23:38:14 PDT 2020


Author: Sander de Smalen
Date: 2020-06-15T07:36:12+01:00
New Revision: 98100353d784e599fca502a34490603942f1930c

URL: https://github.com/llvm/llvm-project/commit/98100353d784e599fca502a34490603942f1930c
DIFF: https://github.com/llvm/llvm-project/commit/98100353d784e599fca502a34490603942f1930c.diff

LOG: [SVE] Ensure proper mangling of ACLE tuple types

The AAPCS specifies that the tuple types such as `svint32x2_t`
should use their `arm_sve.h` names when mangled instead of their
builtin names.

This patch also renames the internal types for the tuples to
be prefixed with `__clang_`, so they are not misinterpreted as
specified internal types like the non-tuple types which *are* defined
in the AAPCS. Using a builtin type for the tuples is purely
a choice of the Clang implementation.

Reviewers: rsandifo-arm, c-rhodes, efriedma, rengolin

Reviewed By: efriedma

Tags: #clang

Differential Revision: https://reviews.llvm.org/D81721

Added: 
    

Modified: 
    clang/include/clang/Basic/AArch64SVEACLETypes.def
    clang/lib/AST/ASTContext.cpp
    clang/lib/AST/ItaniumMangle.cpp
    clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp
    clang/utils/TableGen/SveEmitter.cpp

Removed: 
    


################################################################################
diff  --git a/clang/include/clang/Basic/AArch64SVEACLETypes.def b/clang/include/clang/Basic/AArch64SVEACLETypes.def
index 2daf4c76a1ad..b0950d1058bf 100644
--- a/clang/include/clang/Basic/AArch64SVEACLETypes.def
+++ b/clang/include/clang/Basic/AArch64SVEACLETypes.def
@@ -38,83 +38,84 @@
 //===----------------------------------------------------------------------===//
 
 #ifndef SVE_VECTOR_TYPE
-#define SVE_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, IsSigned, IsFP) \
+#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits,    \
+                        IsSigned, IsFP)                                        \
   SVE_TYPE(Name, Id, SingletonId)
 #endif
 
 #ifndef SVE_PREDICATE_TYPE
-#define SVE_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)\
+#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls)         \
   SVE_TYPE(Name, Id, SingletonId)
 #endif
 
 //===- Vector point types -----------------------------------------------===//
 
-SVE_VECTOR_TYPE("__SVInt8_t",  SveInt8, SveInt8Ty, 16, 8, true, false)
-SVE_VECTOR_TYPE("__SVInt16_t", SveInt16, SveInt16Ty, 8, 16, true, false)
-SVE_VECTOR_TYPE("__SVInt32_t", SveInt32, SveInt32Ty, 4, 32, true, false)
-SVE_VECTOR_TYPE("__SVInt64_t", SveInt64, SveInt64Ty, 2, 64, true, false)
+SVE_VECTOR_TYPE("__SVInt8_t", "__SVInt8_t",  SveInt8, SveInt8Ty, 16, 8, true, false)
+SVE_VECTOR_TYPE("__SVInt16_t", "__SVInt16_t", SveInt16, SveInt16Ty, 8, 16, true, false)
+SVE_VECTOR_TYPE("__SVInt32_t", "__SVInt32_t", SveInt32, SveInt32Ty, 4, 32, true, false)
+SVE_VECTOR_TYPE("__SVInt64_t", "__SVInt64_t", SveInt64, SveInt64Ty, 2, 64, true, false)
 
-SVE_VECTOR_TYPE("__SVUint8_t",  SveUint8, SveUint8Ty, 16, 8, false, false)
-SVE_VECTOR_TYPE("__SVUint16_t", SveUint16, SveUint16Ty, 8, 16, false, false)
-SVE_VECTOR_TYPE("__SVUint32_t", SveUint32, SveUint32Ty, 4, 32, false, false)
-SVE_VECTOR_TYPE("__SVUint64_t", SveUint64, SveUint64Ty, 2, 64, false, false)
+SVE_VECTOR_TYPE("__SVUint8_t", "__SVUint8_t",  SveUint8, SveUint8Ty, 16, 8, false, false)
+SVE_VECTOR_TYPE("__SVUint16_t", "__SVUint16_t", SveUint16, SveUint16Ty, 8, 16, false, false)
+SVE_VECTOR_TYPE("__SVUint32_t", "__SVUint32_t", SveUint32, SveUint32Ty, 4, 32, false, false)
+SVE_VECTOR_TYPE("__SVUint64_t", "__SVUint64_t", SveUint64, SveUint64Ty, 2, 64, false, false)
 
-SVE_VECTOR_TYPE("__SVFloat16_t", SveFloat16, SveFloat16Ty, 8, 16, true, true)
-SVE_VECTOR_TYPE("__SVFloat32_t", SveFloat32, SveFloat32Ty, 4, 32, true, true)
-SVE_VECTOR_TYPE("__SVFloat64_t", SveFloat64, SveFloat64Ty, 2, 64, true, true)
+SVE_VECTOR_TYPE("__SVFloat16_t", "__SVFloat16_t", SveFloat16, SveFloat16Ty, 8, 16, true, true)
+SVE_VECTOR_TYPE("__SVFloat32_t", "__SVFloat32_t", SveFloat32, SveFloat32Ty, 4, 32, true, true)
+SVE_VECTOR_TYPE("__SVFloat64_t", "__SVFloat64_t", SveFloat64, SveFloat64Ty, 2, 64, true, true)
 
 //
 // x2
 //
-SVE_VECTOR_TYPE("__SVInt8x2_t",  SveInt8x2, SveInt8x2Ty, 32, 8, true, false)
-SVE_VECTOR_TYPE("__SVInt16x2_t", SveInt16x2, SveInt16x2Ty, 16, 16, true, false)
-SVE_VECTOR_TYPE("__SVInt32x2_t", SveInt32x2, SveInt32x2Ty, 8, 32, true, false)
-SVE_VECTOR_TYPE("__SVInt64x2_t", SveInt64x2, SveInt64x2Ty, 4, 64, true, false)
+SVE_VECTOR_TYPE("__clang_svint8x2_t", "svint8x2_t",  SveInt8x2, SveInt8x2Ty, 32, 8, true, false)
+SVE_VECTOR_TYPE("__clang_svint16x2_t", "svint16x2_t", SveInt16x2, SveInt16x2Ty, 16, 16, true, false)
+SVE_VECTOR_TYPE("__clang_svint32x2_t", "svint32x2_t", SveInt32x2, SveInt32x2Ty, 8, 32, true, false)
+SVE_VECTOR_TYPE("__clang_svint64x2_t", "svint64x2_t", SveInt64x2, SveInt64x2Ty, 4, 64, true, false)
 
-SVE_VECTOR_TYPE("__SVUint8x2_t",  SveUint8x2, SveUint8x2Ty, 32, 8, false, false)
-SVE_VECTOR_TYPE("__SVUint16x2_t", SveUint16x2, SveUint16x2Ty, 16, 16, false, false)
-SVE_VECTOR_TYPE("__SVUint32x2_t", SveUint32x2, SveUint32x2Ty, 8, 32, false, false)
-SVE_VECTOR_TYPE("__SVUint64x2_t", SveUint64x2, SveUint64x2Ty, 4, 64, false, false)
+SVE_VECTOR_TYPE("__clang_svuint8x2_t", "svuint8x2_t",  SveUint8x2, SveUint8x2Ty, 32, 8, false, false)
+SVE_VECTOR_TYPE("__clang_svuint16x2_t", "svuint16x2_t", SveUint16x2, SveUint16x2Ty, 16, 16, false, false)
+SVE_VECTOR_TYPE("__clang_svuint32x2_t", "svuint32x2_t", SveUint32x2, SveUint32x2Ty, 8, 32, false, false)
+SVE_VECTOR_TYPE("__clang_svuint64x2_t", "svuint64x2_t", SveUint64x2, SveUint64x2Ty, 4, 64, false, false)
 
-SVE_VECTOR_TYPE("__SVFloat16x2_t", SveFloat16x2, SveFloat16x2Ty, 16, 16, true, true)
-SVE_VECTOR_TYPE("__SVFloat32x2_t", SveFloat32x2, SveFloat32x2Ty, 8, 32, true, true)
-SVE_VECTOR_TYPE("__SVFloat64x2_t", SveFloat64x2, SveFloat64x2Ty, 4, 64, true, true)
+SVE_VECTOR_TYPE("__clang_svfloat16x2_t", "svfloat16x2_t", SveFloat16x2, SveFloat16x2Ty, 16, 16, true, true)
+SVE_VECTOR_TYPE("__clang_svfloat32x2_t", "svfloat32x2_t", SveFloat32x2, SveFloat32x2Ty, 8, 32, true, true)
+SVE_VECTOR_TYPE("__clang_svfloat64x2_t", "svfloat64x2_t", SveFloat64x2, SveFloat64x2Ty, 4, 64, true, true)
 
 //
 // x3
 //
-SVE_VECTOR_TYPE("__SVInt8x3_t",  SveInt8x3, SveInt8x3Ty, 48, 8, true, false)
-SVE_VECTOR_TYPE("__SVInt16x3_t", SveInt16x3, SveInt16x3Ty, 24, 16, true, false)
-SVE_VECTOR_TYPE("__SVInt32x3_t", SveInt32x3, SveInt32x3Ty, 12, 32, true, false)
-SVE_VECTOR_TYPE("__SVInt64x3_t", SveInt64x3, SveInt64x3Ty, 6, 64, true, false)
+SVE_VECTOR_TYPE("__clang_svint8x3_t", "svint8x3_t",  SveInt8x3, SveInt8x3Ty, 48, 8, true, false)
+SVE_VECTOR_TYPE("__clang_svint16x3_t", "svint16x3_t", SveInt16x3, SveInt16x3Ty, 24, 16, true, false)
+SVE_VECTOR_TYPE("__clang_svint32x3_t", "svint32x3_t", SveInt32x3, SveInt32x3Ty, 12, 32, true, false)
+SVE_VECTOR_TYPE("__clang_svint64x3_t", "svint64x3_t", SveInt64x3, SveInt64x3Ty, 6, 64, true, false)
 
-SVE_VECTOR_TYPE("__SVUint8x3_t",  SveUint8x3, SveUint8x3Ty, 48, 8, false, false)
-SVE_VECTOR_TYPE("__SVUint16x3_t", SveUint16x3, SveUint16x3Ty, 24, 16, false, false)
-SVE_VECTOR_TYPE("__SVUint32x3_t", SveUint32x3, SveUint32x3Ty, 12, 32, false, false)
-SVE_VECTOR_TYPE("__SVUint64x3_t", SveUint64x3, SveUint64x3Ty, 6, 64, false, false)
+SVE_VECTOR_TYPE("__clang_svuint8x3_t", "svuint8x3_t",  SveUint8x3, SveUint8x3Ty, 48, 8, false, false)
+SVE_VECTOR_TYPE("__clang_svuint16x3_t", "svuint16x3_t", SveUint16x3, SveUint16x3Ty, 24, 16, false, false)
+SVE_VECTOR_TYPE("__clang_svuint32x3_t", "svuint32x3_t", SveUint32x3, SveUint32x3Ty, 12, 32, false, false)
+SVE_VECTOR_TYPE("__clang_svuint64x3_t", "svuint64x3_t", SveUint64x3, SveUint64x3Ty, 6, 64, false, false)
 
-SVE_VECTOR_TYPE("__SVFloat16x3_t", SveFloat16x3, SveFloat16x3Ty, 24, 16, true, true)
-SVE_VECTOR_TYPE("__SVFloat32x3_t", SveFloat32x3, SveFloat32x3Ty, 12, 32, true, true)
-SVE_VECTOR_TYPE("__SVFloat64x3_t", SveFloat64x3, SveFloat64x3Ty, 6, 64, true, true)
+SVE_VECTOR_TYPE("__clang_svfloat16x3_t", "svfloat16x3_t", SveFloat16x3, SveFloat16x3Ty, 24, 16, true, true)
+SVE_VECTOR_TYPE("__clang_svfloat32x3_t", "svfloat32x3_t", SveFloat32x3, SveFloat32x3Ty, 12, 32, true, true)
+SVE_VECTOR_TYPE("__clang_svfloat64x3_t", "svfloat64x3_t", SveFloat64x3, SveFloat64x3Ty, 6, 64, true, true)
 
 //
 // x4
 //
-SVE_VECTOR_TYPE("__SVInt8x4_t",  SveInt8x4, SveInt8x4Ty, 64, 8, true, false)
-SVE_VECTOR_TYPE("__SVInt16x4_t", SveInt16x4, SveInt16x4Ty, 32, 16, true, false)
-SVE_VECTOR_TYPE("__SVInt32x4_t", SveInt32x4, SveInt32x4Ty, 16, 32, true, false)
-SVE_VECTOR_TYPE("__SVInt64x4_t", SveInt64x4, SveInt64x4Ty, 8, 64, true, false)
+SVE_VECTOR_TYPE("__clang_svint8x4_t", "svint8x4_t",  SveInt8x4, SveInt8x4Ty, 64, 8, true, false)
+SVE_VECTOR_TYPE("__clang_svint16x4_t", "svint16x4_t", SveInt16x4, SveInt16x4Ty, 32, 16, true, false)
+SVE_VECTOR_TYPE("__clang_svint32x4_t", "svint32x4_t", SveInt32x4, SveInt32x4Ty, 16, 32, true, false)
+SVE_VECTOR_TYPE("__clang_svint64x4_t", "svint64x4_t", SveInt64x4, SveInt64x4Ty, 8, 64, true, false)
 
-SVE_VECTOR_TYPE("__SVUint8x4_t",  SveUint8x4, SveUint8x4Ty, 64, 8, false, false)
-SVE_VECTOR_TYPE("__SVUint16x4_t", SveUint16x4, SveUint16x4Ty, 32, 16, false, false)
-SVE_VECTOR_TYPE("__SVUint32x4_t", SveUint32x4, SveUint32x4Ty, 16, 32, false, false)
-SVE_VECTOR_TYPE("__SVUint64x4_t", SveUint64x4, SveUint64x4Ty, 8, 64, false, false)
+SVE_VECTOR_TYPE("__clang_svuint8x4_t", "svuint8x4_t",  SveUint8x4, SveUint8x4Ty, 64, 8, false, false)
+SVE_VECTOR_TYPE("__clang_svuint16x4_t", "svuint16x4_t", SveUint16x4, SveUint16x4Ty, 32, 16, false, false)
+SVE_VECTOR_TYPE("__clang_svuint32x4_t", "svuint32x4_t", SveUint32x4, SveUint32x4Ty, 16, 32, false, false)
+SVE_VECTOR_TYPE("__clang_svuint64x4_t", "svuint64x4_t", SveUint64x4, SveUint64x4Ty, 8, 64, false, false)
 
-SVE_VECTOR_TYPE("__SVFloat16x4_t", SveFloat16x4, SveFloat16x4Ty, 32, 16, true, true)
-SVE_VECTOR_TYPE("__SVFloat32x4_t", SveFloat32x4, SveFloat32x4Ty, 16, 32, true, true)
-SVE_VECTOR_TYPE("__SVFloat64x4_t", SveFloat64x4, SveFloat64x4Ty, 8, 64, true, true)
+SVE_VECTOR_TYPE("__clang_svfloat16x4_t", "svfloat16x4_t", SveFloat16x4, SveFloat16x4Ty, 32, 16, true, true)
+SVE_VECTOR_TYPE("__clang_svfloat32x4_t", "svfloat32x4_t", SveFloat32x4, SveFloat32x4Ty, 16, 32, true, true)
+SVE_VECTOR_TYPE("__clang_svfloat64x4_t", "svfloat64x4_t", SveFloat64x4, SveFloat64x4Ty, 8, 64, true, true)
 
-SVE_PREDICATE_TYPE("__SVBool_t", SveBool, SveBoolTy, 16)
+SVE_PREDICATE_TYPE("__SVBool_t", "__SVBool_t", SveBool, SveBoolTy, 16)
 
 #undef SVE_VECTOR_TYPE
 #undef SVE_PREDICATE_TYPE

diff  --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index e2df64107c9e..91efa19d158b 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -2131,12 +2131,13 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
     // Because the length is only known at runtime, we use a dummy value
     // of 0 for the static length.  The alignment values are those defined
     // by the Procedure Call Standard for the Arm Architecture.
-#define SVE_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, IsSigned, IsFP) \
+#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits,    \
+                        IsSigned, IsFP)                                        \
   case BuiltinType::Id:                                                        \
     Width = 0;                                                                 \
     Align = 128;                                                               \
     break;
-#define SVE_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
+#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls)         \
   case BuiltinType::Id:                                                        \
     Width = 0;                                                                 \
     Align = 16;                                                                \
@@ -3640,14 +3641,15 @@ QualType ASTContext::getScalableVectorType(QualType EltTy,
                                            unsigned NumElts) const {
   if (Target->hasAArch64SVETypes()) {
     uint64_t EltTySize = getTypeSize(EltTy);
-#define SVE_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, IsSigned, IsFP) \
+#define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits,    \
+                        IsSigned, IsFP)                                        \
   if (!EltTy->isBooleanType() &&                                               \
       ((EltTy->hasIntegerRepresentation() &&                                   \
         EltTy->hasSignedIntegerRepresentation() == IsSigned) ||                \
        (EltTy->hasFloatingRepresentation() && IsFP)) &&                        \
       EltTySize == ElBits && NumElts == NumEls)                                \
     return SingletonId;
-#define SVE_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                      \
+#define SVE_PREDICATE_TYPE(Name, MangledName, Id, SingletonId, NumEls)         \
   if (EltTy->isBooleanType() && NumElts == NumEls)                             \
     return SingletonId;
 #include "clang/Basic/AArch64SVEACLETypes.def"

diff  --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp
index 46815e5a107e..247f5667bef9 100644
--- a/clang/lib/AST/ItaniumMangle.cpp
+++ b/clang/lib/AST/ItaniumMangle.cpp
@@ -2820,10 +2820,18 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
   // The SVE types are effectively target-specific.  The mangling scheme
   // is defined in the appendices to the Procedure Call Standard for the
   // Arm Architecture.
-#define SVE_TYPE(Name, Id, SingletonId) \
-  case BuiltinType::Id: \
-    type_name = Name; \
-    Out << 'u' << type_name.size() << type_name; \
+#define SVE_VECTOR_TYPE(InternalName, MangledName, Id, SingletonId, NumEls,    \
+                        ElBits, IsSigned, IsFP)                                \
+  case BuiltinType::Id:                                                        \
+    type_name = MangledName;                                                   \
+    Out << (type_name == InternalName ? "u" : "") << type_name.size()          \
+        << type_name;                                                          \
+    break;
+#define SVE_PREDICATE_TYPE(InternalName, MangledName, Id, SingletonId, NumEls) \
+  case BuiltinType::Id:                                                        \
+    type_name = MangledName;                                                   \
+    Out << (type_name == InternalName ? "u" : "") << type_name.size()          \
+        << type_name;                                                          \
     break;
 #include "clang/Basic/AArch64SVEACLETypes.def"
   }

diff  --git a/clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp b/clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp
index 87e38ab3b012..66e3c1306dd2 100644
--- a/clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp
+++ b/clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp
@@ -29,3 +29,72 @@ void f10(S<__SVFloat32_t>) {}
 void f11(S<__SVFloat64_t>) {}
 // CHECK: _Z3f121SIu10__SVBool_tE
 void f12(S<__SVBool_t>) {}
+
+// The tuple types don't use the internal name for mangling.
+
+// CHECK: _Z3f131SI10svint8x2_tE
+void f13(S<__clang_svint8x2_t>) {}
+// CHECK: _Z3f141SI10svint8x3_tE
+void f14(S<__clang_svint8x3_t>) {}
+// CHECK: _Z3f151SI10svint8x4_tE
+void f15(S<__clang_svint8x4_t>) {}
+// CHECK: _Z3f161SI11svint16x2_tE
+void f16(S<__clang_svint16x2_t>) {}
+// CHECK: _Z3f171SI11svint16x3_tE
+void f17(S<__clang_svint16x3_t>) {}
+// CHECK: _Z3f181SI11svint16x4_tE
+void f18(S<__clang_svint16x4_t>) {}
+// CHECK: _Z3f191SI11svint32x2_tE
+void f19(S<__clang_svint32x2_t>) {}
+// CHECK: _Z3f201SI11svint32x3_tE
+void f20(S<__clang_svint32x3_t>) {}
+// CHECK: _Z3f211SI11svint32x4_tE
+void f21(S<__clang_svint32x4_t>) {}
+// CHECK: _Z3f221SI11svint64x2_tE
+void f22(S<__clang_svint64x2_t>) {}
+// CHECK: _Z3f231SI11svint64x3_tE
+void f23(S<__clang_svint64x3_t>) {}
+// CHECK: _Z3f241SI11svint64x4_tE
+void f24(S<__clang_svint64x4_t>) {}
+// CHECK: _Z3f251SI11svuint8x2_tE
+void f25(S<__clang_svuint8x2_t>) {}
+// CHECK: _Z3f261SI11svuint8x3_tE
+void f26(S<__clang_svuint8x3_t>) {}
+// CHECK: _Z3f271SI11svuint8x4_tE
+void f27(S<__clang_svuint8x4_t>) {}
+// CHECK: _Z3f281SI12svuint16x2_tE
+void f28(S<__clang_svuint16x2_t>) {}
+// CHECK: _Z3f291SI12svuint16x3_tE
+void f29(S<__clang_svuint16x3_t>) {}
+// CHECK: _Z3f301SI12svuint16x4_tE
+void f30(S<__clang_svuint16x4_t>) {}
+// CHECK: _Z3f311SI12svuint32x2_tE
+void f31(S<__clang_svuint32x2_t>) {}
+// CHECK: _Z3f321SI12svuint32x3_tE
+void f32(S<__clang_svuint32x3_t>) {}
+// CHECK: _Z3f331SI12svuint32x4_tE
+void f33(S<__clang_svuint32x4_t>) {}
+// CHECK: _Z3f341SI12svuint64x2_tE
+void f34(S<__clang_svuint64x2_t>) {}
+// CHECK: _Z3f351SI12svuint64x3_tE
+void f35(S<__clang_svuint64x3_t>) {}
+// CHECK: _Z3f361SI12svuint64x4_tE
+void f36(S<__clang_svuint64x4_t>) {}
+// CHECK: _Z3f371SI13svfloat16x2_tE
+void f37(S<__clang_svfloat16x2_t>) {}
+// CHECK: _Z3f381SI13svfloat16x3_tE
+void f38(S<__clang_svfloat16x3_t>) {}
+// CHECK: _Z3f391SI13svfloat16x4_tE
+void f39(S<__clang_svfloat16x4_t>) {}
+// CHECK: _Z3f401SI13svfloat32x2_tE
+void f40(S<__clang_svfloat32x2_t>) {}
+// CHECK: _Z3f411SI13svfloat32x3_tE
+void f41(S<__clang_svfloat32x3_t>) {}
+// CHECK: _Z3f421SI13svfloat32x4_tE
+void f42(S<__clang_svfloat32x4_t>) {}
+// CHECK: _Z3f431SI13svfloat64x2_tE
+void f43(S<__clang_svfloat64x2_t>) {}
+// CHECK: _Z3f441SI13svfloat64x3_tE
+void f44(S<__clang_svfloat64x3_t>) {}
+// CHECK: _Z3f451SI13svfloat64x4_tE
+void f45(S<__clang_svfloat64x4_t>) {}

diff  --git a/clang/utils/TableGen/SveEmitter.cpp b/clang/utils/TableGen/SveEmitter.cpp
index c0d203c72f54..54b50482bb8f 100644
--- a/clang/utils/TableGen/SveEmitter.cpp
+++ b/clang/utils/TableGen/SveEmitter.cpp
@@ -1069,39 +1069,39 @@ void SVEEmitter::createHeader(raw_ostream &OS) {
   OS << "typedef __SVFloat16_t svfloat16_t;\n";
   OS << "typedef __SVFloat32_t svfloat32_t;\n";
   OS << "typedef __SVFloat64_t svfloat64_t;\n";
-  OS << "typedef __SVInt8x2_t svint8x2_t;\n";
-  OS << "typedef __SVInt16x2_t svint16x2_t;\n";
-  OS << "typedef __SVInt32x2_t svint32x2_t;\n";
-  OS << "typedef __SVInt64x2_t svint64x2_t;\n";
-  OS << "typedef __SVUint8x2_t svuint8x2_t;\n";
-  OS << "typedef __SVUint16x2_t svuint16x2_t;\n";
-  OS << "typedef __SVUint32x2_t svuint32x2_t;\n";
-  OS << "typedef __SVUint64x2_t svuint64x2_t;\n";
-  OS << "typedef __SVFloat16x2_t svfloat16x2_t;\n";
-  OS << "typedef __SVFloat32x2_t svfloat32x2_t;\n";
-  OS << "typedef __SVFloat64x2_t svfloat64x2_t;\n";
-  OS << "typedef __SVInt8x3_t svint8x3_t;\n";
-  OS << "typedef __SVInt16x3_t svint16x3_t;\n";
-  OS << "typedef __SVInt32x3_t svint32x3_t;\n";
-  OS << "typedef __SVInt64x3_t svint64x3_t;\n";
-  OS << "typedef __SVUint8x3_t svuint8x3_t;\n";
-  OS << "typedef __SVUint16x3_t svuint16x3_t;\n";
-  OS << "typedef __SVUint32x3_t svuint32x3_t;\n";
-  OS << "typedef __SVUint64x3_t svuint64x3_t;\n";
-  OS << "typedef __SVFloat16x3_t svfloat16x3_t;\n";
-  OS << "typedef __SVFloat32x3_t svfloat32x3_t;\n";
-  OS << "typedef __SVFloat64x3_t svfloat64x3_t;\n";
-  OS << "typedef __SVInt8x4_t svint8x4_t;\n";
-  OS << "typedef __SVInt16x4_t svint16x4_t;\n";
-  OS << "typedef __SVInt32x4_t svint32x4_t;\n";
-  OS << "typedef __SVInt64x4_t svint64x4_t;\n";
-  OS << "typedef __SVUint8x4_t svuint8x4_t;\n";
-  OS << "typedef __SVUint16x4_t svuint16x4_t;\n";
-  OS << "typedef __SVUint32x4_t svuint32x4_t;\n";
-  OS << "typedef __SVUint64x4_t svuint64x4_t;\n";
-  OS << "typedef __SVFloat16x4_t svfloat16x4_t;\n";
-  OS << "typedef __SVFloat32x4_t svfloat32x4_t;\n";
-  OS << "typedef __SVFloat64x4_t svfloat64x4_t;\n";
+  OS << "typedef __clang_svint8x2_t svint8x2_t;\n";
+  OS << "typedef __clang_svint16x2_t svint16x2_t;\n";
+  OS << "typedef __clang_svint32x2_t svint32x2_t;\n";
+  OS << "typedef __clang_svint64x2_t svint64x2_t;\n";
+  OS << "typedef __clang_svuint8x2_t svuint8x2_t;\n";
+  OS << "typedef __clang_svuint16x2_t svuint16x2_t;\n";
+  OS << "typedef __clang_svuint32x2_t svuint32x2_t;\n";
+  OS << "typedef __clang_svuint64x2_t svuint64x2_t;\n";
+  OS << "typedef __clang_svfloat16x2_t svfloat16x2_t;\n";
+  OS << "typedef __clang_svfloat32x2_t svfloat32x2_t;\n";
+  OS << "typedef __clang_svfloat64x2_t svfloat64x2_t;\n";
+  OS << "typedef __clang_svint8x3_t svint8x3_t;\n";
+  OS << "typedef __clang_svint16x3_t svint16x3_t;\n";
+  OS << "typedef __clang_svint32x3_t svint32x3_t;\n";
+  OS << "typedef __clang_svint64x3_t svint64x3_t;\n";
+  OS << "typedef __clang_svuint8x3_t svuint8x3_t;\n";
+  OS << "typedef __clang_svuint16x3_t svuint16x3_t;\n";
+  OS << "typedef __clang_svuint32x3_t svuint32x3_t;\n";
+  OS << "typedef __clang_svuint64x3_t svuint64x3_t;\n";
+  OS << "typedef __clang_svfloat16x3_t svfloat16x3_t;\n";
+  OS << "typedef __clang_svfloat32x3_t svfloat32x3_t;\n";
+  OS << "typedef __clang_svfloat64x3_t svfloat64x3_t;\n";
+  OS << "typedef __clang_svint8x4_t svint8x4_t;\n";
+  OS << "typedef __clang_svint16x4_t svint16x4_t;\n";
+  OS << "typedef __clang_svint32x4_t svint32x4_t;\n";
+  OS << "typedef __clang_svint64x4_t svint64x4_t;\n";
+  OS << "typedef __clang_svuint8x4_t svuint8x4_t;\n";
+  OS << "typedef __clang_svuint16x4_t svuint16x4_t;\n";
+  OS << "typedef __clang_svuint32x4_t svuint32x4_t;\n";
+  OS << "typedef __clang_svuint64x4_t svuint64x4_t;\n";
+  OS << "typedef __clang_svfloat16x4_t svfloat16x4_t;\n";
+  OS << "typedef __clang_svfloat32x4_t svfloat32x4_t;\n";
+  OS << "typedef __clang_svfloat64x4_t svfloat64x4_t;\n";
   OS << "typedef __SVBool_t  svbool_t;\n\n";
 
   OS << "typedef enum\n";


        


More information about the cfe-commits mailing list