[clang] 599b2f0 - [AArch64][SVE] Handle svbool_t VLST <-> VLAT/GNUT conversion

Jun Ma via cfe-commits cfe-commits at lists.llvm.org
Wed Jul 21 22:55:30 PDT 2021


Author: Jun Ma
Date: 2021-07-22T13:55:08+08:00
New Revision: 599b2f00370ee79e812d2776f2af57fae36d02e9

URL: https://github.com/llvm/llvm-project/commit/599b2f00370ee79e812d2776f2af57fae36d02e9
DIFF: https://github.com/llvm/llvm-project/commit/599b2f00370ee79e812d2776f2af57fae36d02e9.diff

LOG: [AArch64][SVE] Handle svbool_t VLST <-> VLAT/GNUT conversion

According to https://godbolt.org/z/q5rME1naY and the ACLE, we found that
clang and gcc have different SVE conversion behaviours. It turns out that
llvm does not handle the SVE predicate width properly.

This patch 1) checks the SVE predicate width correctly for the svbool_t type,
2) removes the warning on svbool_t VLST <-> VLAT/GNUT conversions, and
3) disables VLST <-> VLAT/GNUT conversions between SVE vectors and predicates,
which have different widths.

Differential Revision: https://reviews.llvm.org/D106333

Added: 
    

Modified: 
    clang/lib/AST/ASTContext.cpp
    clang/lib/Sema/SemaChecking.cpp
    clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
    clang/test/SemaCXX/aarch64-sve-explicit-casts-fixed-size.cpp
    clang/test/SemaCXX/aarch64-sve-lax-vector-conversions.cpp
    clang/test/SemaCXX/attr-arm-sve-vector-bits.cpp

Removed: 
    


################################################################################
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 46b778d2834a8..e102a3ba508d4 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -8670,6 +8670,14 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
   return false;
 }
 
+/// getSVETypeSize - Return SVE vector or predicate register size.
+static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) {
+  assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type");
+  return Ty->getKind() == BuiltinType::SveBool
+             ? Context.getLangOpts().ArmSveVectorBits / Context.getCharWidth()
+             : Context.getLangOpts().ArmSveVectorBits;
+}
+
 bool ASTContext::areCompatibleSveTypes(QualType FirstType,
                                        QualType SecondType) {
   assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||
@@ -8687,7 +8695,7 @@ bool ASTContext::areCompatibleSveTypes(QualType FirstType,
           return VT->getElementType().getCanonicalType() ==
                  FirstType->getSveEltType(*this);
         else if (VT->getVectorKind() == VectorType::GenericVector)
-          return getTypeSize(SecondType) == getLangOpts().ArmSveVectorBits &&
+          return getTypeSize(SecondType) == getSVETypeSize(*this, BT) &&
                  hasSameType(VT->getElementType(),
                              getBuiltinVectorTypeInfo(BT).ElementType);
       }
@@ -8706,7 +8714,8 @@ bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
          "Expected SVE builtin type and vector type!");
 
   auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
-    if (!FirstType->getAs<BuiltinType>())
+    const auto *BT = FirstType->getAs<BuiltinType>();
+    if (!BT)
       return false;
 
     const auto *VecTy = SecondType->getAs<VectorType>();
@@ -8716,13 +8725,19 @@ bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
       const LangOptions::LaxVectorConversionKind LVCKind =
           getLangOpts().getLaxVectorConversions();
 
+      // Can not convert between sve predicates and sve vectors because of
+      // different size.
+      if (BT->getKind() == BuiltinType::SveBool &&
+          VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector)
+        return false;
+
       // If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion.
       // "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly
       // converts to VLAT and VLAT implicitly converts to GNUT."
       // ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
       // predicates.
       if (VecTy->getVectorKind() == VectorType::GenericVector &&
-          getTypeSize(SecondType) != getLangOpts().ArmSveVectorBits)
+          getTypeSize(SecondType) != getSVETypeSize(*this, BT))
         return false;
 
       // If -flax-vector-conversions=all is specified, the types are

diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 242c2968da45f..02da39c11d7f9 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -12570,15 +12570,13 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
     checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral);
 
   // Strip vector types.
-  if (const auto *SourceVT = dyn_cast<VectorType>(Source)) {
-    if (Target->isVLSTBuiltinType()) {
-      auto SourceVectorKind = SourceVT->getVectorKind();
-      if (SourceVectorKind == VectorType::SveFixedLengthDataVector ||
-          SourceVectorKind == VectorType::SveFixedLengthPredicateVector ||
-          (SourceVectorKind == VectorType::GenericVector &&
-           S.Context.getTypeSize(Source) == S.getLangOpts().ArmSveVectorBits))
-        return;
-    }
+  if (isa<VectorType>(Source)) {
+    if (Target->isVLSTBuiltinType() &&
+        (S.Context.areCompatibleSveTypes(QualType(Target, 0),
+                                         QualType(Source, 0)) ||
+         S.Context.areLaxCompatibleSveTypes(QualType(Target, 0),
+                                            QualType(Source, 0))))
+      return;
 
     if (!isa<VectorType>(Target)) {
       if (S.SourceMgr.isInSystemMacro(CC))

diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
index a7f275bd1f0b4..c27651f55a188 100644
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
@@ -7,6 +7,7 @@
 
 typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
 typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
+typedef uint8_t uint8_vec_t __attribute__((vector_size(N / 64)));
 
 fixed_bool_t global_pred;
 fixed_int32_t global_vec;
@@ -115,26 +116,26 @@ fixed_bool_t address_of_array_idx() {
 // CHECK-NEXT:    [[RETVAL:%.*]] = alloca <16 x i32>, align 16
 // CHECK-NEXT:    [[PRED_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
 // CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
-// CHECK-NEXT:    [[XX:%.*]] = alloca <16 x i32>, align 16
-// CHECK-NEXT:    [[YY:%.*]] = alloca <16 x i32>, align 16
+// CHECK-NEXT:    [[XX:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT:    [[YY:%.*]] = alloca <8 x i8>, align 8
 // CHECK-NEXT:    [[PG:%.*]] = alloca <vscale x 16 x i1>, align 2
 // CHECK-NEXT:    [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
-// CHECK-NEXT:    [[SAVED_VALUE1:%.*]] = alloca <16 x i32>, align 64
+// CHECK-NEXT:    [[SAVED_VALUE1:%.*]] = alloca <8 x i8>, align 8
 // CHECK-NEXT:    store <vscale x 16 x i1> [[PRED:%.*]], <vscale x 16 x i1>* [[PRED_ADDR]], align 2
 // CHECK-NEXT:    store <vscale x 4 x i32> [[VEC:%.*]], <vscale x 4 x i32>* [[VEC_ADDR]], align 16
-// CHECK-NEXT:    store <16 x i32> <i32 1, i32 2, i32 3, i32 4, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <16 x i32>* [[XX]], align 16
-// CHECK-NEXT:    store <16 x i32> <i32 2, i32 5, i32 4, i32 6, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <16 x i32>* [[YY]], align 16
+// CHECK-NEXT:    store <8 x i8> <i8 1, i8 2, i8 3, i8 4, i8 0, i8 0, i8 0, i8 0>, <8 x i8>* [[XX]], align 8
+// CHECK-NEXT:    store <8 x i8> <i8 2, i8 5, i8 4, i8 6, i8 0, i8 0, i8 0, i8 0>, <8 x i8>* [[YY]], align 8
 // CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PRED_ADDR]], align 2
 // CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* @global_pred, align 2
 // CHECK-NEXT:    store <8 x i8> [[TMP1]], <8 x i8>* [[SAVED_VALUE]], align 8
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 8
-// CHECK-NEXT:    [[TMP3:%.*]] = load <16 x i32>, <16 x i32>* [[XX]], align 16
-// CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i32>, <16 x i32>* [[YY]], align 16
-// CHECK-NEXT:    [[ADD:%.*]] = add <16 x i32> [[TMP3]], [[TMP4]]
-// CHECK-NEXT:    store <16 x i32> [[ADD]], <16 x i32>* [[SAVED_VALUE1]], align 64
-// CHECK-NEXT:    [[CASTFIXEDSVE2:%.*]] = bitcast <16 x i32>* [[SAVED_VALUE1]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE2]], align 64
+// CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[XX]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[YY]], align 8
+// CHECK-NEXT:    [[ADD:%.*]] = add <8 x i8> [[TMP3]], [[TMP4]]
+// CHECK-NEXT:    store <8 x i8> [[ADD]], <8 x i8>* [[SAVED_VALUE1]], align 8
+// CHECK-NEXT:    [[CASTFIXEDSVE2:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE1]] to <vscale x 16 x i1>*
+// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE2]], align 8
 // CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP5]])
 // CHECK-NEXT:    store <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1>* [[PG]], align 2
 // CHECK-NEXT:    [[TMP7:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[PG]], align 2
@@ -150,8 +151,8 @@ fixed_bool_t address_of_array_idx() {
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE4]]
 //
 fixed_int32_t test_cast(svbool_t pred, svint32_t vec) {
-  fixed_int32_t xx = {1, 2, 3, 4};
-  fixed_int32_t yy = {2, 5, 4, 6};
+  uint8_vec_t xx = {1, 2, 3, 4};
+  uint8_vec_t yy = {2, 5, 4, 6};
   svbool_t pg = svand_z(pred, global_pred, xx + yy);
   return svadd_m(pg, global_vec, vec);
 }

diff --git a/clang/test/SemaCXX/aarch64-sve-explicit-casts-fixed-size.cpp b/clang/test/SemaCXX/aarch64-sve-explicit-casts-fixed-size.cpp
index a93110db7cce0..616984781edb1 100644
--- a/clang/test/SemaCXX/aarch64-sve-explicit-casts-fixed-size.cpp
+++ b/clang/test/SemaCXX/aarch64-sve-explicit-casts-fixed-size.cpp
@@ -15,6 +15,7 @@ typedef svfloat32_t fixed_float32_t FIXED_ATTR;
 typedef svfloat64_t fixed_float64_t FIXED_ATTR;
 typedef svint32_t fixed_int32_t FIXED_ATTR;
 typedef svint64_t fixed_int64_t FIXED_ATTR;
+typedef svbool_t fixed_bool_t FIXED_ATTR;
 
 // SVE VLSTs can be cast to SVE VLATs, regardless of lane size.
 // NOTE: the list below is NOT exhaustive for all SVE types.
@@ -47,3 +48,5 @@ TESTCASE(fixed_int64_t, svfloat32_t)
 TESTCASE(fixed_int64_t, svfloat64_t)
 TESTCASE(fixed_int64_t, svint32_t)
 TESTCASE(fixed_int64_t, svint64_t)
+
+TESTCASE(fixed_bool_t, svbool_t)

diff --git a/clang/test/SemaCXX/aarch64-sve-lax-vector-conversions.cpp b/clang/test/SemaCXX/aarch64-sve-lax-vector-conversions.cpp
index 1a1addcf1c1ba..2cd4af0bb0045 100644
--- a/clang/test/SemaCXX/aarch64-sve-lax-vector-conversions.cpp
+++ b/clang/test/SemaCXX/aarch64-sve-lax-vector-conversions.cpp
@@ -2,22 +2,25 @@
 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -msve-vector-bits=512 -flax-vector-conversions=integer -fallow-half-arguments-and-returns -ffreestanding -fsyntax-only -verify=lax-vector-integer %s
 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -msve-vector-bits=512 -flax-vector-conversions=all -fallow-half-arguments-and-returns -ffreestanding -fsyntax-only -verify=lax-vector-all %s
 
-// lax-vector-all-no-diagnostics
-
 #include <arm_sve.h>
 
 #define N __ARM_FEATURE_SVE_BITS
 #define SVE_FIXED_ATTR __attribute__((arm_sve_vector_bits(N)))
 #define GNU_FIXED_ATTR __attribute__((vector_size(N / 8)))
+#define GNU_BOOL_FIXED_ATTR __attribute__((vector_size(N / 64)))
 
 typedef svfloat32_t sve_fixed_float32_t SVE_FIXED_ATTR;
 typedef svint32_t sve_fixed_int32_t SVE_FIXED_ATTR;
+typedef svbool_t sve_fixed_bool_t SVE_FIXED_ATTR;
 typedef float gnu_fixed_float32_t GNU_FIXED_ATTR;
 typedef int gnu_fixed_int32_t GNU_FIXED_ATTR;
+typedef int8_t gnu_fixed_bool_t GNU_BOOL_FIXED_ATTR;
 
 void sve_allowed_with_integer_lax_conversions() {
   sve_fixed_int32_t fi32;
   svint64_t si64;
+  svbool_t sb8;
+  sve_fixed_bool_t fb8;
 
   // The implicit cast here should fail if -flax-vector-conversions=none, but pass if
   // -flax-vector-conversions={integer,all}.
@@ -25,6 +28,25 @@ void sve_allowed_with_integer_lax_conversions() {
   // lax-vector-none-error@-1 {{assigning to 'sve_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
   si64 = fi32;
   // lax-vector-none-error@-1 {{assigning to 'svint64_t' (aka '__SVInt64_t') from incompatible type}}
+
+  fi32 = sb8;
+  // lax-vector-none-error@-1 {{assigning to 'sve_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
+  // lax-vector-integer-error@-2 {{assigning to 'sve_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
+  // lax-vector-all-error@-3 {{assigning to 'sve_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
+  sb8 = fi32;
+  // lax-vector-none-error@-1 {{assigning to 'svbool_t' (aka '__SVBool_t') from incompatible type}}
+  // lax-vector-integer-error@-2 {{assigning to 'svbool_t' (aka '__SVBool_t') from incompatible type}}
+  // lax-vector-all-error@-3 {{assigning to 'svbool_t' (aka '__SVBool_t') from incompatible type}}
+
+  si64 = fb8;
+  // lax-vector-none-error@-1 {{assigning to 'svint64_t' (aka '__SVInt64_t') from incompatible type}}
+  // lax-vector-integer-error@-2 {{assigning to 'svint64_t' (aka '__SVInt64_t') from incompatible type}}
+  // lax-vector-all-error@-3 {{assigning to 'svint64_t' (aka '__SVInt64_t') from incompatible type}}
+
+  fb8 = si64;
+  // lax-vector-none-error@-1 {{assigning to 'sve_fixed_bool_t' (vector of 8 'unsigned char' values) from incompatible type}}
+  // lax-vector-integer-error@-2 {{assigning to 'sve_fixed_bool_t' (vector of 8 'unsigned char' values) from incompatible type}}
+  // lax-vector-all-error@-3 {{assigning to 'sve_fixed_bool_t' (vector of 8 'unsigned char' values) from incompatible type}}
 }
 
 void sve_allowed_with_all_lax_conversions() {
@@ -44,6 +66,8 @@ void sve_allowed_with_all_lax_conversions() {
 void gnu_allowed_with_integer_lax_conversions() {
   gnu_fixed_int32_t fi32;
   svint64_t si64;
+  svbool_t sb8;
+  gnu_fixed_bool_t fb8;
 
   // The implicit cast here should fail if -flax-vector-conversions=none, but pass if
   // -flax-vector-conversions={integer,all}.
@@ -51,6 +75,24 @@ void gnu_allowed_with_integer_lax_conversions() {
   // lax-vector-none-error@-1 {{assigning to 'gnu_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
   si64 = fi32;
   // lax-vector-none-error@-1 {{assigning to 'svint64_t' (aka '__SVInt64_t') from incompatible type}}
+
+  fi32 = sb8;
+  // lax-vector-none-error@-1 {{assigning to 'gnu_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
+  // lax-vector-integer-error@-2 {{assigning to 'gnu_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
+  // lax-vector-all-error@-3 {{assigning to 'gnu_fixed_int32_t' (vector of 16 'int' values) from incompatible type}}
+  sb8 = fi32;
+  // lax-vector-none-error@-1 {{assigning to 'svbool_t' (aka '__SVBool_t') from incompatible type}}
+  // lax-vector-integer-error@-2 {{assigning to 'svbool_t' (aka '__SVBool_t') from incompatible type}}
+  // lax-vector-all-error@-3 {{assigning to 'svbool_t' (aka '__SVBool_t') from incompatible type}}
+
+  fb8 = si64;
+  // lax-vector-none-error@-1 {{assigning to 'gnu_fixed_bool_t' (vector of 8 'int8_t' values) from incompatible type}}
+  // lax-vector-integer-error@-2 {{assigning to 'gnu_fixed_bool_t' (vector of 8 'int8_t' values) from incompatible type}}
+  // lax-vector-all-error@-3 {{assigning to 'gnu_fixed_bool_t' (vector of 8 'int8_t' values) from incompatible type}}
+  si64 = fb8;
+  // lax-vector-none-error@-1 {{assigning to 'svint64_t' (aka '__SVInt64_t') from incompatible type}}
+  // lax-vector-integer-error@-2 {{assigning to 'svint64_t' (aka '__SVInt64_t') from incompatible type}}
+  // lax-vector-all-error@-3 {{assigning to 'svint64_t' (aka '__SVInt64_t') from incompatible type}}
 }
 
 void gnu_allowed_with_all_lax_conversions() {

diff --git a/clang/test/SemaCXX/attr-arm-sve-vector-bits.cpp b/clang/test/SemaCXX/attr-arm-sve-vector-bits.cpp
index 0437a264f65bb..eb713612f223d 100644
--- a/clang/test/SemaCXX/attr-arm-sve-vector-bits.cpp
+++ b/clang/test/SemaCXX/attr-arm-sve-vector-bits.cpp
@@ -9,6 +9,10 @@ typedef __SVInt8_t svint8_t;
 typedef svint8_t fixed_int8_t __attribute__((arm_sve_vector_bits(N)));
 typedef int8_t gnu_int8_t __attribute__((vector_size(N / 8)));
 
+typedef __SVBool_t svbool_t;
+typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
+typedef int8_t gnu_bool_t __attribute__((vector_size(N / 64)));
+
 template<typename T> struct S { T var; };
 
 S<fixed_int8_t> s;
@@ -24,3 +28,11 @@ gnu_int8_t from_svint8_t__to_gnu_int8_t(svint8_t x) { return x; }
 // Test implicit casts between GNU and VLS vectors
 fixed_int8_t to_fixed_int8_t__from_gnu_int8_t(gnu_int8_t x) { return x; }
 gnu_int8_t from_fixed_int8_t__to_gnu_int8_t(fixed_int8_t x) { return x; }
+
+// Test implicit casts between VLA and VLS predicates
+svbool_t to_svbool_t(fixed_bool_t x) { return x; }
+fixed_bool_t from_svbool_t(svbool_t x) { return x; }
+
+// Test implicit casts between GNU and VLA predicates
+svbool_t to_svbool_t__from_gnu_bool_t(gnu_bool_t x) { return x; }
+gnu_bool_t from_svbool_t__to_gnu_bool_t(svbool_t x) { return x; }


        


More information about the cfe-commits mailing list