[clang] [RISCV] full support for riscv_rvv_vector_bits attribute (PR #100110)
Vladislav Belov via cfe-commits
cfe-commits at lists.llvm.org
Wed Jul 31 03:37:53 PDT 2024
https://github.com/vbe-sc updated https://github.com/llvm/llvm-project/pull/100110
>From 33cfa736bb19c6c8ef2d214ecafbc50605eea5eb Mon Sep 17 00:00:00 2001
From: vb-sc <vladislav.belov at syntacore.com>
Date: Wed, 31 Jul 2024 13:37:34 +0300
Subject: [PATCH] [RISCV] full support for riscv_rvv_vector_bits attribute
---
clang/include/clang/AST/Type.h | 4 +
clang/lib/AST/ASTContext.cpp | 33 +++-
clang/lib/AST/ItaniumMangle.cpp | 26 ++-
clang/lib/AST/JSONNodeDumper.cpp | 3 +
clang/lib/AST/TextNodeDumper.cpp | 3 +
clang/lib/AST/TypePrinter.cpp | 6 +
clang/lib/CodeGen/Targets/RISCV.cpp | 22 ++-
clang/lib/Sema/SemaExpr.cpp | 13 +-
clang/lib/Sema/SemaType.cpp | 18 +-
.../attr-riscv-rvv-vector-bits-less-8-call.c | 178 ++++++++++++++++++
.../attr-riscv-rvv-vector-bits-less-8-cast.c | 123 ++++++++++++
.../attr-rvv-vector-bits-bitcast-less-8.c | 106 +++++++++++
.../RISCV/attr-rvv-vector-bits-globals.c | 36 ++--
.../RISCV/attr-rvv-vector-bits-types.c | 78 ++++----
.../riscv-mangle-rvv-fixed-vectors.cpp | 20 +-
clang/test/Sema/attr-riscv-rvv-vector-bits.c | 18 +-
16 files changed, 587 insertions(+), 100 deletions(-)
create mode 100644 clang/test/CodeGen/RISCV/attr-riscv-rvv-vector-bits-less-8-call.c
create mode 100644 clang/test/CodeGen/RISCV/attr-riscv-rvv-vector-bits-less-8-cast.c
create mode 100644 clang/test/CodeGen/RISCV/attr-rvv-vector-bits-bitcast-less-8.c
diff --git a/clang/include/clang/AST/Type.h b/clang/include/clang/AST/Type.h
index 25defea58c2dc..a9a87ac5837f1 100644
--- a/clang/include/clang/AST/Type.h
+++ b/clang/include/clang/AST/Type.h
@@ -3981,6 +3981,10 @@ enum class VectorKind {
/// is RISC-V RVV fixed-length mask vector
RVVFixedLengthMask,
+
+ RVVFixedLengthMask_1,
+ RVVFixedLengthMask_2,
+ RVVFixedLengthMask_4
};
/// Represents a GCC generic vector type. This type is created using
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 7af9ea7105bb0..1b7aed3b9dba8 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -1983,7 +1983,10 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
// Adjust the alignment for fixed-length SVE predicates.
Align = 16;
else if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
- VT->getVectorKind() == VectorKind::RVVFixedLengthMask)
+ VT->getVectorKind() == VectorKind::RVVFixedLengthMask ||
+ VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
+ VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
+ VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4)
// Adjust the alignment for fixed-length RVV vectors.
Align = std::min<unsigned>(64, Width);
break;
@@ -9896,7 +9899,13 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
First->getVectorKind() != VectorKind::RVVFixedLengthData &&
Second->getVectorKind() != VectorKind::RVVFixedLengthData &&
First->getVectorKind() != VectorKind::RVVFixedLengthMask &&
- Second->getVectorKind() != VectorKind::RVVFixedLengthMask)
+ Second->getVectorKind() != VectorKind::RVVFixedLengthMask &&
+ First->getVectorKind() != VectorKind::RVVFixedLengthMask_1 &&
+ Second->getVectorKind() != VectorKind::RVVFixedLengthMask_1 &&
+ First->getVectorKind() != VectorKind::RVVFixedLengthMask_2 &&
+ Second->getVectorKind() != VectorKind::RVVFixedLengthMask_2 &&
+ First->getVectorKind() != VectorKind::RVVFixedLengthMask_4 &&
+ Second->getVectorKind() != VectorKind::RVVFixedLengthMask_4)
return true;
return false;
@@ -10014,7 +10023,25 @@ bool ASTContext::areCompatibleRVVTypes(QualType FirstType,
BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(BT);
return FirstType->isRVVVLSBuiltinType() &&
Info.ElementType == BoolTy &&
- getTypeSize(SecondType) == getRVVTypeSize(*this, BT);
+ getTypeSize(SecondType) == ((getRVVTypeSize(*this, BT)));
+ }
+ if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1) {
+ BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(BT);
+ return FirstType->isRVVVLSBuiltinType() &&
+ Info.ElementType == BoolTy &&
+ getTypeSize(SecondType) == ((getRVVTypeSize(*this, BT) * 8));
+ }
+ if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2) {
+ BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(BT);
+ return FirstType->isRVVVLSBuiltinType() &&
+ Info.ElementType == BoolTy &&
+ getTypeSize(SecondType) == ((getRVVTypeSize(*this, BT)) * 4);
+ }
+ if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4) {
+ BuiltinVectorTypeInfo Info = getBuiltinVectorTypeInfo(BT);
+ return FirstType->isRVVVLSBuiltinType() &&
+ Info.ElementType == BoolTy &&
+ getTypeSize(SecondType) == ((getRVVTypeSize(*this, BT)) * 2);
}
if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
VT->getVectorKind() == VectorKind::Generic)
diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp
index 40ef82785f454..6597c1818d35e 100644
--- a/clang/lib/AST/ItaniumMangle.cpp
+++ b/clang/lib/AST/ItaniumMangle.cpp
@@ -4005,7 +4005,10 @@ void CXXNameMangler::mangleAArch64FixedSveVectorType(
void CXXNameMangler::mangleRISCVFixedRVVVectorType(const VectorType *T) {
assert((T->getVectorKind() == VectorKind::RVVFixedLengthData ||
- T->getVectorKind() == VectorKind::RVVFixedLengthMask) &&
+ T->getVectorKind() == VectorKind::RVVFixedLengthMask ||
+ T->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
+ T->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
+ T->getVectorKind() == VectorKind::RVVFixedLengthMask_4) &&
"expected fixed-length RVV vector!");
QualType EltType = T->getElementType();
@@ -4056,7 +4059,21 @@ void CXXNameMangler::mangleRISCVFixedRVVVectorType(const VectorType *T) {
llvm_unreachable("unexpected element type for fixed-length RVV vector!");
}
- unsigned VecSizeInBits = getASTContext().getTypeInfo(T).Width;
+ unsigned VecSizeInBits;
+ switch (T->getVectorKind()) {
+ case VectorKind::RVVFixedLengthMask_1:
+ VecSizeInBits = 1;
+ break;
+ case VectorKind::RVVFixedLengthMask_2:
+ VecSizeInBits = 2;
+ break;
+ case VectorKind::RVVFixedLengthMask_4:
+ VecSizeInBits = 4;
+ break;
+ default:
+ VecSizeInBits = getASTContext().getTypeInfo(T).Width;
+ break;
+ }
// Append the LMUL suffix.
auto VScale = getASTContext().getTargetInfo().getVScaleRange(
@@ -4112,7 +4129,10 @@ void CXXNameMangler::mangleType(const VectorType *T) {
mangleAArch64FixedSveVectorType(T);
return;
} else if (T->getVectorKind() == VectorKind::RVVFixedLengthData ||
- T->getVectorKind() == VectorKind::RVVFixedLengthMask) {
+ T->getVectorKind() == VectorKind::RVVFixedLengthMask ||
+ T->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
+ T->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
+ T->getVectorKind() == VectorKind::RVVFixedLengthMask_4) {
mangleRISCVFixedRVVVectorType(T);
return;
}
diff --git a/clang/lib/AST/JSONNodeDumper.cpp b/clang/lib/AST/JSONNodeDumper.cpp
index eeb314b8d32b0..f8f80c8c25157 100644
--- a/clang/lib/AST/JSONNodeDumper.cpp
+++ b/clang/lib/AST/JSONNodeDumper.cpp
@@ -737,6 +737,9 @@ void JSONNodeDumper::VisitVectorType(const VectorType *VT) {
JOS.attribute("vectorKind", "fixed-length rvv data vector");
break;
case VectorKind::RVVFixedLengthMask:
+ case VectorKind::RVVFixedLengthMask_1:
+ case VectorKind::RVVFixedLengthMask_2:
+ case VectorKind::RVVFixedLengthMask_4:
JOS.attribute("vectorKind", "fixed-length rvv mask vector");
break;
}
diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp
index 5ba9523504258..388c927c9aa55 100644
--- a/clang/lib/AST/TextNodeDumper.cpp
+++ b/clang/lib/AST/TextNodeDumper.cpp
@@ -1859,6 +1859,9 @@ void TextNodeDumper::VisitVectorType(const VectorType *T) {
OS << " fixed-length rvv data vector";
break;
case VectorKind::RVVFixedLengthMask:
+ case VectorKind::RVVFixedLengthMask_1:
+ case VectorKind::RVVFixedLengthMask_2:
+ case VectorKind::RVVFixedLengthMask_4:
OS << " fixed-length rvv mask vector";
break;
}
diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp
index ffec3ef9d2269..f9bf63aa86e73 100644
--- a/clang/lib/AST/TypePrinter.cpp
+++ b/clang/lib/AST/TypePrinter.cpp
@@ -721,6 +721,9 @@ void TypePrinter::printVectorBefore(const VectorType *T, raw_ostream &OS) {
break;
case VectorKind::RVVFixedLengthData:
case VectorKind::RVVFixedLengthMask:
+ case VectorKind::RVVFixedLengthMask_1:
+ case VectorKind::RVVFixedLengthMask_2:
+ case VectorKind::RVVFixedLengthMask_4:
// FIXME: We prefer to print the size directly here, but have no way
// to get the size of the type.
OS << "__attribute__((__riscv_rvv_vector_bits__(";
@@ -801,6 +804,9 @@ void TypePrinter::printDependentVectorBefore(
break;
case VectorKind::RVVFixedLengthData:
case VectorKind::RVVFixedLengthMask:
+ case VectorKind::RVVFixedLengthMask_1:
+ case VectorKind::RVVFixedLengthMask_2:
+ case VectorKind::RVVFixedLengthMask_4:
// FIXME: We prefer to print the size directly here, but have no way
// to get the size of the type.
OS << "__attribute__((__riscv_rvv_vector_bits__(";
diff --git a/clang/lib/CodeGen/Targets/RISCV.cpp b/clang/lib/CodeGen/Targets/RISCV.cpp
index f2add9351c03c..826a1ec2c9d38 100644
--- a/clang/lib/CodeGen/Targets/RISCV.cpp
+++ b/clang/lib/CodeGen/Targets/RISCV.cpp
@@ -327,11 +327,20 @@ ABIArgInfo RISCVABIInfo::coerceVLSVector(QualType Ty) const {
getContext().getTargetInfo().getVScaleRange(getContext().getLangOpts());
unsigned NumElts = VT->getNumElements();
- llvm::Type *EltType;
- if (VT->getVectorKind() == VectorKind::RVVFixedLengthMask) {
+ llvm::Type *EltType = llvm::Type::getInt1Ty(getVMContext());
+ switch (VT->getVectorKind()) {
+ case VectorKind::RVVFixedLengthMask_1:
+ break;
+ case VectorKind::RVVFixedLengthMask_2:
+ NumElts *= 2;
+ break;
+ case VectorKind::RVVFixedLengthMask_4:
+ NumElts *= 4;
+ break;
+ case VectorKind::RVVFixedLengthMask:
NumElts *= 8;
- EltType = llvm::Type::getInt1Ty(getVMContext());
- } else {
+ break;
+ default:
assert(VT->getVectorKind() == VectorKind::RVVFixedLengthData &&
"Unexpected vector kind");
EltType = CGT.ConvertType(VT->getElementType());
@@ -453,7 +462,10 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
if (const VectorType *VT = Ty->getAs<VectorType>())
if (VT->getVectorKind() == VectorKind::RVVFixedLengthData ||
- VT->getVectorKind() == VectorKind::RVVFixedLengthMask)
+ VT->getVectorKind() == VectorKind::RVVFixedLengthMask ||
+ VT->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
+ VT->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
+ VT->getVectorKind() == VectorKind::RVVFixedLengthMask_4)
return coerceVLSVector(Ty);
// Aggregates which are <= 2*XLen will be passed in registers if possible,
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 439db55668cc6..d8156f68205eb 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -10115,7 +10115,10 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
VecType->getVectorKind() == VectorKind::SveFixedLengthPredicate)
return true;
if (VecType->getVectorKind() == VectorKind::RVVFixedLengthData ||
- VecType->getVectorKind() == VectorKind::RVVFixedLengthMask) {
+ VecType->getVectorKind() == VectorKind::RVVFixedLengthMask ||
+ VecType->getVectorKind() == VectorKind::RVVFixedLengthMask_1 ||
+ VecType->getVectorKind() == VectorKind::RVVFixedLengthMask_2 ||
+ VecType->getVectorKind() == VectorKind::RVVFixedLengthMask_4) {
SVEorRVV = 1;
return true;
}
@@ -10147,7 +10150,13 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
VectorKind::SveFixedLengthPredicate)
return true;
if (SecondVecType->getVectorKind() == VectorKind::RVVFixedLengthData ||
- SecondVecType->getVectorKind() == VectorKind::RVVFixedLengthMask) {
+ SecondVecType->getVectorKind() == VectorKind::RVVFixedLengthMask ||
+ SecondVecType->getVectorKind() ==
+ VectorKind::RVVFixedLengthMask_1 ||
+ SecondVecType->getVectorKind() ==
+ VectorKind::RVVFixedLengthMask_2 ||
+ SecondVecType->getVectorKind() ==
+ VectorKind::RVVFixedLengthMask_4) {
SVEorRVV = 1;
return true;
}
diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp
index 6fa39cdccef2b..6c87792b382f8 100644
--- a/clang/lib/Sema/SemaType.cpp
+++ b/clang/lib/Sema/SemaType.cpp
@@ -8349,14 +8349,28 @@ static void HandleRISCVRVVVectorBitsTypeAttr(QualType &CurType,
unsigned NumElts;
if (Info.ElementType == S.Context.BoolTy) {
NumElts = VecSize / S.Context.getCharWidth();
- VecKind = VectorKind::RVVFixedLengthMask;
+ if (!NumElts) {
+ NumElts = 1;
+ switch (VecSize) {
+ case 1:
+ VecKind = VectorKind::RVVFixedLengthMask_1;
+ break;
+ case 2:
+ VecKind = VectorKind::RVVFixedLengthMask_2;
+ break;
+ case 4:
+ VecKind = VectorKind::RVVFixedLengthMask_4;
+ break;
+ }
+ } else
+ VecKind = VectorKind::RVVFixedLengthMask;
} else {
ExpectedSize *= EltSize;
NumElts = VecSize / EltSize;
}
// The attribute vector size must match -mrvv-vector-bits.
- if (ExpectedSize % 8 != 0 || VecSize != ExpectedSize) {
+ if (VecSize != ExpectedSize) {
S.Diag(Attr.getLoc(), diag::err_attribute_bad_rvv_vector_size)
<< VecSize << ExpectedSize;
Attr.setInvalid();
diff --git a/clang/test/CodeGen/RISCV/attr-riscv-rvv-vector-bits-less-8-call.c b/clang/test/CodeGen/RISCV/attr-riscv-rvv-vector-bits-less-8-call.c
new file mode 100644
index 0000000000000..e2f02dc64f766
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/attr-riscv-rvv-vector-bits-less-8-call.c
@@ -0,0 +1,178 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +f -target-feature +d -target-feature +zve64d -mvscale-min=1 -mvscale-max=1 -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-64
+// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +f -target-feature +d -target-feature +zve64d -mvscale-min=2 -mvscale-max=2 -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-128
+
+// REQUIRES: riscv-registered-target
+
+#include <riscv_vector.h>
+
+typedef vbool32_t fixed_bool32_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/32)));
+typedef vbool64_t fixed_bool64_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/64)));
+
+//===----------------------------------------------------------------------===//
+// fixed, fixed
+//===----------------------------------------------------------------------===//
+
+// CHECK-64-LABEL: @call_bool32_ff(
+// CHECK-64-NEXT: entry:
+// CHECK-64-NEXT: [[SAVED_VALUE4:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-64-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-64-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1_COERCE:%.*]], <vscale x 2 x i1> [[OP2_COERCE:%.*]], i64 2)
+// CHECK-64-NEXT: store <vscale x 2 x i1> [[TMP0]], ptr [[SAVED_VALUE4]], align 1, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-64-NEXT: [[TMP1:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE4]], align 1, !tbaa [[TBAA10:![0-9]+]]
+// CHECK-64-NEXT: store <1 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: [[TMP2:%.*]] = load <vscale x 2 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: ret <vscale x 2 x i1> [[TMP2]]
+//
+// CHECK-128-LABEL: @call_bool32_ff(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: [[SAVED_VALUE4:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-128-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-128-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1_COERCE:%.*]], <vscale x 2 x i1> [[OP2_COERCE:%.*]], i64 4)
+// CHECK-128-NEXT: store <vscale x 2 x i1> [[TMP0]], ptr [[SAVED_VALUE4]], align 1, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE4]], align 1, !tbaa [[TBAA10:![0-9]+]]
+// CHECK-128-NEXT: store <1 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: [[TMP2:%.*]] = load <vscale x 2 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: ret <vscale x 2 x i1> [[TMP2]]
+//
+fixed_bool32_t call_bool32_ff(fixed_bool32_t op1, fixed_bool32_t op2) {
+ return __riscv_vmand(op1, op2, __riscv_v_fixed_vlen / 32);
+}
+
+// CHECK-64-LABEL: @call_bool64_ff(
+// CHECK-64-NEXT: entry:
+// CHECK-64-NEXT: [[SAVED_VALUE4:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-64-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-64-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1_COERCE:%.*]], <vscale x 1 x i1> [[OP2_COERCE:%.*]], i64 1)
+// CHECK-64-NEXT: store <vscale x 1 x i1> [[TMP0]], ptr [[SAVED_VALUE4]], align 1, !tbaa [[TBAA11:![0-9]+]]
+// CHECK-64-NEXT: [[TMP1:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE4]], align 1, !tbaa [[TBAA10]]
+// CHECK-64-NEXT: store <1 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: [[TMP2:%.*]] = load <vscale x 1 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: ret <vscale x 1 x i1> [[TMP2]]
+//
+// CHECK-128-LABEL: @call_bool64_ff(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: [[SAVED_VALUE4:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-128-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-128-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1_COERCE:%.*]], <vscale x 1 x i1> [[OP2_COERCE:%.*]], i64 2)
+// CHECK-128-NEXT: store <vscale x 1 x i1> [[TMP0]], ptr [[SAVED_VALUE4]], align 1, !tbaa [[TBAA11:![0-9]+]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE4]], align 1, !tbaa [[TBAA10]]
+// CHECK-128-NEXT: store <1 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: [[TMP2:%.*]] = load <vscale x 1 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: ret <vscale x 1 x i1> [[TMP2]]
+//
+fixed_bool64_t call_bool64_ff(fixed_bool64_t op1, fixed_bool64_t op2) {
+ return __riscv_vmand(op1, op2, __riscv_v_fixed_vlen / 64);
+}
+
+//===----------------------------------------------------------------------===//
+// fixed, scalable
+//===----------------------------------------------------------------------===//
+
+// CHECK-64-LABEL: @call_bool32_fs(
+// CHECK-64-NEXT: entry:
+// CHECK-64-NEXT: [[SAVED_VALUE2:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-64-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-64-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1_COERCE:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 2)
+// CHECK-64-NEXT: store <vscale x 2 x i1> [[TMP0]], ptr [[SAVED_VALUE2]], align 1, !tbaa [[TBAA6]]
+// CHECK-64-NEXT: [[TMP1:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE2]], align 1, !tbaa [[TBAA10]]
+// CHECK-64-NEXT: store <1 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: [[TMP2:%.*]] = load <vscale x 2 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: ret <vscale x 2 x i1> [[TMP2]]
+//
+// CHECK-128-LABEL: @call_bool32_fs(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: [[SAVED_VALUE2:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-128-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-128-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1_COERCE:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 4)
+// CHECK-128-NEXT: store <vscale x 2 x i1> [[TMP0]], ptr [[SAVED_VALUE2]], align 1, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE2]], align 1, !tbaa [[TBAA10]]
+// CHECK-128-NEXT: store <1 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: [[TMP2:%.*]] = load <vscale x 2 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: ret <vscale x 2 x i1> [[TMP2]]
+//
+fixed_bool32_t call_bool32_fs(fixed_bool32_t op1, vbool32_t op2) {
+ return __riscv_vmand(op1, op2, __riscv_v_fixed_vlen / 32);
+}
+
+// CHECK-64-LABEL: @call_bool64_fs(
+// CHECK-64-NEXT: entry:
+// CHECK-64-NEXT: [[SAVED_VALUE2:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-64-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-64-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1_COERCE:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 1)
+// CHECK-64-NEXT: store <vscale x 1 x i1> [[TMP0]], ptr [[SAVED_VALUE2]], align 1, !tbaa [[TBAA11]]
+// CHECK-64-NEXT: [[TMP1:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE2]], align 1, !tbaa [[TBAA10]]
+// CHECK-64-NEXT: store <1 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: [[TMP2:%.*]] = load <vscale x 1 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: ret <vscale x 1 x i1> [[TMP2]]
+//
+// CHECK-128-LABEL: @call_bool64_fs(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: [[SAVED_VALUE2:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-128-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-128-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1_COERCE:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 2)
+// CHECK-128-NEXT: store <vscale x 1 x i1> [[TMP0]], ptr [[SAVED_VALUE2]], align 1, !tbaa [[TBAA11]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE2]], align 1, !tbaa [[TBAA10]]
+// CHECK-128-NEXT: store <1 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: [[TMP2:%.*]] = load <vscale x 1 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: ret <vscale x 1 x i1> [[TMP2]]
+//
+fixed_bool64_t call_bool64_fs(fixed_bool64_t op1, vbool64_t op2) {
+ return __riscv_vmand(op1, op2, __riscv_v_fixed_vlen / 64);
+}
+
+//===----------------------------------------------------------------------===//
+// scalable, scalable
+//===----------------------------------------------------------------------===//
+
+// CHECK-64-LABEL: @call_bool32_ss(
+// CHECK-64-NEXT: entry:
+// CHECK-64-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-64-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-64-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 2)
+// CHECK-64-NEXT: store <vscale x 2 x i1> [[TMP0]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA6]]
+// CHECK-64-NEXT: [[TMP1:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA10]]
+// CHECK-64-NEXT: store <1 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: [[TMP2:%.*]] = load <vscale x 2 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: ret <vscale x 2 x i1> [[TMP2]]
+//
+// CHECK-128-LABEL: @call_bool32_ss(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-128-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-128-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.riscv.vmand.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 4)
+// CHECK-128-NEXT: store <vscale x 2 x i1> [[TMP0]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA10]]
+// CHECK-128-NEXT: store <1 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: [[TMP2:%.*]] = load <vscale x 2 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: ret <vscale x 2 x i1> [[TMP2]]
+//
+fixed_bool32_t call_bool32_ss(vbool32_t op1, vbool32_t op2) {
+ return __riscv_vmand(op1, op2, __riscv_v_fixed_vlen / 32);
+}
+
+// CHECK-64-LABEL: @call_bool64_ss(
+// CHECK-64-NEXT: entry:
+// CHECK-64-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-64-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-64-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 1)
+// CHECK-64-NEXT: store <vscale x 1 x i1> [[TMP0]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA11]]
+// CHECK-64-NEXT: [[TMP1:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA10]]
+// CHECK-64-NEXT: store <1 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: [[TMP2:%.*]] = load <vscale x 1 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: ret <vscale x 1 x i1> [[TMP2]]
+//
+// CHECK-128-LABEL: @call_bool64_ss(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-128-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-128-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 2)
+// CHECK-128-NEXT: store <vscale x 1 x i1> [[TMP0]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA11]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA10]]
+// CHECK-128-NEXT: store <1 x i8> [[TMP1]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: [[TMP2:%.*]] = load <vscale x 1 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: ret <vscale x 1 x i1> [[TMP2]]
+//
+fixed_bool64_t call_bool64_ss(vbool64_t op1, vbool64_t op2) {
+ return __riscv_vmand(op1, op2, __riscv_v_fixed_vlen / 64);
+}
diff --git a/clang/test/CodeGen/RISCV/attr-riscv-rvv-vector-bits-less-8-cast.c b/clang/test/CodeGen/RISCV/attr-riscv-rvv-vector-bits-less-8-cast.c
new file mode 100644
index 0000000000000..f0fa7e8d07b4d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/attr-riscv-rvv-vector-bits-less-8-cast.c
@@ -0,0 +1,123 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +f -target-feature +d -target-feature +zve64d -mvscale-min=1 -mvscale-max=1 -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-64
+// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +f -target-feature +d -target-feature +zve64d -mvscale-min=2 -mvscale-max=2 -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-128
+
+// REQUIRES: riscv-registered-target
+
+#include <stdint.h>
+
+typedef __rvv_bool8_t vbool8_t;
+typedef __rvv_bool16_t vbool16_t;
+typedef __rvv_bool32_t vbool32_t;
+typedef __rvv_bool64_t vbool64_t;
+typedef vbool8_t fixed_bool8_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/8)));
+typedef vbool16_t fixed_bool16_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/16)));
+typedef vbool32_t fixed_bool32_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/32)));
+typedef vbool64_t fixed_bool64_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/64)));
+
+// CHECK-64-LABEL: @from_vbool8_t(
+// CHECK-64-NEXT: entry:
+// CHECK-64-NEXT: ret <vscale x 8 x i1> [[TYPE:%.*]]
+//
+// CHECK-128-LABEL: @from_vbool8_t(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: ret <vscale x 8 x i1> [[TYPE:%.*]]
+//
+fixed_bool8_t from_vbool8_t(vbool8_t type) {
+ return type;
+}
+
+// CHECK-64-LABEL: @from_vbool16_t(
+// CHECK-64-NEXT: entry:
+// CHECK-64-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 4 x i1>, align 1
+// CHECK-64-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i1>, align 1
+// CHECK-64-NEXT: store <vscale x 4 x i1> [[TYPE:%.*]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-64-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA10:![0-9]+]]
+// CHECK-64-NEXT: store <1 x i8> [[TMP0]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: ret <vscale x 4 x i1> [[TMP1]]
+//
+// CHECK-128-LABEL: @from_vbool16_t(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 4 x i1>, align 1
+// CHECK-128-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 4 x i1>, align 1
+// CHECK-128-NEXT: store <vscale x 4 x i1> [[TYPE:%.*]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA10:![0-9]+]]
+// CHECK-128-NEXT: store <1 x i8> [[TMP0]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: ret <vscale x 4 x i1> [[TMP1]]
+//
+fixed_bool16_t from_vbool16_t(vbool16_t type) {
+ return type;
+}
+// CHECK-64-LABEL: @from_vbool32_t(
+// CHECK-64-NEXT: entry:
+// CHECK-64-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-64-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-64-NEXT: store <vscale x 2 x i1> [[TYPE:%.*]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA11:![0-9]+]]
+// CHECK-64-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA10]]
+// CHECK-64-NEXT: store <1 x i8> [[TMP0]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: ret <vscale x 2 x i1> [[TMP1]]
+//
+// CHECK-128-LABEL: @from_vbool32_t(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-128-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-128-NEXT: store <vscale x 2 x i1> [[TYPE:%.*]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA11:![0-9]+]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA10]]
+// CHECK-128-NEXT: store <1 x i8> [[TMP0]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: ret <vscale x 2 x i1> [[TMP1]]
+//
+fixed_bool32_t from_vbool32_t(vbool32_t type) {
+ return type;
+}
+
+// CHECK-64-LABEL: @to_vbool32_t(
+// CHECK-64-NEXT: entry:
+// CHECK-64-NEXT: ret <vscale x 2 x i1> [[TYPE_COERCE:%.*]]
+//
+// CHECK-128-LABEL: @to_vbool32_t(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: ret <vscale x 2 x i1> [[TYPE_COERCE:%.*]]
+//
+vbool32_t to_vbool32_t(fixed_bool32_t type) {
+ return type;
+}
+
+// CHECK-64-LABEL: @from_vbool64_t(
+// CHECK-64-NEXT: entry:
+// CHECK-64-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-64-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-64-NEXT: store <vscale x 1 x i1> [[TYPE:%.*]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA13:![0-9]+]]
+// CHECK-64-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA10]]
+// CHECK-64-NEXT: store <1 x i8> [[TMP0]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: [[TMP1:%.*]] = load <vscale x 1 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-64-NEXT: ret <vscale x 1 x i1> [[TMP1]]
+//
+// CHECK-128-LABEL: @from_vbool64_t(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-128-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-128-NEXT: store <vscale x 1 x i1> [[TYPE:%.*]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA13:![0-9]+]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA10]]
+// CHECK-128-NEXT: store <1 x i8> [[TMP0]], ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 1 x i1>, ptr [[RETVAL_COERCE]], align 1
+// CHECK-128-NEXT: ret <vscale x 1 x i1> [[TMP1]]
+//
+fixed_bool64_t from_vbool64_t(vbool64_t type) {
+ return type;
+}
+
+// CHECK-64-LABEL: @to_vbool64_t(
+// CHECK-64-NEXT: entry:
+// CHECK-64-NEXT: ret <vscale x 1 x i1> [[TYPE_COERCE:%.*]]
+//
+// CHECK-128-LABEL: @to_vbool64_t(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: ret <vscale x 1 x i1> [[TYPE_COERCE:%.*]]
+//
+vbool64_t to_vbool64_t(fixed_bool64_t type) {
+ return type;
+}
diff --git a/clang/test/CodeGen/RISCV/attr-rvv-vector-bits-bitcast-less-8.c b/clang/test/CodeGen/RISCV/attr-rvv-vector-bits-bitcast-less-8.c
new file mode 100644
index 0000000000000..984e96a01a4ea
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/attr-rvv-vector-bits-bitcast-less-8.c
@@ -0,0 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +f -target-feature +d -target-feature +zve64d -mvscale-min=2 -mvscale-max=2 -O1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-128
+
+// REQUIRES: riscv-registered-target
+
+#include <stdint.h>
+
+typedef __rvv_int8m1_t vint8m1_t;
+typedef __rvv_uint8m1_t vuint8m1_t;
+typedef __rvv_int16m1_t vint16m1_t;
+typedef __rvv_uint16m1_t vuint16m1_t;
+typedef __rvv_int32m1_t vint32m1_t;
+typedef __rvv_uint32m1_t vuint32m1_t;
+typedef __rvv_int64m1_t vint64m1_t;
+typedef __rvv_uint64m1_t vuint64m1_t;
+typedef __rvv_float32m1_t vfloat32m1_t;
+typedef __rvv_float64m1_t vfloat64m1_t;
+
+typedef __rvv_bool1_t vbool1_t;
+typedef __rvv_bool2_t vbool2_t;
+typedef __rvv_bool4_t vbool4_t;
+typedef __rvv_bool8_t vbool8_t;
+typedef __rvv_bool16_t vbool16_t;
+typedef __rvv_bool32_t vbool32_t;
+typedef __rvv_bool64_t vbool64_t;
+
+typedef vint64m1_t fixed_int64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
+typedef vfloat64m1_t fixed_float64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
+typedef vbool1_t fixed_bool1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
+typedef vbool2_t fixed_bool2_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 2)));
+typedef vbool4_t fixed_bool4_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 4)));
+typedef vbool8_t fixed_bool8_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 8)));
+typedef vbool16_t fixed_bool16_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 16)));
+typedef vbool32_t fixed_bool32_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 32)));
+typedef vbool64_t fixed_bool64_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 64)));
+
+#define DEFINE_STRUCT(ty) \
+ struct struct_##ty { \
+ fixed_##ty##_t x, y[3]; \
+ } struct_##ty;
+
+DEFINE_STRUCT(int64m1)
+DEFINE_STRUCT(float64m1)
+DEFINE_STRUCT(bool1)
+DEFINE_STRUCT(bool2)
+DEFINE_STRUCT(bool4)
+DEFINE_STRUCT(bool8)
+DEFINE_STRUCT(bool16)
+DEFINE_STRUCT(bool32)
+DEFINE_STRUCT(bool64)
+
+//===----------------------------------------------------------------------===//
+// bool
+//===----------------------------------------------------------------------===//
+
+// CHECK-128-LABEL: @read_bool32(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <1 x i8>, align 1
+// CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i64 1
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[Y]], align 1, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-128-NEXT: store <1 x i8> [[TMP0]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i1>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: ret <vscale x 2 x i1> [[TMP1]]
+//
+vbool32_t read_bool32(struct struct_bool32 *s) {
+ return s->y[0];
+}
+
+// CHECK-128-LABEL: @write_bool32(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 2 x i1>, align 1
+// CHECK-128-NEXT: store <vscale x 2 x i1> [[X:%.*]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i64 1
+// CHECK-128-NEXT: store <1 x i8> [[TMP0]], ptr [[Y]], align 1, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: ret void
+//
+void write_bool32(struct struct_bool32 *s, vbool32_t x) {
+ s->y[0] = x;
+}
+
+// CHECK-128-LABEL: @read_bool64(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <1 x i8>, align 1
+// CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i64 1
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[Y]], align 1, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: store <1 x i8> [[TMP0]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 1 x i1>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: ret <vscale x 1 x i1> [[TMP1]]
+//
+vbool64_t read_bool64(struct struct_bool64 *s) {
+ return s->y[0];
+}
+
+// CHECK-128-LABEL: @write_bool64(
+// CHECK-128-NEXT: entry:
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 1 x i1>, align 1
+// CHECK-128-NEXT: store <vscale x 1 x i1> [[X:%.*]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA11:![0-9]+]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: [[Y:%.*]] = getelementptr inbounds i8, ptr [[S:%.*]], i64 1
+// CHECK-128-NEXT: store <1 x i8> [[TMP0]], ptr [[Y]], align 1, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: ret void
+//
+void write_bool64(struct struct_bool64 *s, vbool64_t x) {
+ s->y[0] = x;
+}
diff --git a/clang/test/CodeGen/RISCV/attr-rvv-vector-bits-globals.c b/clang/test/CodeGen/RISCV/attr-rvv-vector-bits-globals.c
index 973a25ee96656..663e436b4dab6 100644
--- a/clang/test/CodeGen/RISCV/attr-rvv-vector-bits-globals.c
+++ b/clang/test/CodeGen/RISCV/attr-rvv-vector-bits-globals.c
@@ -43,13 +43,13 @@ fixed_bool32_t global_bool32;
// CHECK-64-LABEL: @write_global_i64(
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[CAST_FIXED:%.*]] = tail call <1 x i64> @llvm.vector.extract.v1i64.nxv1i64(<vscale x 1 x i64> [[V:%.*]], i64 0)
-// CHECK-64-NEXT: store <1 x i64> [[CAST_FIXED]], ptr @global_i64, align 8, !tbaa [[TBAA4:![0-9]+]]
+// CHECK-64-NEXT: store <1 x i64> [[CAST_FIXED]], ptr @global_i64, align 8, !tbaa [[TBAA6:![0-9]+]]
// CHECK-64-NEXT: ret void
//
// CHECK-256-LABEL: @write_global_i64(
// CHECK-256-NEXT: entry:
// CHECK-256-NEXT: [[CAST_FIXED:%.*]] = tail call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[V:%.*]], i64 0)
-// CHECK-256-NEXT: store <4 x i64> [[CAST_FIXED]], ptr @global_i64, align 8, !tbaa [[TBAA4:![0-9]+]]
+// CHECK-256-NEXT: store <4 x i64> [[CAST_FIXED]], ptr @global_i64, align 8, !tbaa [[TBAA6:![0-9]+]]
// CHECK-256-NEXT: ret void
//
void write_global_i64(vint64m1_t v) { global_i64 = v; }
@@ -58,14 +58,14 @@ void write_global_i64(vint64m1_t v) { global_i64 = v; }
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i1> [[V:%.*]] to <vscale x 8 x i8>
// CHECK-64-NEXT: [[CAST_FIXED:%.*]] = tail call <8 x i8> @llvm.vector.extract.v8i8.nxv8i8(<vscale x 8 x i8> [[TMP0]], i64 0)
-// CHECK-64-NEXT: store <8 x i8> [[CAST_FIXED]], ptr @global_bool1, align 8, !tbaa [[TBAA4]]
+// CHECK-64-NEXT: store <8 x i8> [[CAST_FIXED]], ptr @global_bool1, align 8, !tbaa [[TBAA6]]
// CHECK-64-NEXT: ret void
//
// CHECK-256-LABEL: @write_global_bool1(
// CHECK-256-NEXT: entry:
// CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <vscale x 64 x i1> [[V:%.*]] to <vscale x 8 x i8>
// CHECK-256-NEXT: [[CAST_FIXED:%.*]] = tail call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[TMP0]], i64 0)
-// CHECK-256-NEXT: store <32 x i8> [[CAST_FIXED]], ptr @global_bool1, align 8, !tbaa [[TBAA4]]
+// CHECK-256-NEXT: store <32 x i8> [[CAST_FIXED]], ptr @global_bool1, align 8, !tbaa [[TBAA6]]
// CHECK-256-NEXT: ret void
//
void write_global_bool1(vbool1_t v) { global_bool1 = v; }
@@ -74,14 +74,14 @@ void write_global_bool1(vbool1_t v) { global_bool1 = v; }
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1> [[V:%.*]] to <vscale x 2 x i8>
// CHECK-64-NEXT: [[CAST_FIXED:%.*]] = tail call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
-// CHECK-64-NEXT: store <2 x i8> [[CAST_FIXED]], ptr @global_bool4, align 2, !tbaa [[TBAA4]]
+// CHECK-64-NEXT: store <2 x i8> [[CAST_FIXED]], ptr @global_bool4, align 2, !tbaa [[TBAA6]]
// CHECK-64-NEXT: ret void
//
// CHECK-256-LABEL: @write_global_bool4(
// CHECK-256-NEXT: entry:
// CHECK-256-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1> [[V:%.*]] to <vscale x 2 x i8>
// CHECK-256-NEXT: [[CAST_FIXED:%.*]] = tail call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
-// CHECK-256-NEXT: store <8 x i8> [[CAST_FIXED]], ptr @global_bool4, align 8, !tbaa [[TBAA4]]
+// CHECK-256-NEXT: store <8 x i8> [[CAST_FIXED]], ptr @global_bool4, align 8, !tbaa [[TBAA6]]
// CHECK-256-NEXT: ret void
//
void write_global_bool4(vbool4_t v) { global_bool4 = v; }
@@ -90,9 +90,9 @@ void write_global_bool4(vbool4_t v) { global_bool4 = v; }
// CHECK-256-LABEL: @write_global_bool32(
// CHECK-256-NEXT: entry:
// CHECK-256-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 2 x i1>, align 1
-// CHECK-256-NEXT: store <vscale x 2 x i1> [[V:%.*]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA7:![0-9]+]]
-// CHECK-256-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA4]]
-// CHECK-256-NEXT: store <1 x i8> [[TMP0]], ptr @global_bool32, align 1, !tbaa [[TBAA4]]
+// CHECK-256-NEXT: store <vscale x 2 x i1> [[V:%.*]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-256-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA6]]
+// CHECK-256-NEXT: store <1 x i8> [[TMP0]], ptr @global_bool32, align 1, !tbaa [[TBAA6]]
// CHECK-256-NEXT: ret void
//
void write_global_bool32(vbool32_t v) { global_bool32 = v; }
@@ -104,13 +104,13 @@ void write_global_bool32(vbool32_t v) { global_bool32 = v; }
// CHECK-64-LABEL: @read_global_i64(
// CHECK-64-NEXT: entry:
-// CHECK-64-NEXT: [[TMP0:%.*]] = load <1 x i64>, ptr @global_i64, align 8, !tbaa [[TBAA4]]
+// CHECK-64-NEXT: [[TMP0:%.*]] = load <1 x i64>, ptr @global_i64, align 8, !tbaa [[TBAA6]]
// CHECK-64-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v1i64(<vscale x 1 x i64> undef, <1 x i64> [[TMP0]], i64 0)
// CHECK-64-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
// CHECK-256-LABEL: @read_global_i64(
// CHECK-256-NEXT: entry:
-// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @global_i64, align 8, !tbaa [[TBAA4]]
+// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i64>, ptr @global_i64, align 8, !tbaa [[TBAA6]]
// CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[TMP0]], i64 0)
// CHECK-256-NEXT: ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
@@ -118,14 +118,14 @@ vint64m1_t read_global_i64() { return global_i64; }
// CHECK-64-LABEL: @read_global_bool1(
// CHECK-64-NEXT: entry:
-// CHECK-64-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr @global_bool1, align 8, !tbaa [[TBAA4]]
+// CHECK-64-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr @global_bool1, align 8, !tbaa [[TBAA6]]
// CHECK-64-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v8i8(<vscale x 8 x i8> undef, <8 x i8> [[TMP0]], i64 0)
// CHECK-64-NEXT: [[TMP1:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-64-NEXT: ret <vscale x 64 x i1> [[TMP1]]
//
// CHECK-256-LABEL: @read_global_bool1(
// CHECK-256-NEXT: entry:
-// CHECK-256-NEXT: [[TMP0:%.*]] = load <32 x i8>, ptr @global_bool1, align 8, !tbaa [[TBAA4]]
+// CHECK-256-NEXT: [[TMP0:%.*]] = load <32 x i8>, ptr @global_bool1, align 8, !tbaa [[TBAA6]]
// CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[TMP0]], i64 0)
// CHECK-256-NEXT: [[TMP1:%.*]] = bitcast <vscale x 8 x i8> [[CAST_SCALABLE]] to <vscale x 64 x i1>
// CHECK-256-NEXT: ret <vscale x 64 x i1> [[TMP1]]
@@ -134,14 +134,14 @@ vbool1_t read_global_bool1() { return global_bool1; }
// CHECK-64-LABEL: @read_global_bool4(
// CHECK-64-NEXT: entry:
-// CHECK-64-NEXT: [[TMP0:%.*]] = load <2 x i8>, ptr @global_bool4, align 2, !tbaa [[TBAA4]]
+// CHECK-64-NEXT: [[TMP0:%.*]] = load <2 x i8>, ptr @global_bool4, align 2, !tbaa [[TBAA6]]
// CHECK-64-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> [[TMP0]], i64 0)
// CHECK-64-NEXT: [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CAST_SCALABLE]] to <vscale x 16 x i1>
// CHECK-64-NEXT: ret <vscale x 16 x i1> [[TMP1]]
//
// CHECK-256-LABEL: @read_global_bool4(
// CHECK-256-NEXT: entry:
-// CHECK-256-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr @global_bool4, align 8, !tbaa [[TBAA4]]
+// CHECK-256-NEXT: [[TMP0:%.*]] = load <8 x i8>, ptr @global_bool4, align 8, !tbaa [[TBAA6]]
// CHECK-256-NEXT: [[CAST_SCALABLE:%.*]] = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP0]], i64 0)
// CHECK-256-NEXT: [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CAST_SCALABLE]] to <vscale x 16 x i1>
// CHECK-256-NEXT: ret <vscale x 16 x i1> [[TMP1]]
@@ -152,9 +152,9 @@ vbool4_t read_global_bool4() { return global_bool4; }
// CHECK-256-LABEL: @read_global_bool32(
// CHECK-256-NEXT: entry:
// CHECK-256-NEXT: [[SAVED_VALUE:%.*]] = alloca <1 x i8>, align 1
-// CHECK-256-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr @global_bool32, align 1, !tbaa [[TBAA4]]
-// CHECK-256-NEXT: store <1 x i8> [[TMP0]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA4]]
-// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i1>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA4]]
+// CHECK-256-NEXT: [[TMP0:%.*]] = load <1 x i8>, ptr @global_bool32, align 1, !tbaa [[TBAA6]]
+// CHECK-256-NEXT: store <1 x i8> [[TMP0]], ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA6]]
+// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i1>, ptr [[SAVED_VALUE]], align 1, !tbaa [[TBAA6]]
// CHECK-256-NEXT: ret <vscale x 2 x i1> [[TMP1]]
//
vbool32_t read_global_bool32() { return global_bool32; }
diff --git a/clang/test/CodeGen/RISCV/attr-rvv-vector-bits-types.c b/clang/test/CodeGen/RISCV/attr-rvv-vector-bits-types.c
index cae42ec76c797..30421ef4554bf 100644
--- a/clang/test/CodeGen/RISCV/attr-rvv-vector-bits-types.c
+++ b/clang/test/CodeGen/RISCV/attr-rvv-vector-bits-types.c
@@ -149,15 +149,9 @@ typedef vuint64m8_t fixed_uint64m8_t __attribute__((riscv_rvv_vector_bits(__risc
typedef vfloat32m8_t fixed_float32m8_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen * 8)));
typedef vfloat64m8_t fixed_float64m8_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen * 8)));
-#if __riscv_v_fixed_vlen / 64 >= 8
typedef vbool64_t fixed_bool64_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 64)));
-#endif
-#if __riscv_v_fixed_vlen / 32 >= 8
typedef vbool32_t fixed_bool32_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 32)));
-#endif
-#if __riscv_v_fixed_vlen / 16 >= 8
typedef vbool16_t fixed_bool16_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 16)));
-#endif
typedef vbool8_t fixed_bool8_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 8)));
typedef vbool4_t fixed_bool4_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 4)));
typedef vbool2_t fixed_bool2_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 2)));
@@ -224,15 +218,9 @@ DEFINE_STRUCT(bool1)
DEFINE_STRUCT(bool2)
DEFINE_STRUCT(bool4)
DEFINE_STRUCT(bool8)
-#if __riscv_v_fixed_vlen / 16 >= 8
DEFINE_STRUCT(bool16)
-#endif
-#if __riscv_v_fixed_vlen / 32 >= 8
DEFINE_STRUCT(bool32)
-#endif
-#if __riscv_v_fixed_vlen / 64 >= 8
DEFINE_STRUCT(bool64)
-#endif
DEFINE_UNION(int8m1)
DEFINE_UNION(int16m1)
@@ -282,15 +270,9 @@ DEFINE_UNION(bool1)
DEFINE_UNION(bool2)
DEFINE_UNION(bool4)
DEFINE_UNION(bool8)
-#if __riscv_v_fixed_vlen / 16 >= 8
DEFINE_UNION(bool16)
-#endif
-#if __riscv_v_fixed_vlen / 32 >= 8
DEFINE_UNION(bool32)
-#endif
-#if __riscv_v_fixed_vlen / 64 >= 8
DEFINE_UNION(bool64)
-#endif
//===----------------------------------------------------------------------===//
// Global variables
@@ -351,15 +333,9 @@ fixed_bool1_t global_bool1;
fixed_bool2_t global_bool2;
fixed_bool4_t global_bool4;
fixed_bool8_t global_bool8;
-#if __riscv_v_fixed_vlen / 16 >= 8
fixed_bool16_t global_bool16;
-#endif
-#if __riscv_v_fixed_vlen / 32 >= 8
fixed_bool32_t global_bool32;
-#endif
-#if __riscv_v_fixed_vlen / 64 >= 8
fixed_bool64_t global_bool64;
-#endif
//===----------------------------------------------------------------------===//
// Global arrays
@@ -420,15 +396,9 @@ fixed_bool1_t global_arr_bool1[3];
fixed_bool2_t global_arr_bool2[3];
fixed_bool4_t global_arr_bool4[3];
fixed_bool8_t global_arr_bool8[3];
-#if __riscv_v_fixed_vlen / 16 >= 8
fixed_bool16_t global_arr_bool16[3];
-#endif
-#if __riscv_v_fixed_vlen / 32 >= 8
fixed_bool32_t global_arr_bool32[3];
-#endif
-#if __riscv_v_fixed_vlen / 64 >= 8
fixed_bool64_t global_arr_bool64[3];
-#endif
//===----------------------------------------------------------------------===//
// Locals
@@ -483,15 +453,9 @@ void f() {
fixed_bool2_t local_bool2;
fixed_bool4_t local_bool4;
fixed_bool8_t local_bool8;
-#if __riscv_v_fixed_vlen / 16 >= 8
fixed_bool16_t local_bool16;
-#endif
-#if __riscv_v_fixed_vlen / 32 >= 8
fixed_bool32_t local_bool32;
-#endif
-#if __riscv_v_fixed_vlen / 64 >= 8
fixed_bool64_t local_bool64;
-#endif
// Arrays
fixed_int8m1_t local_arr_i8[3];
@@ -558,15 +522,9 @@ void f() {
fixed_bool2_t local_arr_bool2[3];
fixed_bool4_t local_arr_bool4[3];
fixed_bool8_t local_arr_bool8[3];
-#if __riscv_v_fixed_vlen / 16 >= 8
fixed_bool16_t local_arr_bool16[3];
-#endif
-#if __riscv_v_fixed_vlen / 32 >= 8
fixed_bool32_t local_arr_bool32[3];
-#endif
-#if __riscv_v_fixed_vlen / 64 >= 8
fixed_bool64_t local_arr_bool64[3];
-#endif
}
//===----------------------------------------------------------------------===//
@@ -616,6 +574,9 @@ void f() {
// CHECK-64-NEXT: %struct.struct_bool2 = type { <4 x i8> }
// CHECK-64-NEXT: %struct.struct_bool4 = type { <2 x i8> }
// CHECK-64-NEXT: %struct.struct_bool8 = type { <1 x i8> }
+// CHECK-64-NEXT: %struct.struct_bool16 = type { <1 x i8> }
+// CHECK-64-NEXT: %struct.struct_bool32 = type { <1 x i8> }
+// CHECK-64-NEXT: %struct.struct_bool64 = type { <1 x i8> }
// CHECK-128: %struct.struct_int8m1 = type { <16 x i8> }
// CHECK-128-NEXT: %struct.struct_int16m1 = type { <8 x i16> }
@@ -662,6 +623,8 @@ void f() {
// CHECK-128-NEXT: %struct.struct_bool4 = type { <4 x i8> }
// CHECK-128-NEXT: %struct.struct_bool8 = type { <2 x i8> }
// CHECK-128-NEXT: %struct.struct_bool16 = type { <1 x i8> }
+// CHECK-128-NEXT: %struct.struct_bool32 = type { <1 x i8> }
+// CHECK-128-NEXT: %struct.struct_bool64 = type { <1 x i8> }
// CHECK-256: %struct.struct_int8m1 = type { <32 x i8> }
// CHECK-256-NEXT: %struct.struct_int16m1 = type { <16 x i16> }
@@ -709,6 +672,7 @@ void f() {
// CHECK-256-NEXT: %struct.struct_bool8 = type { <4 x i8> }
// CHECK-256-NEXT: %struct.struct_bool16 = type { <2 x i8> }
// CHECK-256-NEXT: %struct.struct_bool32 = type { <1 x i8> }
+// CHECK-256-NEXT: %struct.struct_bool64 = type { <1 x i8> }
// CHECK-512: %struct.struct_int8m1 = type { <64 x i8> }
// CHECK-512-NEXT: %struct.struct_int16m1 = type { <32 x i16> }
@@ -850,6 +814,9 @@ void f() {
// CHECK-64-NEXT: %union.union_bool2 = type { <4 x i8> }
// CHECK-64-NEXT: %union.union_bool4 = type { <2 x i8> }
// CHECK-64-NEXT: %union.union_bool8 = type { <1 x i8> }
+// CHECK-64-NEXT: %union.union_bool16 = type { <1 x i8> }
+// CHECK-64-NEXT: %union.union_bool32 = type { <1 x i8> }
+// CHECK-64-NEXT: %union.union_bool64 = type { <1 x i8> }
// CHECK-128: %union.union_int8m1 = type { <16 x i8> }
// CHECK-128-NEXT: %union.union_int16m1 = type { <8 x i16> }
@@ -896,6 +863,8 @@ void f() {
// CHECK-128-NEXT: %union.union_bool4 = type { <4 x i8> }
// CHECK-128-NEXT: %union.union_bool8 = type { <2 x i8> }
// CHECK-128-NEXT: %union.union_bool16 = type { <1 x i8> }
+// CHECK-128-NEXT: %union.union_bool32 = type { <1 x i8> }
+// CHECK-128-NEXT: %union.union_bool64 = type { <1 x i8> }
// CHECK-256: %union.union_int8m1 = type { <32 x i8> }
// CHECK-256-NEXT: %union.union_int16m1 = type { <16 x i16> }
@@ -943,6 +912,7 @@ void f() {
// CHECK-256-NEXT: %union.union_bool8 = type { <4 x i8> }
// CHECK-256-NEXT: %union.union_bool16 = type { <2 x i8> }
// CHECK-256-NEXT: %union.union_bool32 = type { <1 x i8> }
+// CHECK-256-NEXT: %union.union_bool64 = type { <1 x i8> }
// CHECK-512: %union.union_int8m1 = type { <64 x i8> }
// CHECK-512-NEXT: %union.union_int16m1 = type { <32 x i16> }
@@ -1087,6 +1057,9 @@ void f() {
// CHECK-64-NEXT: @global_bool2 ={{.*}} global <4 x i8> zeroinitializer, align 4
// CHECK-64-NEXT: @global_bool4 ={{.*}} global <2 x i8> zeroinitializer, align 2
// CHECK-64-NEXT: @global_bool8 ={{.*}} global <1 x i8> zeroinitializer, align 1
+// CHECK-64-NEXT: @global_bool16 ={{.*}} global <1 x i8> zeroinitializer, align 1
+// CHECK-64-NEXT: @global_bool32 ={{.*}} global <1 x i8> zeroinitializer, align 1
+// CHECK-64-NEXT: @global_bool64 ={{.*}} global <1 x i8> zeroinitializer, align 1
// CHECK-128: @global_i8 ={{.*}} global <16 x i8> zeroinitializer, align 8
// CHECK-128-NEXT: @global_i16 ={{.*}} global <8 x i16> zeroinitializer, align 8
@@ -1133,6 +1106,8 @@ void f() {
// CHECK-128-NEXT: @global_bool4 ={{.*}} global <4 x i8> zeroinitializer, align 4
// CHECK-128-NEXT: @global_bool8 ={{.*}} global <2 x i8> zeroinitializer, align 2
// CHECK-128-NEXT: @global_bool16 ={{.*}} global <1 x i8> zeroinitializer, align 1
+// CHECK-128-NEXT: @global_bool32 ={{.*}} global <1 x i8> zeroinitializer, align 1
+// CHECK-128-NEXT: @global_bool64 ={{.*}} global <1 x i8> zeroinitializer, align 1
// CHECK-256: @global_i8 ={{.*}} global <32 x i8> zeroinitializer, align 8
// CHECK-256-NEXT: @global_i16 ={{.*}} global <16 x i16> zeroinitializer, align 8
@@ -1180,6 +1155,7 @@ void f() {
// CHECK-256-NEXT: @global_bool8 ={{.*}} global <4 x i8> zeroinitializer, align 4
// CHECK-256-NEXT: @global_bool16 ={{.*}} global <2 x i8> zeroinitializer, align 2
// CHECK-256-NEXT: @global_bool32 ={{.*}} global <1 x i8> zeroinitializer, align 1
+// CHECK-256-NEXT: @global_bool64 ={{.*}} global <1 x i8> zeroinitializer, align 1
// CHECK-512: @global_i8 ={{.*}} global <64 x i8> zeroinitializer, align 8
// CHECK-512-NEXT: @global_i16 ={{.*}} global <32 x i16> zeroinitializer, align 8
@@ -1324,6 +1300,9 @@ void f() {
// CHECK-64-NEXT: @global_arr_bool2 ={{.*}} global [3 x <4 x i8>] zeroinitializer, align 4
// CHECK-64-NEXT: @global_arr_bool4 ={{.*}} global [3 x <2 x i8>] zeroinitializer, align 2
// CHECK-64-NEXT: @global_arr_bool8 ={{.*}} global [3 x <1 x i8>] zeroinitializer, align 1
+// CHECK-64-NEXT: @global_arr_bool16 ={{.*}} global [3 x <1 x i8>] zeroinitializer, align 1
+// CHECK-64-NEXT: @global_arr_bool32 ={{.*}} global [3 x <1 x i8>] zeroinitializer, align 1
+// CHECK-64-NEXT: @global_arr_bool64 ={{.*}} global [3 x <1 x i8>] zeroinitializer, align 1
// CHECK-128: @global_arr_i8 ={{.*}} global [3 x <16 x i8>] zeroinitializer, align 8
// CHECK-128-NEXT: @global_arr_i16 ={{.*}} global [3 x <8 x i16>] zeroinitializer, align 8
@@ -1370,6 +1349,8 @@ void f() {
// CHECK-128-NEXT: @global_arr_bool4 ={{.*}} global [3 x <4 x i8>] zeroinitializer, align 4
// CHECK-128-NEXT: @global_arr_bool8 ={{.*}} global [3 x <2 x i8>] zeroinitializer, align 2
// CHECK-128-NEXT: @global_arr_bool16 ={{.*}} global [3 x <1 x i8>] zeroinitializer, align 1
+// CHECK-128-NEXT: @global_arr_bool32 ={{.*}} global [3 x <1 x i8>] zeroinitializer, align 1
+// CHECK-128-NEXT: @global_arr_bool64 ={{.*}} global [3 x <1 x i8>] zeroinitializer, align 1
// CHECK-256: @global_arr_i8 ={{.*}} global [3 x <32 x i8>] zeroinitializer, align 8
// CHECK-256-NEXT: @global_arr_i16 ={{.*}} global [3 x <16 x i16>] zeroinitializer, align 8
@@ -1417,6 +1398,7 @@ void f() {
// CHECK-256-NEXT: @global_arr_bool8 ={{.*}} global [3 x <4 x i8>] zeroinitializer, align 4
// CHECK-256-NEXT: @global_arr_bool16 ={{.*}} global [3 x <2 x i8>] zeroinitializer, align 2
// CHECK-256-NEXT: @global_arr_bool32 ={{.*}} global [3 x <1 x i8>] zeroinitializer, align 1
+// CHECK-256-NEXT: @global_arr_bool64 ={{.*}} global [3 x <1 x i8>] zeroinitializer, align 1
// CHECK-512: @global_arr_i8 ={{.*}} global [3 x <64 x i8>] zeroinitializer, align 8
// CHECK-512-NEXT: @global_arr_i16 ={{.*}} global [3 x <32 x i16>] zeroinitializer, align 8
@@ -1561,6 +1543,9 @@ void f() {
// CHECK-64-NEXT: %local_bool2 = alloca <4 x i8>, align 4
// CHECK-64-NEXT: %local_bool4 = alloca <2 x i8>, align 2
// CHECK-64-NEXT: %local_bool8 = alloca <1 x i8>, align 1
+// CHECK-64-NEXT: %local_bool16 = alloca <1 x i8>, align 1
+// CHECK-64-NEXT: %local_bool32 = alloca <1 x i8>, align 1
+// CHECK-64-NEXT: %local_bool64 = alloca <1 x i8>, align 1
// CHECK-128: %local_i8 = alloca <16 x i8>, align 8
// CHECK-128-NEXT: %local_i16 = alloca <8 x i16>, align 8
@@ -1607,6 +1592,8 @@ void f() {
// CHECK-128-NEXT: %local_bool4 = alloca <4 x i8>, align 4
// CHECK-128-NEXT: %local_bool8 = alloca <2 x i8>, align 2
// CHECK-128-NEXT: %local_bool16 = alloca <1 x i8>, align 1
+// CHECK-128-NEXT: %local_bool32 = alloca <1 x i8>, align 1
+// CHECK-128-NEXT: %local_bool64 = alloca <1 x i8>, align 1
// CHECK-256: %local_i8 = alloca <32 x i8>, align 8
// CHECK-256-NEXT: %local_i16 = alloca <16 x i16>, align 8
@@ -1654,6 +1641,7 @@ void f() {
// CHECK-256-NEXT: %local_bool8 = alloca <4 x i8>, align 4
// CHECK-256-NEXT: %local_bool16 = alloca <2 x i8>, align 2
// CHECK-256-NEXT: %local_bool32 = alloca <1 x i8>, align 1
+// CHECK-256-NEXT: %local_bool64 = alloca <1 x i8>, align 1
// CHECK-512: %local_i8 = alloca <64 x i8>, align 8
// CHECK-512-NEXT: %local_i16 = alloca <32 x i16>, align 8
@@ -1811,6 +1799,9 @@ void f() {
// CHECK-64-NEXT: %local_arr_bool2 = alloca [3 x <4 x i8>], align 4
// CHECK-64-NEXT: %local_arr_bool4 = alloca [3 x <2 x i8>], align 2
// CHECK-64-NEXT: %local_arr_bool8 = alloca [3 x <1 x i8>], align 1
+// CHECK-64-NEXT: %local_arr_bool16 = alloca [3 x <1 x i8>], align 1
+// CHECK-64-NEXT: %local_arr_bool32 = alloca [3 x <1 x i8>], align 1
+// CHECK-64-NEXT: %local_arr_bool64 = alloca [3 x <1 x i8>], align 1
// CHECK-128: %local_arr_i8 = alloca [3 x <16 x i8>], align 8
// CHECK-128-NEXT: %local_arr_i16 = alloca [3 x <8 x i16>], align 8
@@ -1870,6 +1861,8 @@ void f() {
// CHECK-128-NEXT: %local_arr_bool4 = alloca [3 x <4 x i8>], align 4
// CHECK-128-NEXT: %local_arr_bool8 = alloca [3 x <2 x i8>], align 2
// CHECK-128-NEXT: %local_arr_bool16 = alloca [3 x <1 x i8>], align 1
+// CHECK-128-NEXT: %local_arr_bool32 = alloca [3 x <1 x i8>], align 1
+// CHECK-128-NEXT: %local_arr_bool64 = alloca [3 x <1 x i8>], align 1
// CHECK-256: %local_arr_i8 = alloca [3 x <32 x i8>], align 8
// CHECK-256-NEXT: %local_arr_i16 = alloca [3 x <16 x i16>], align 8
@@ -1930,6 +1923,7 @@ void f() {
// CHECK-256-NEXT: %local_arr_bool8 = alloca [3 x <4 x i8>], align 4
// CHECK-256-NEXT: %local_arr_bool16 = alloca [3 x <2 x i8>], align 2
// CHECK-256-NEXT: %local_arr_bool32 = alloca [3 x <1 x i8>], align 1
+// CHECK-256-NEXT: %local_arr_bool64 = alloca [3 x <1 x i8>], align 1
// CHECK-512: %local_arr_i8 = alloca [3 x <64 x i8>], align 8
// CHECK-512-NEXT: %local_arr_i16 = alloca [3 x <32 x i16>], align 8
diff --git a/clang/test/CodeGenCXX/riscv-mangle-rvv-fixed-vectors.cpp b/clang/test/CodeGenCXX/riscv-mangle-rvv-fixed-vectors.cpp
index c9e7313a021a5..70f0357127324 100644
--- a/clang/test/CodeGenCXX/riscv-mangle-rvv-fixed-vectors.cpp
+++ b/clang/test/CodeGenCXX/riscv-mangle-rvv-fixed-vectors.cpp
@@ -1,7 +1,7 @@
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu %s -emit-llvm -o - \
// RUN: -target-feature +f -target-feature +d -target-feature +zfh \
// RUN: -target-feature +zve64d -target-feature +zvfh -mvscale-min=1 \
-// RUN: -mvscale-max=1 | FileCheck %s --check-prefix=CHECK-64
+// RUN: -mvscale-max=1 | FileCheck %s --check-prefix=CHECK-64
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu %s -emit-llvm -o - \
// RUN: -target-feature +f -target-feature +d -target-feature +zfh \
// RUN: -target-feature +zve64d -target-feature +zvfh -mvscale-min=2 \
@@ -176,15 +176,9 @@ typedef vbool1_t fixed_bool1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fi
typedef vbool2_t fixed_bool2_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/2)));
typedef vbool4_t fixed_bool4_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/4)));
typedef vbool8_t fixed_bool8_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/8)));
-#if __riscv_v_fixed_vlen >= 128
typedef vbool16_t fixed_bool16_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/16)));
-#endif
-#if __riscv_v_fixed_vlen >= 256
typedef vbool32_t fixed_bool32_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/32)));
-#endif
-#if __riscv_v_fixed_vlen >= 512
typedef vbool64_t fixed_bool64_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen/64)));
-#endif
template <typename T> struct S {};
@@ -629,24 +623,24 @@ void bool4(S<fixed_bool4_t>) {}
// CHECK-1024: _Z5bool81SI9__RVV_VLSIu13__rvv_bool8_tLj128EEE
void bool8(S<fixed_bool8_t>) {}
-#if __riscv_v_fixed_vlen >= 128
+// CHECK-64: _Z6bool161SI9__RVV_VLSIu14__rvv_bool16_tLj4EEE
// CHECK-128: _Z6bool161SI9__RVV_VLSIu14__rvv_bool16_tLj8EEE
// CHECK-256: _Z6bool161SI9__RVV_VLSIu14__rvv_bool16_tLj16EEE
// CHECK-512: _Z6bool161SI9__RVV_VLSIu14__rvv_bool16_tLj32EEE
// CHECK-1024: _Z6bool161SI9__RVV_VLSIu14__rvv_bool16_tLj64EEE
//
void bool16(S<fixed_bool16_t>) {}
-#endif
-#if __riscv_v_fixed_vlen >= 256
+// CHECK-64: _Z6bool321SI9__RVV_VLSIu14__rvv_bool32_tLj2EEE
+// CHECK-128: _Z6bool321SI9__RVV_VLSIu14__rvv_bool32_tLj4EEE
// CHECK-256: _Z6bool321SI9__RVV_VLSIu14__rvv_bool32_tLj8EEE
// CHECK-512: _Z6bool321SI9__RVV_VLSIu14__rvv_bool32_tLj16EEE
// CHECK-1024: _Z6bool321SI9__RVV_VLSIu14__rvv_bool32_tLj32EEE
void bool32(S<fixed_bool32_t>) {}
-#endif
-#if __riscv_v_fixed_vlen >= 512
+// CHECK-64: _Z6bool641SI9__RVV_VLSIu14__rvv_bool64_tLj1EEE
+// CHECK-128: _Z6bool641SI9__RVV_VLSIu14__rvv_bool64_tLj2EEE
+// CHECK-256: _Z6bool641SI9__RVV_VLSIu14__rvv_bool64_tLj4EEE
// CHECK-512: _Z6bool641SI9__RVV_VLSIu14__rvv_bool64_tLj8EEE
// CHECK-1024: _Z6bool641SI9__RVV_VLSIu14__rvv_bool64_tLj16EEE
void bool64(S<fixed_bool64_t>) {}
-#endif
diff --git a/clang/test/Sema/attr-riscv-rvv-vector-bits.c b/clang/test/Sema/attr-riscv-rvv-vector-bits.c
index 60ba2aa034f6e..9ac904b043f82 100644
--- a/clang/test/Sema/attr-riscv-rvv-vector-bits.c
+++ b/clang/test/Sema/attr-riscv-rvv-vector-bits.c
@@ -232,15 +232,9 @@ typedef vbool1_t fixed_bool1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fi
typedef vbool2_t fixed_bool2_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 2)));
typedef vbool4_t fixed_bool4_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 4)));
typedef vbool8_t fixed_bool8_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 8)));
-#if __riscv_v_fixed_vlen / 16 >= 8
typedef vbool16_t fixed_bool16_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 16)));
-#endif
-#if __riscv_v_fixed_vlen / 32 >= 8
typedef vbool32_t fixed_bool32_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 32)));
-#endif
-#if __riscv_v_fixed_vlen / 64 >= 8
typedef vbool64_t fixed_bool64_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 64)));
-#endif
// Attribute must be attached to a single RVV vector or predicate type.
typedef void *badtype1 __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen))); // expected-error {{'riscv_rvv_vector_bits' attribute applied to non-RVV type 'void *'}}
@@ -418,12 +412,18 @@ _Static_assert(sizeof(fixed_bool4_t) == VECTOR_SIZE / 4, "");
_Static_assert(sizeof(fixed_bool8_t) == VECTOR_SIZE / 8, "");
#if __riscv_v_fixed_vlen / 16 >= 8
_Static_assert(sizeof(fixed_bool16_t) == VECTOR_SIZE / 16, "");
+#else
+_Static_assert(sizeof(fixed_bool16_t) == 1, "");
#endif
#if __riscv_v_fixed_vlen / 32 >= 8
_Static_assert(sizeof(fixed_bool32_t) == VECTOR_SIZE / 32, "");
+#else
+_Static_assert(sizeof(fixed_bool32_t) == 1, "");
#endif
#if __riscv_v_fixed_vlen / 64 >= 8
_Static_assert(sizeof(fixed_bool64_t) == VECTOR_SIZE / 64, "");
+#else
+_Static_assert(sizeof(fixed_bool64_t) == 1, "");
#endif
// --------------------------------------------------------------------------//
@@ -507,15 +507,9 @@ _Static_assert(__alignof__(fixed_bool1_t) == VECTOR_ALIGN, "");
_Static_assert(__alignof__(fixed_bool2_t) == (sizeof(fixed_bool2_t) < VECTOR_ALIGN ? sizeof(fixed_bool2_t) : VECTOR_ALIGN), "");
_Static_assert(__alignof__(fixed_bool4_t) == (sizeof(fixed_bool4_t) < VECTOR_ALIGN ? sizeof(fixed_bool4_t) : VECTOR_ALIGN), "");
_Static_assert(__alignof__(fixed_bool8_t) == (sizeof(fixed_bool8_t) < VECTOR_ALIGN ? sizeof(fixed_bool8_t) : VECTOR_ALIGN), "");
-#if __riscv_v_fixed_vlen / 16 >= 8
_Static_assert(__alignof__(fixed_bool16_t) == (sizeof(fixed_bool16_t) < VECTOR_ALIGN ? sizeof(fixed_bool16_t) : VECTOR_ALIGN), "");
-#endif
-#if __riscv_v_fixed_vlen / 32 >= 8
_Static_assert(__alignof__(fixed_bool32_t) == (sizeof(fixed_bool32_t) < VECTOR_ALIGN ? sizeof(fixed_bool32_t) : VECTOR_ALIGN), "");
-#endif
-#if __riscv_v_fixed_vlen / 64 >= 8
_Static_assert(__alignof__(fixed_bool64_t) == (sizeof(fixed_bool64_t) < VECTOR_ALIGN ? sizeof(fixed_bool64_t) : VECTOR_ALIGN), "");
-#endif
// --------------------------------------------------------------------------//
// Structs
More information about the cfe-commits
mailing list