[clang] [X86][Clang] Add AVX512 Integer Comparison Intrinsics for constexpr Evaluation (PR #164026)
via cfe-commits
cfe-commits at lists.llvm.org
Sat Oct 18 13:26:49 PDT 2025
https://github.com/sskzakaria updated https://github.com/llvm/llvm-project/pull/164026
>From dbcb924813e9d657e7aba3b7a9cd79b7635f63ab Mon Sep 17 00:00:00 2001
From: sskzakaria <ssskzakaria at proton.me>
Date: Fri, 17 Oct 2025 18:27:24 -0400
Subject: [PATCH 1/7] [X86][Clang] VectorExprEvaluator::VisitCallExpr /
 InterpretBuiltin - allow AVX512 integer comparison intrinsics in constexpr
 evaluation
---
clang/include/clang/Basic/BuiltinsX86.td | 36 ++++-----
clang/lib/AST/ByteCode/InterpBuiltin.cpp | 84 ++++++++++++++++++++
clang/lib/AST/ExprConstant.cpp | 81 +++++++++++++++++++
clang/lib/Headers/avx512vlbwintrin.h | 8 +-
clang/test/CodeGen/X86/avx512vlbw-builtins.c | 23 ++++++
5 files changed, 210 insertions(+), 22 deletions(-)
diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
index 62c70fba946be..e409042c5818e 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -1272,81 +1272,81 @@ let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr] in {
def knotdi : X86Builtin<"unsigned long long int(unsigned long long int)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def cmpb128_mask : X86Builtin<"unsigned short(_Vector<16, char>, _Vector<16, char>, _Constant int, unsigned short)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def cmpd128_mask : X86Builtin<"unsigned char(_Vector<4, int>, _Vector<4, int>, _Constant int, unsigned char)">;
def cmpq128_mask : X86Builtin<"unsigned char(_Vector<2, long long int>, _Vector<2, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def cmpw128_mask : X86Builtin<"unsigned char(_Vector<8, short>, _Vector<8, short>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def cmpb256_mask : X86Builtin<"unsigned int(_Vector<32, char>, _Vector<32, char>, _Constant int, unsigned int)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def cmpd256_mask : X86Builtin<"unsigned char(_Vector<8, int>, _Vector<8, int>, _Constant int, unsigned char)">;
def cmpq256_mask : X86Builtin<"unsigned char(_Vector<4, long long int>, _Vector<4, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def cmpw256_mask : X86Builtin<"unsigned short(_Vector<16, short>, _Vector<16, short>, _Constant int, unsigned short)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def cmpb512_mask : X86Builtin<"unsigned long long int(_Vector<64, char>, _Vector<64, char>, _Constant int, unsigned long long int)">;
}
-let Features = "avx512f", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def cmpd512_mask : X86Builtin<"unsigned short(_Vector<16, int>, _Vector<16, int>, _Constant int, unsigned short)">;
def cmpq512_mask : X86Builtin<"unsigned char(_Vector<8, long long int>, _Vector<8, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def cmpw512_mask : X86Builtin<"unsigned int(_Vector<32, short>, _Vector<32, short>, _Constant int, unsigned int)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def ucmpb128_mask : X86Builtin<"unsigned short(_Vector<16, char>, _Vector<16, char>, _Constant int, unsigned short)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def ucmpd128_mask : X86Builtin<"unsigned char(_Vector<4, int>, _Vector<4, int>, _Constant int, unsigned char)">;
def ucmpq128_mask : X86Builtin<"unsigned char(_Vector<2, long long int>, _Vector<2, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] in {
+let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def ucmpw128_mask : X86Builtin<"unsigned char(_Vector<8, short>, _Vector<8, short>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def ucmpb256_mask : X86Builtin<"unsigned int(_Vector<32, char>, _Vector<32, char>, _Constant int, unsigned int)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def ucmpd256_mask : X86Builtin<"unsigned char(_Vector<8, int>, _Vector<8, int>, _Constant int, unsigned char)">;
def ucmpq256_mask : X86Builtin<"unsigned char(_Vector<4, long long int>, _Vector<4, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] in {
+let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def ucmpw256_mask : X86Builtin<"unsigned short(_Vector<16, short>, _Vector<16, short>, _Constant int, unsigned short)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def ucmpb512_mask : X86Builtin<"unsigned long long int(_Vector<64, char>, _Vector<64, char>, _Constant int, unsigned long long int)">;
}
-let Features = "avx512f", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def ucmpd512_mask : X86Builtin<"unsigned short(_Vector<16, int>, _Vector<16, int>, _Constant int, unsigned short)">;
def ucmpq512_mask : X86Builtin<"unsigned char(_Vector<8, long long int>, _Vector<8, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, RequiredVectorWidth<512>] in {
+let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def ucmpw512_mask : X86Builtin<"unsigned int(_Vector<32, short>, _Vector<32, short>, _Constant int, unsigned int)">;
}
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 0ee18be166845..0ba8fc0cbc203 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3101,6 +3101,62 @@ static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC,
return true;
}
+ static bool interp__builtin_cmp_mask(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call, unsigned ID,
+ bool IsUnsigned) {
+ assert(Call->getNumArgs() == 4);
+
+ APSInt Mask = popToAPSInt(S, Call->getArg(3));
+ APSInt Opcode = popToAPSInt(S, Call->getArg(2));
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+ const Pointer &RHS = S.Stk.pop<Pointer>();
+
+ assert(LHS.getNumElems() == RHS.getNumElems());
+
+ APInt RetMask = APInt::getZero(LHS.getNumElems());
+ unsigned VectorLen = LHS.getNumElems();
+ PrimType ElemT = LHS.getFieldDesc()->getPrimType();
+
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
+ APSInt A = LHS.elem<T>(ElemNum).toAPSInt();
+ APSInt B = RHS.elem<T>(ElemNum).toAPSInt();
+ bool result = false;
+ switch (Opcode.getExtValue() & 0x7) {
+ case 0x00: // _MM_CMPINT_EQ
+ result = (A == B);
+ break;
+ case 0x01: // _MM_CMPINT_LT
+ result = IsUnsigned ? A.ult(B) : A.slt(B);
+ break;
+ case 0x02: // _MM_CMPINT_LE
+ result = IsUnsigned ? A.ule(B) : A.sle(B);
+ break;
+ case 0x03: // _MM_CMPINT_FALSE
+ result = false;
+ break;
+ case 0x04: // _MM_CMPINT_NE
+ result = (A != B);
+ break;
+ case 0x05: // _MM_CMPINT_NLT (>=)
+ result = IsUnsigned ? A.uge(B) : A.sge(B);
+ break;
+ case 0x06: // _MM_CMPINT_NLE (>)
+ result = IsUnsigned ? A.ugt(B) : A.sgt(B);
+ break;
+ case 0x07: // _MM_CMPINT_TRUE
+ result = true;
+ break;
+ }
+
+ RetMask.setBitVal(ElemNum, Mask[ElemNum] && result);
+ }
+ });
+
+ pushInteger(S, RetMask, Call->getType());
+ return true;
+ }
+
static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
const CallExpr *Call) {
assert(Call->getNumArgs() == 1);
@@ -4141,6 +4197,34 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_vec_set_v4di:
return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
+ case X86::BI__builtin_ia32_cmpb128_mask:
+ case X86::BI__builtin_ia32_cmpw128_mask:
+ case X86::BI__builtin_ia32_cmpd128_mask:
+ case X86::BI__builtin_ia32_cmpq128_mask:
+ case X86::BI__builtin_ia32_cmpb256_mask:
+ case X86::BI__builtin_ia32_cmpw256_mask:
+ case X86::BI__builtin_ia32_cmpd256_mask:
+ case X86::BI__builtin_ia32_cmpq256_mask:
+ case X86::BI__builtin_ia32_cmpb512_mask:
+ case X86::BI__builtin_ia32_cmpw512_mask:
+ case X86::BI__builtin_ia32_cmpd512_mask:
+ case X86::BI__builtin_ia32_cmpq512_mask:
+ return interp__builtin_cmp_mask(S, OpPC, Call, BuiltinID, /*IsUnsigned*/false);
+
+ case X86::BI__builtin_ia32_ucmpb128_mask:
+ case X86::BI__builtin_ia32_ucmpw128_mask:
+ case X86::BI__builtin_ia32_ucmpd128_mask:
+ case X86::BI__builtin_ia32_ucmpq128_mask:
+ case X86::BI__builtin_ia32_ucmpb256_mask:
+ case X86::BI__builtin_ia32_ucmpw256_mask:
+ case X86::BI__builtin_ia32_ucmpd256_mask:
+ case X86::BI__builtin_ia32_ucmpq256_mask:
+ case X86::BI__builtin_ia32_ucmpb512_mask:
+ case X86::BI__builtin_ia32_ucmpw512_mask:
+ case X86::BI__builtin_ia32_ucmpd512_mask:
+ case X86::BI__builtin_ia32_ucmpq512_mask:
+ return interp__builtin_cmp_mask(S, OpPC, Call, BuiltinID, /*IsUnsigned*/true);
+
default:
S.FFDiag(S.Current->getLocation(OpPC),
diag::note_invalid_subexpr_in_const_expr)
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 16141b27f4ce8..ef17e16388fd8 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -15449,6 +15449,87 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
unsigned Idx = static_cast<unsigned>(IdxAPS.getZExtValue() & (N - 1));
return Success(Vec.getVectorElt(Idx).getInt(), E);
}
+
+ case clang::X86::BI__builtin_ia32_cmpb128_mask:
+ case clang::X86::BI__builtin_ia32_cmpw128_mask:
+ case clang::X86::BI__builtin_ia32_cmpd128_mask:
+ case clang::X86::BI__builtin_ia32_cmpq128_mask:
+ case clang::X86::BI__builtin_ia32_cmpb256_mask:
+ case clang::X86::BI__builtin_ia32_cmpw256_mask:
+ case clang::X86::BI__builtin_ia32_cmpd256_mask:
+ case clang::X86::BI__builtin_ia32_cmpq256_mask:
+ case clang::X86::BI__builtin_ia32_cmpb512_mask:
+ case clang::X86::BI__builtin_ia32_cmpw512_mask:
+ case clang::X86::BI__builtin_ia32_cmpd512_mask:
+ case clang::X86::BI__builtin_ia32_cmpq512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpb128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpw128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpd128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpq128_mask:
+ case clang::X86::BI__builtin_ia32_ucmpb256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpw256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpd256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpq256_mask:
+ case clang::X86::BI__builtin_ia32_ucmpb512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpw512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpd512_mask:
+ case clang::X86::BI__builtin_ia32_ucmpq512_mask: {
+ assert(E->getNumArgs() == 4);
+
+ bool IsUnsigned = (BuiltinOp >= clang::X86::BI__builtin_ia32_ucmpb128_mask &&
+ BuiltinOp <= clang::X86::BI__builtin_ia32_ucmpq512_mask);
+
+ APValue LHS, RHS;
+ APSInt Mask, Opcode;
+ if (!EvaluateVector(E->getArg(0), LHS, Info) ||
+ !EvaluateVector(E->getArg(1), RHS, Info) ||
+ !EvaluateInteger(E->getArg(2), Opcode, Info) ||
+ !EvaluateInteger(E->getArg(3), Mask, Info))
+ return false;
+
+ assert(LHS.getVectorLength() == RHS.getVectorLength());
+
+ APSInt RetMask = APSInt::getUnsigned(0);
+ unsigned VectorLen = LHS.getVectorLength();
+
+ for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
+ APSInt A = LHS.getVectorElt(ElemNum).getInt();
+ APSInt B = RHS.getVectorElt(ElemNum).getInt();
+ bool result = false;
+
+ switch (Opcode.getExtValue() & 0x7) {
+ case 0: // _MM_CMPINT_EQ
+ result = (A == B);
+ break;
+ case 1: // _MM_CMPINT_LT
+ result = IsUnsigned ? A.ult(B) : A.slt(B);
+ break;
+ case 2: // _MM_CMPINT_LE
+ result = IsUnsigned ? A.ule(B) : A.sle(B);
+ break;
+ case 3: // _MM_CMPINT_FALSE
+ result = false;
+ break;
+ case 4: // _MM_CMPINT_NE
+ result = (A != B);
+ break;
+ case 5: // _MM_CMPINT_NLT (>=)
+ result = IsUnsigned ? A.uge(B) : A.sge(B);
+ break;
+ case 6: // _MM_CMPINT_NLE (>)
+ result = IsUnsigned ? A.ugt(B) : A.sgt(B);
+ break;
+ case 7: // _MM_CMPINT_TRUE
+ result = true;
+ break;
+ }
+
+ RetMask.setBitVal(ElemNum, Mask[ElemNum] && result);
+ }
+
+ RetMask.setIsUnsigned(true);
+ return Success(APValue(RetMask), E);
+ }
}
}
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index 639fb60f476c6..ff7ee777ea82a 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -2385,20 +2385,20 @@ _mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
(__mmask32) __U);
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_test_epi8_mask (__m128i __A, __m128i __B)
{
return _mm_cmpneq_epi8_mask (_mm_and_si128(__A, __B), _mm_setzero_si128());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_test_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
{
return _mm_mask_cmpneq_epi8_mask (__U, _mm_and_si128 (__A, __B),
_mm_setzero_si128());
}
-static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
+static __inline__ __mmask32 __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_test_epi8_mask (__m256i __A, __m256i __B)
{
return _mm256_cmpneq_epi8_mask (_mm256_and_si256(__A, __B),
@@ -2439,7 +2439,7 @@ _mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
_mm256_setzero_si256());
}
-static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_testn_epi8_mask (__m128i __A, __m128i __B)
{
return _mm_cmpeq_epi8_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
diff --git a/clang/test/CodeGen/X86/avx512vlbw-builtins.c b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
index d569283928a0a..1cc4518484c19 100644
--- a/clang/test/CodeGen/X86/avx512vlbw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
@@ -2890,6 +2890,12 @@ __mmask16 test_mm_test_epi8_mask(__m128i __A, __m128i __B) {
return _mm_test_epi8_mask(__A, __B);
}
+TEST_CONSTEXPR(_mm_test_epi8_mask(
+ (__m128i)(__v16qi){1, 2, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ (__m128i)(__v16qi){1, 2, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
+)
+== (__mmask16)0xfffb);
+
__mmask16 test_mm_mask_test_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: test_mm_mask_test_epi8_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
@@ -2897,6 +2903,12 @@ __mmask16 test_mm_mask_test_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
// CHECK: and <16 x i1> %{{.*}}, %{{.*}}
return _mm_mask_test_epi8_mask(__U, __A, __B);
}
+TEST_CONSTEXPR(_mm_mask_test_epi8_mask(
+ 0xFFFF,
+ (__m128i)(__v16qi){1, 2, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ (__m128i)(__v16qi){1, 2, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
+)
+== (__mmask16)0xfffb);
__mmask32 test_mm256_test_epi8_mask(__m256i __A, __m256i __B) {
// CHECK-LABEL: test_mm256_test_epi8_mask
@@ -2904,6 +2916,11 @@ __mmask32 test_mm256_test_epi8_mask(__m256i __A, __m256i __B) {
// CHECK: icmp ne <32 x i8> %{{.*}}, %{{.*}}
return _mm256_test_epi8_mask(__A, __B);
}
+TEST_CONSTEXPR(_mm256_test_epi8_mask(
+ (__m256i)(__v32qi){1, 2, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
+ (__m256i)(__v32qi){1, 2, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
+)
+== (__mmask32)0xfffbfffb);
__mmask32 test_mm256_mask_test_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: test_mm256_mask_test_epi8_mask
@@ -2950,6 +2967,12 @@ __mmask16 test_mm_testn_epi8_mask(__m128i __A, __m128i __B) {
return _mm_testn_epi8_mask(__A, __B);
}
+TEST_CONSTEXPR(_mm_testn_epi8_mask(
+ (__m128i)(__v16qi){1, 2, 77, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1, 16, 16},
+ (__m128i)(__v16qi){2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15}
+)
+== (__mmask16)0xe001);
+
__mmask16 test_mm_mask_testn_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
// CHECK-LABEL: test_mm_mask_testn_epi8_mask
// CHECK: and <2 x i64> %{{.*}}, %{{.*}}
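For context, the net effect of this first patch is that the mask-compare
builtins participate in constant evaluation. A minimal sketch of what now
folds at compile time, assuming a C++17 translation unit built with
-mavx512bw -mavx512vl (the values are illustrative):

#include <immintrin.h>

// cmpb128_mask is now marked Constexpr, so the comparison is evaluated by
// the constant interpreter instead of requiring a runtime VPCMPB.
constexpr __v16qi A = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
static_assert(_mm_cmp_epi8_mask((__m128i)A, (__m128i)A, _MM_CMPINT_EQ) ==
                  (__mmask16)0xFFFF,
              "all sixteen lanes compare equal");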
>From fec68e4aecd482f216dcf520b94fb60f44075e22 Mon Sep 17 00:00:00 2001
From: sskzakaria <ssskzakaria at proton.me>
Date: Fri, 17 Oct 2025 21:32:39 -0400
Subject: [PATCH 2/7] Fix build error
---
clang/include/clang/Basic/BuiltinsX86.td | 54 ++++++++----
clang/lib/AST/ByteCode/InterpBuiltin.cpp | 104 ++++++++++++-----------
clang/lib/AST/ExprConstant.cpp | 60 +++++++------
clang/lib/Headers/avx512vlbwintrin.h | 12 +--
4 files changed, 125 insertions(+), 105 deletions(-)
diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
index e409042c5818e..a2a1746fd1cae 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -1272,81 +1272,99 @@ let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr] in {
def knotdi : X86Builtin<"unsigned long long int(unsigned long long int)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def cmpb128_mask : X86Builtin<"unsigned short(_Vector<16, char>, _Vector<16, char>, _Constant int, unsigned short)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+let Features = "avx512vl",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def cmpd128_mask : X86Builtin<"unsigned char(_Vector<4, int>, _Vector<4, int>, _Constant int, unsigned char)">;
def cmpq128_mask : X86Builtin<"unsigned char(_Vector<2, long long int>, _Vector<2, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def cmpw128_mask : X86Builtin<"unsigned char(_Vector<8, short>, _Vector<8, short>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def cmpb256_mask : X86Builtin<"unsigned int(_Vector<32, char>, _Vector<32, char>, _Constant int, unsigned int)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
+let Features = "avx512vl",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def cmpd256_mask : X86Builtin<"unsigned char(_Vector<8, int>, _Vector<8, int>, _Constant int, unsigned char)">;
def cmpq256_mask : X86Builtin<"unsigned char(_Vector<4, long long int>, _Vector<4, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def cmpw256_mask : X86Builtin<"unsigned short(_Vector<16, short>, _Vector<16, short>, _Constant int, unsigned short)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
+let Features = "avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def cmpb512_mask : X86Builtin<"unsigned long long int(_Vector<64, char>, _Vector<64, char>, _Constant int, unsigned long long int)">;
}
-let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
+let Features = "avx512f",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def cmpd512_mask : X86Builtin<"unsigned short(_Vector<16, int>, _Vector<16, int>, _Constant int, unsigned short)">;
def cmpq512_mask : X86Builtin<"unsigned char(_Vector<8, long long int>, _Vector<8, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
+let Features = "avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def cmpw512_mask : X86Builtin<"unsigned int(_Vector<32, short>, _Vector<32, short>, _Constant int, unsigned int)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def ucmpb128_mask : X86Builtin<"unsigned short(_Vector<16, char>, _Vector<16, char>, _Constant int, unsigned short)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+let Features = "avx512vl",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def ucmpd128_mask : X86Builtin<"unsigned char(_Vector<4, int>, _Vector<4, int>, _Constant int, unsigned char)">;
def ucmpq128_mask : X86Builtin<"unsigned char(_Vector<2, long long int>, _Vector<2, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<128>] in {
def ucmpw128_mask : X86Builtin<"unsigned char(_Vector<8, short>, _Vector<8, short>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def ucmpb256_mask : X86Builtin<"unsigned int(_Vector<32, char>, _Vector<32, char>, _Constant int, unsigned int)">;
}
-let Features = "avx512vl", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
+let Features = "avx512vl",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def ucmpd256_mask : X86Builtin<"unsigned char(_Vector<8, int>, _Vector<8, int>, _Constant int, unsigned char)">;
def ucmpq256_mask : X86Builtin<"unsigned char(_Vector<4, long long int>, _Vector<4, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512vl,avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
+let Features = "avx512vl,avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<256>] in {
def ucmpw256_mask : X86Builtin<"unsigned short(_Vector<16, short>, _Vector<16, short>, _Constant int, unsigned short)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
+let Features = "avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def ucmpb512_mask : X86Builtin<"unsigned long long int(_Vector<64, char>, _Vector<64, char>, _Constant int, unsigned long long int)">;
}
-let Features = "avx512f", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
+let Features = "avx512f",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def ucmpd512_mask : X86Builtin<"unsigned short(_Vector<16, int>, _Vector<16, int>, _Constant int, unsigned short)">;
def ucmpq512_mask : X86Builtin<"unsigned char(_Vector<8, long long int>, _Vector<8, long long int>, _Constant int, unsigned char)">;
}
-let Features = "avx512bw", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
+let Features = "avx512bw",
+ Attributes = [NoThrow, Const, Constexpr, RequiredVectorWidth<512>] in {
def ucmpw512_mask : X86Builtin<"unsigned int(_Vector<32, short>, _Vector<32, short>, _Constant int, unsigned int)">;
}
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 0ba8fc0cbc203..2526c13b529a7 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3101,61 +3101,61 @@ static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC,
return true;
}
- static bool interp__builtin_cmp_mask(InterpState &S, CodePtr OpPC,
- const CallExpr *Call, unsigned ID,
- bool IsUnsigned) {
- assert(Call->getNumArgs() == 4);
+static bool interp__builtin_cmp_mask(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call, unsigned ID,
+ bool IsUnsigned) {
+ assert(Call->getNumArgs() == 4);
- APSInt Mask = popToAPSInt(S, Call->getArg(3));
- APSInt Opcode = popToAPSInt(S, Call->getArg(2));
- const Pointer &LHS = S.Stk.pop<Pointer>();
- const Pointer &RHS = S.Stk.pop<Pointer>();
-
- assert(LHS.getNumElems() == RHS.getNumElems());
+ APSInt Mask = popToAPSInt(S, Call->getArg(3));
+ APSInt Opcode = popToAPSInt(S, Call->getArg(2));
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+ const Pointer &RHS = S.Stk.pop<Pointer>();
- APInt RetMask = APInt::getZero(LHS.getNumElems());
- unsigned VectorLen = LHS.getNumElems();
- PrimType ElemT = LHS.getFieldDesc()->getPrimType();
+ assert(LHS.getNumElems() == RHS.getNumElems());
- INT_TYPE_SWITCH_NO_BOOL(ElemT, {
- for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
- APSInt A = LHS.elem<T>(ElemNum).toAPSInt();
- APSInt B = RHS.elem<T>(ElemNum).toAPSInt();
- bool result = false;
- switch (Opcode.getExtValue() & 0x7) {
- case 0x00: // _MM_CMPINT_EQ
- result = (A == B);
- break;
- case 0x01: // _MM_CMPINT_LT
- result = IsUnsigned ? A.ult(B) : A.slt(B);
- break;
- case 0x02: // _MM_CMPINT_LE
- result = IsUnsigned ? A.ule(B) : A.sle(B);
- break;
- case 0x03: // _MM_CMPINT_FALSE
- result = false;
- break;
- case 0x04: // _MM_CMPINT_NE
- result = (A != B);
- break;
- case 0x05: // _MM_CMPINT_NLT (>=)
- result = IsUnsigned ? A.uge(B) : A.sge(B);
- break;
- case 0x06: // _MM_CMPINT_NLE (>)
- result = IsUnsigned ? A.ugt(B) : A.sgt(B);
- break;
- case 0x07: // _MM_CMPINT_TRUE
- result = true;
- break;
- }
+ APInt RetMask = APInt::getZero(LHS.getNumElems());
+ unsigned VectorLen = LHS.getNumElems();
+ PrimType ElemT = LHS.getFieldDesc()->getPrimType();
- RetMask.setBitVal(ElemNum, Mask[ElemNum] && result);
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
+ APSInt A = LHS.elem<T>(ElemNum).toAPSInt();
+ APSInt B = RHS.elem<T>(ElemNum).toAPSInt();
+ bool result = false;
+ switch (Opcode.getExtValue() & 0x7) {
+ case 0x00: // _MM_CMPINT_EQ
+ result = (A == B);
+ break;
+ case 0x01: // _MM_CMPINT_LT
+ result = IsUnsigned ? A.ult(B) : A.slt(B);
+ break;
+ case 0x02: // _MM_CMPINT_LE
+ result = IsUnsigned ? A.ule(B) : A.sle(B);
+ break;
+ case 0x03: // _MM_CMPINT_FALSE
+ result = false;
+ break;
+ case 0x04: // _MM_CMPINT_NE
+ result = (A != B);
+ break;
+ case 0x05: // _MM_CMPINT_NLT (>=)
+ result = IsUnsigned ? A.uge(B) : A.sge(B);
+ break;
+ case 0x06: // _MM_CMPINT_NLE (>)
+ result = IsUnsigned ? A.ugt(B) : A.sgt(B);
+ break;
+ case 0x07: // _MM_CMPINT_TRUE
+ result = true;
+ break;
}
- });
- pushInteger(S, RetMask, Call->getType());
- return true;
- }
+ RetMask.setBitVal(ElemNum, Mask[ElemNum] && result);
+ }
+ });
+
+ pushInteger(S, RetMask, Call->getType());
+ return true;
+}
static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
const CallExpr *Call) {
@@ -4209,7 +4209,8 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_cmpw512_mask:
case X86::BI__builtin_ia32_cmpd512_mask:
case X86::BI__builtin_ia32_cmpq512_mask:
- return interp__builtin_cmp_mask(S, OpPC, Call, BuiltinID, /*IsUnsigned*/false);
+ return interp__builtin_cmp_mask(S, OpPC, Call, BuiltinID,
+ /*IsUnsigned*/ false);
case X86::BI__builtin_ia32_ucmpb128_mask:
case X86::BI__builtin_ia32_ucmpw128_mask:
@@ -4223,7 +4224,8 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_ucmpw512_mask:
case X86::BI__builtin_ia32_ucmpd512_mask:
case X86::BI__builtin_ia32_ucmpq512_mask:
- return interp__builtin_cmp_mask(S, OpPC, Call, BuiltinID, /*IsUnsigned*/true);
+ return interp__builtin_cmp_mask(S, OpPC, Call, BuiltinID,
+ /*IsUnsigned*/ true);
default:
S.FFDiag(S.Current->getLocation(OpPC),
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index ef17e16388fd8..ac3e3c51fb983 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -15476,8 +15476,9 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
case clang::X86::BI__builtin_ia32_ucmpq512_mask: {
assert(E->getNumArgs() == 4);
- bool IsUnsigned = (BuiltinOp >= clang::X86::BI__builtin_ia32_ucmpb128_mask &&
- BuiltinOp <= clang::X86::BI__builtin_ia32_ucmpq512_mask);
+ bool IsUnsigned =
+ (BuiltinOp >= clang::X86::BI__builtin_ia32_ucmpb128_mask &&
+ BuiltinOp <= clang::X86::BI__builtin_ia32_ucmpq512_mask);
APValue LHS, RHS;
APSInt Mask, Opcode;
@@ -15487,41 +15488,44 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
!EvaluateInteger(E->getArg(3), Mask, Info))
return false;
- assert(LHS.getVectorLength() == RHS.getVectorLength());
+ assert(LHS.getVectorLength() == RHS.getVectorLength());
- APSInt RetMask = APSInt::getUnsigned(0);
unsigned VectorLen = LHS.getVectorLength();
+ unsigned RetWidth = VectorLen ? VectorLen : 1;
+ if (Mask.getBitWidth() > RetWidth)
+ RetWidth = Mask.getBitWidth();
+ APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
APSInt A = LHS.getVectorElt(ElemNum).getInt();
APSInt B = RHS.getVectorElt(ElemNum).getInt();
bool result = false;
switch (Opcode.getExtValue() & 0x7) {
- case 0: // _MM_CMPINT_EQ
- result = (A == B);
- break;
- case 1: // _MM_CMPINT_LT
- result = IsUnsigned ? A.ult(B) : A.slt(B);
- break;
- case 2: // _MM_CMPINT_LE
- result = IsUnsigned ? A.ule(B) : A.sle(B);
- break;
- case 3: // _MM_CMPINT_FALSE
- result = false;
- break;
- case 4: // _MM_CMPINT_NE
- result = (A != B);
- break;
- case 5: // _MM_CMPINT_NLT (>=)
- result = IsUnsigned ? A.uge(B) : A.sge(B);
- break;
- case 6: // _MM_CMPINT_NLE (>)
- result = IsUnsigned ? A.ugt(B) : A.sgt(B);
- break;
- case 7: // _MM_CMPINT_TRUE
- result = true;
- break;
+ case 0: // _MM_CMPINT_EQ
+ result = (A == B);
+ break;
+ case 1: // _MM_CMPINT_LT
+ result = IsUnsigned ? A.ult(B) : A.slt(B);
+ break;
+ case 2: // _MM_CMPINT_LE
+ result = IsUnsigned ? A.ule(B) : A.sle(B);
+ break;
+ case 3: // _MM_CMPINT_FALSE
+ result = false;
+ break;
+ case 4: // _MM_CMPINT_NE
+ result = (A != B);
+ break;
+ case 5: // _MM_CMPINT_NLT (>=)
+ result = IsUnsigned ? A.uge(B) : A.sge(B);
+ break;
+ case 6: // _MM_CMPINT_NLE (>)
+ result = IsUnsigned ? A.ugt(B) : A.sgt(B);
+ break;
+ case 7: // _MM_CMPINT_TRUE
+ result = true;
+ break;
}
RetMask.setBitVal(ElemNum, Mask[ElemNum] && result);
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index ff7ee777ea82a..97e48357f3ccc 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -2386,21 +2386,18 @@ _mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
}
static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_test_epi8_mask (__m128i __A, __m128i __B)
-{
+_mm_test_epi8_mask(__m128i __A, __m128i __B) {
return _mm_cmpneq_epi8_mask (_mm_and_si128(__A, __B), _mm_setzero_si128());
}
static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask_test_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
-{
+_mm_mask_test_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
return _mm_mask_cmpneq_epi8_mask (__U, _mm_and_si128 (__A, __B),
_mm_setzero_si128());
}
static __inline__ __mmask32 __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_test_epi8_mask (__m256i __A, __m256i __B)
-{
+_mm256_test_epi8_mask(__m256i __A, __m256i __B) {
return _mm256_cmpneq_epi8_mask (_mm256_and_si256(__A, __B),
_mm256_setzero_si256());
}
@@ -2440,8 +2437,7 @@ _mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
}
static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_testn_epi8_mask (__m128i __A, __m128i __B)
-{
+_mm_testn_epi8_mask(__m128i __A, __m128i __B) {
return _mm_cmpeq_epi8_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
}
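Both evaluators (the bytecode interpreter and ExprConstant) implement the
same eight-way predicate selected by the low three bits of the immediate. As
a standalone reference, the per-lane semantics mirror this scalar model (an
illustrative helper only; the real code operates on APSInt):

// Per-lane predicate for (Imm & 0x7). The cmp* builtins compare signed
// lanes, the ucmp* builtins unsigned ones.
static bool CmpLane(long long A, long long B, int Imm, bool IsUnsigned) {
  unsigned long long UA = (unsigned long long)A;
  unsigned long long UB = (unsigned long long)B;
  switch (Imm & 0x7) {
  case 0: return A == B;                         // _MM_CMPINT_EQ
  case 1: return IsUnsigned ? UA < UB : A < B;   // _MM_CMPINT_LT
  case 2: return IsUnsigned ? UA <= UB : A <= B; // _MM_CMPINT_LE
  case 3: return false;                          // _MM_CMPINT_FALSE
  case 4: return A != B;                         // _MM_CMPINT_NE
  case 5: return IsUnsigned ? UA >= UB : A >= B; // _MM_CMPINT_NLT (>=)
  case 6: return IsUnsigned ? UA > UB : A > B;   // _MM_CMPINT_NLE (>)
  default: return true;                          // _MM_CMPINT_TRUE
  }
}

Each lane's result is additionally ANDed with the corresponding bit of the
incoming write mask, which is what Mask[ElemNum] && result expresses.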
>From ee703e034a3e044ba85af6ab1bf3b3144422015c Mon Sep 17 00:00:00 2001
From: sskzakaria <ssskzakaria at proton.me>
Date: Fri, 17 Oct 2025 23:59:25 -0400
Subject: [PATCH 3/7] Fix operand pop order
---
clang/lib/AST/ByteCode/InterpBuiltin.cpp | 2 +-
clang/test/CodeGen/X86/avx512vlbw-builtins.c | 15 +++++++++++++++
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 2526c13b529a7..1f428360f73aa 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3108,8 +3108,8 @@ static bool interp__builtin_cmp_mask(InterpState &S, CodePtr OpPC,
APSInt Mask = popToAPSInt(S, Call->getArg(3));
APSInt Opcode = popToAPSInt(S, Call->getArg(2));
- const Pointer &LHS = S.Stk.pop<Pointer>();
const Pointer &RHS = S.Stk.pop<Pointer>();
+ const Pointer &LHS = S.Stk.pop<Pointer>();
assert(LHS.getNumElems() == RHS.getNumElems());
diff --git a/clang/test/CodeGen/X86/avx512vlbw-builtins.c b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
index 1cc4518484c19..5c6a343559c52 100644
--- a/clang/test/CodeGen/X86/avx512vlbw-builtins.c
+++ b/clang/test/CodeGen/X86/avx512vlbw-builtins.c
@@ -645,6 +645,21 @@ __mmask16 test_mm_cmp_epi8_mask(__m128i __a, __m128i __b) {
return (__mmask16)_mm_cmp_epi8_mask(__a, __b, 0);
}
+TEST_CONSTEXPR(_mm_cmpeq_epi8_mask(
+ ((__m128i)(__v16qi){5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}),
+ ((__m128i)(__v16qi){3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3})
+) == (__mmask16)0x0000);
+
+TEST_CONSTEXPR(_mm_cmplt_epi8_mask(
+ ((__m128i)(__v16qi){5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}),
+ ((__m128i)(__v16qi){3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3})
+) == (__mmask16)0x0u);
+
+TEST_CONSTEXPR(_mm_cmple_epi8_mask(
+ ((__m128i)(__v16qi){3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}),
+ ((__m128i)(__v16qi){3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3})
+) == (__mmask16)0xffff);
+
__mmask16 test_mm_mask_cmp_epi8_mask(__mmask16 __u, __m128i __a, __m128i __b) {
// CHECK-LABEL: test_mm_mask_cmp_epi8_mask
// CHECK: icmp eq <16 x i8> %{{.*}}, %{{.*}}
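The pop-order fix matters because operands are pushed left to right, leaving
the right-hand vector on top of the stack; with the pops swapped, every
asymmetric predicate computed b[i] <op> a[i]. An illustrative check in the
style of the tests above (values invented for the example):

// _MM_CMPINT_LT must test a[i] < b[i]; the swapped pops would have produced
// 0x0000 here instead of 0xffff.
TEST_CONSTEXPR(_mm_cmp_epi8_mask(
    ((__m128i)(__v16qi){3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3}),
    ((__m128i)(__v16qi){5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}),
    _MM_CMPINT_LT) == (__mmask16)0xffff);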
>From 3b6e186448cd3c031ef903ce894431eb6a77a0a2 Mon Sep 17 00:00:00 2001
From: sskzakaria <ssskzakaria at proton.me>
Date: Sat, 18 Oct 2025 03:30:31 -0400
Subject: [PATCH 4/7] Move the type switch into the loop body
---
clang/lib/AST/ByteCode/InterpBuiltin.cpp | 32 ++++++++++++------------
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 1f428360f73aa..bab6a45b43251 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3117,42 +3117,42 @@ static bool interp__builtin_cmp_mask(InterpState &S, CodePtr OpPC,
unsigned VectorLen = LHS.getNumElems();
PrimType ElemT = LHS.getFieldDesc()->getPrimType();
- INT_TYPE_SWITCH_NO_BOOL(ElemT, {
for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
- APSInt A = LHS.elem<T>(ElemNum).toAPSInt();
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
APSInt B = RHS.elem<T>(ElemNum).toAPSInt();
- bool result = false;
+ bool Result = false;
switch (Opcode.getExtValue() & 0x7) {
case 0x00: // _MM_CMPINT_EQ
- result = (A == B);
+ Result = (LHS.elem<T>(ElemNum).toAPSInt() == RHS.elem<T>(ElemNum).toAPSInt());
break;
case 0x01: // _MM_CMPINT_LT
- result = IsUnsigned ? A.ult(B) : A.slt(B);
+ Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().ult(RHS.elem<T>(ElemNum).toAPSInt()) : LHS.elem<T>(ElemNum).toAPSInt().slt(RHS.elem<T>(ElemNum).toAPSInt());
break;
case 0x02: // _MM_CMPINT_LE
- result = IsUnsigned ? A.ule(B) : A.sle(B);
+ Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().ule(RHS.elem<T>(ElemNum).toAPSInt()) : LHS.elem<T>(ElemNum).toAPSInt().sle(RHS.elem<T>(ElemNum).toAPSInt());
break;
case 0x03: // _MM_CMPINT_FALSE
- result = false;
+ Result = false;
break;
case 0x04: // _MM_CMPINT_NE
- result = (A != B);
+ Result = (LHS.elem<T>(ElemNum).toAPSInt() != RHS.elem<T>(ElemNum).toAPSInt());
break;
case 0x05: // _MM_CMPINT_NLT (>=)
- result = IsUnsigned ? A.uge(B) : A.sge(B);
+ Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().uge(RHS.elem<T>(ElemNum).toAPSInt()) : LHS.elem<T>(ElemNum).toAPSInt().sge(RHS.elem<T>(ElemNum).toAPSInt());
break;
case 0x06: // _MM_CMPINT_NLE (>)
- result = IsUnsigned ? A.ugt(B) : A.sgt(B);
+ Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().ugt(RHS.elem<T>(ElemNum).toAPSInt()) : LHS.elem<T>(ElemNum).toAPSInt().sgt(RHS.elem<T>(ElemNum).toAPSInt());
break;
case 0x07: // _MM_CMPINT_TRUE
- result = true;
+ Result = true;
break;
}
- RetMask.setBitVal(ElemNum, Mask[ElemNum] && result);
- }
- });
+ RetMask.setBitVal(ElemNum, Mask[ElemNum] && Result);
+ });
+ }
+
pushInteger(S, RetMask, Call->getType());
return true;
}
@@ -4210,7 +4210,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_cmpd512_mask:
case X86::BI__builtin_ia32_cmpq512_mask:
return interp__builtin_cmp_mask(S, OpPC, Call, BuiltinID,
- /*IsUnsigned*/ false);
+ /*IsUnsigned=*/ false);
case X86::BI__builtin_ia32_ucmpb128_mask:
case X86::BI__builtin_ia32_ucmpw128_mask:
@@ -4225,7 +4225,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_ucmpd512_mask:
case X86::BI__builtin_ia32_ucmpq512_mask:
return interp__builtin_cmp_mask(S, OpPC, Call, BuiltinID,
- /*IsUnsigned*/ true);
+ /*IsUnsigned=*/ true);
default:
S.FFDiag(S.Current->getLocation(OpPC),
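INT_TYPE_SWITCH_NO_BOOL instantiates its block once per integral element type
and binds the concrete type as T, so moving it inside the loop trades one
dispatch per element for not stamping out the whole loop body per type. A
simplified model of the expansion (a sketch only; the actual macro lives in
clang/lib/AST/ByteCode/PrimType.h):

switch (ElemT) {
case PT_Sint8: {
  using T = PrimConv<PT_Sint8>::T; // signed 8-bit lane
  /* ...block using LHS.elem<T>(ElemNum)... */
} break;
case PT_Uint8: {
  using T = PrimConv<PT_Uint8>::T; // unsigned 8-bit lane
  /* ...same block... */
} break;
/* ...remaining integral widths, with PT_Bool excluded... */
default:
  llvm_unreachable("not an integral type");
}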
>From c3cdc21d060e2209c7744b86f1b622e009847e0a Mon Sep 17 00:00:00 2001
From: sskzakaria <ssskzakaria at proton.me>
Date: Sat, 18 Oct 2025 03:32:38 -0400
Subject: [PATCH 5/7] Apply clang-format to interp__builtin_cmp_mask
---
clang/lib/AST/ByteCode/InterpBuiltin.cpp | 79 ++++++++++++++----------
1 file changed, 46 insertions(+), 33 deletions(-)
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index bab6a45b43251..9966163db4113 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3119,40 +3119,53 @@ static bool interp__builtin_cmp_mask(InterpState &S, CodePtr OpPC,
for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
INT_TYPE_SWITCH_NO_BOOL(ElemT, {
- APSInt B = RHS.elem<T>(ElemNum).toAPSInt();
- bool Result = false;
- switch (Opcode.getExtValue() & 0x7) {
- case 0x00: // _MM_CMPINT_EQ
- Result = (LHS.elem<T>(ElemNum).toAPSInt() == RHS.elem<T>(ElemNum).toAPSInt());
- break;
- case 0x01: // _MM_CMPINT_LT
- Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().ult(RHS.elem<T>(ElemNum).toAPSInt()) : LHS.elem<T>(ElemNum).toAPSInt().slt(RHS.elem<T>(ElemNum).toAPSInt());
- break;
- case 0x02: // _MM_CMPINT_LE
- Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().ule(RHS.elem<T>(ElemNum).toAPSInt()) : LHS.elem<T>(ElemNum).toAPSInt().sle(RHS.elem<T>(ElemNum).toAPSInt());
- break;
- case 0x03: // _MM_CMPINT_FALSE
- Result = false;
- break;
- case 0x04: // _MM_CMPINT_NE
- Result = (LHS.elem<T>(ElemNum).toAPSInt() != RHS.elem<T>(ElemNum).toAPSInt());
- break;
- case 0x05: // _MM_CMPINT_NLT (>=)
- Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().uge(RHS.elem<T>(ElemNum).toAPSInt()) : LHS.elem<T>(ElemNum).toAPSInt().sge(RHS.elem<T>(ElemNum).toAPSInt());
- break;
- case 0x06: // _MM_CMPINT_NLE (>)
- Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().ugt(RHS.elem<T>(ElemNum).toAPSInt()) : LHS.elem<T>(ElemNum).toAPSInt().sgt(RHS.elem<T>(ElemNum).toAPSInt());
- break;
- case 0x07: // _MM_CMPINT_TRUE
- Result = true;
- break;
- }
+ APSInt B = RHS.elem<T>(ElemNum).toAPSInt();
+ bool Result = false;
+ switch (Opcode.getExtValue() & 0x7) {
+ case 0x00: // _MM_CMPINT_EQ
+ Result = (LHS.elem<T>(ElemNum).toAPSInt() ==
+ RHS.elem<T>(ElemNum).toAPSInt());
+ break;
+ case 0x01: // _MM_CMPINT_LT
+ Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().ult(
+ RHS.elem<T>(ElemNum).toAPSInt())
+ : LHS.elem<T>(ElemNum).toAPSInt().slt(
+ RHS.elem<T>(ElemNum).toAPSInt());
+ break;
+ case 0x02: // _MM_CMPINT_LE
+ Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().ule(
+ RHS.elem<T>(ElemNum).toAPSInt())
+ : LHS.elem<T>(ElemNum).toAPSInt().sle(
+ RHS.elem<T>(ElemNum).toAPSInt());
+ break;
+ case 0x03: // _MM_CMPINT_FALSE
+ Result = false;
+ break;
+ case 0x04: // _MM_CMPINT_NE
+ Result = (LHS.elem<T>(ElemNum).toAPSInt() !=
+ RHS.elem<T>(ElemNum).toAPSInt());
+ break;
+ case 0x05: // _MM_CMPINT_NLT (>=)
+ Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().uge(
+ RHS.elem<T>(ElemNum).toAPSInt())
+ : LHS.elem<T>(ElemNum).toAPSInt().sge(
+ RHS.elem<T>(ElemNum).toAPSInt());
+ break;
+ case 0x06: // _MM_CMPINT_NLE (>)
+ Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().ugt(
+ RHS.elem<T>(ElemNum).toAPSInt())
+ : LHS.elem<T>(ElemNum).toAPSInt().sgt(
+ RHS.elem<T>(ElemNum).toAPSInt());
+ break;
+ case 0x07: // _MM_CMPINT_TRUE
+ Result = true;
+ break;
+ }
- RetMask.setBitVal(ElemNum, Mask[ElemNum] && Result);
+ RetMask.setBitVal(ElemNum, Mask[ElemNum] && Result);
});
-
}
-
+
pushInteger(S, RetMask, Call->getType());
return true;
}
@@ -4210,7 +4223,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_cmpd512_mask:
case X86::BI__builtin_ia32_cmpq512_mask:
return interp__builtin_cmp_mask(S, OpPC, Call, BuiltinID,
- /*IsUnsigned=*/ false);
+ /*IsUnsigned=*/false);
case X86::BI__builtin_ia32_ucmpb128_mask:
case X86::BI__builtin_ia32_ucmpw128_mask:
@@ -4225,7 +4238,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_ucmpd512_mask:
case X86::BI__builtin_ia32_ucmpq512_mask:
return interp__builtin_cmp_mask(S, OpPC, Call, BuiltinID,
- /*IsUnsigned=*/ true);
+ /*IsUnsigned=*/true);
default:
S.FFDiag(S.Current->getLocation(OpPC),
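The remaining churn is LLVM's argument-comment convention: the parameter name
is spelled in a comment flush against the literal, which clang-format leaves
alone and clang-tidy's bugprone-argument-comment can verify:

// Preferred form: name matches the parameter, no space before the literal.
interp__builtin_cmp_mask(S, OpPC, Call, BuiltinID, /*IsUnsigned=*/true);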
>From 151e23bc11d32dd2fa4759bc4d72a0cf52199a60 Mon Sep 17 00:00:00 2001
From: sskzakaria <ssskzakaria at proton.me>
Date: Sat, 18 Oct 2025 16:18:25 -0400
Subject: [PATCH 6/7] Hoist repeated elem<T>(ElemNum).toAPSInt() calls to the
 top of the loop
---
clang/lib/AST/ByteCode/InterpBuiltin.cpp | 29 +++++++-----------------
clang/lib/AST/ExprConstant.cpp | 5 ++--
2 files changed, 10 insertions(+), 24 deletions(-)
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 9966163db4113..2a996adaedc6f 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3119,43 +3119,30 @@ static bool interp__builtin_cmp_mask(InterpState &S, CodePtr OpPC,
for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
INT_TYPE_SWITCH_NO_BOOL(ElemT, {
- APSInt B = RHS.elem<T>(ElemNum).toAPSInt();
+ const APSInt &A = LHS.elem<T>(ElemNum).toAPSInt();
+ const APSInt &B = RHS.elem<T>(ElemNum).toAPSInt();
bool Result = false;
switch (Opcode.getExtValue() & 0x7) {
case 0x00: // _MM_CMPINT_EQ
- Result = (LHS.elem<T>(ElemNum).toAPSInt() ==
- RHS.elem<T>(ElemNum).toAPSInt());
+ Result = (A == B);
break;
case 0x01: // _MM_CMPINT_LT
- Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().ult(
- RHS.elem<T>(ElemNum).toAPSInt())
- : LHS.elem<T>(ElemNum).toAPSInt().slt(
- RHS.elem<T>(ElemNum).toAPSInt());
+ Result = IsUnsigned ? A.ult(B) : A.slt(B);
break;
case 0x02: // _MM_CMPINT_LE
- Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().ule(
- RHS.elem<T>(ElemNum).toAPSInt())
- : LHS.elem<T>(ElemNum).toAPSInt().sle(
- RHS.elem<T>(ElemNum).toAPSInt());
+ Result = IsUnsigned ? A.ule(B) : A.sle(B);
break;
case 0x03: // _MM_CMPINT_FALSE
Result = false;
break;
case 0x04: // _MM_CMPINT_NE
- Result = (LHS.elem<T>(ElemNum).toAPSInt() !=
- RHS.elem<T>(ElemNum).toAPSInt());
+ Result = (A != B);
break;
case 0x05: // _MM_CMPINT_NLT (>=)
- Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().uge(
- RHS.elem<T>(ElemNum).toAPSInt())
- : LHS.elem<T>(ElemNum).toAPSInt().sge(
- RHS.elem<T>(ElemNum).toAPSInt());
+ Result = IsUnsigned ? A.uge(B) : A.sge(B);
break;
case 0x06: // _MM_CMPINT_NLE (>)
- Result = IsUnsigned ? LHS.elem<T>(ElemNum).toAPSInt().ugt(
- RHS.elem<T>(ElemNum).toAPSInt())
- : LHS.elem<T>(ElemNum).toAPSInt().sgt(
- RHS.elem<T>(ElemNum).toAPSInt());
+ Result = IsUnsigned ? A.ugt(B) : A.sgt(B);
break;
case 0x07: // _MM_CMPINT_TRUE
Result = true;
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index ac3e3c51fb983..4508029ed2494 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -15497,8 +15497,8 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
- APSInt A = LHS.getVectorElt(ElemNum).getInt();
- APSInt B = RHS.getVectorElt(ElemNum).getInt();
+ const APSInt &A = LHS.getVectorElt(ElemNum).getInt();
+ const APSInt &B = RHS.getVectorElt(ElemNum).getInt();
bool result = false;
switch (Opcode.getExtValue() & 0x7) {
@@ -15531,7 +15531,6 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
RetMask.setBitVal(ElemNum, Mask[ElemNum] && result);
}
- RetMask.setIsUnsigned(true);
return Success(APValue(RetMask), E);
}
}
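The hoisted const APSInt & bindings are safe because C++ extends the lifetime
of the temporaries returned by toAPSInt() to the references' scope. The
IsUnsigned flag they feed is the only difference between the cmp* and ucmp*
families, visible once a lane's top bit is set; an illustrative pair in the
tests' style (values invented):

// As a signed byte -1 sorts below 1, but as an unsigned byte it is 0xFF and
// sorts above, so the same _MM_CMPINT_LT predicate yields opposite masks.
TEST_CONSTEXPR(_mm_cmp_epi8_mask(
    ((__m128i)(__v16qi){-1, -1, -1, -1, -1, -1, -1, -1,
                        -1, -1, -1, -1, -1, -1, -1, -1}),
    ((__m128i)(__v16qi){1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}),
    _MM_CMPINT_LT) == (__mmask16)0xffff);

TEST_CONSTEXPR(_mm_cmp_epu8_mask(
    ((__m128i)(__v16qi){-1, -1, -1, -1, -1, -1, -1, -1,
                        -1, -1, -1, -1, -1, -1, -1, -1}),
    ((__m128i)(__v16qi){1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}),
    _MM_CMPINT_LT) == (__mmask16)0x0000);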
>From e96b79b8ad298dc2bd929cf8f77be4fcce768ce8 Mon Sep 17 00:00:00 2001
From: sskzakaria <ssskzakaria at proton.me>
Date: Sat, 18 Oct 2025 16:26:25 -0400
Subject: [PATCH 7/7] Apply clang-format
---
clang/lib/AST/ByteCode/InterpBuiltin.cpp | 68 +-
clang/lib/AST/ExprConstant.cpp | 4081 +++++++++++-----------
clang/lib/Headers/avx512vlbwintrin.h | 2432 ++++++-------
3 files changed, 3094 insertions(+), 3487 deletions(-)
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 2a996adaedc6f..47aa120e71628 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3117,41 +3117,41 @@ static bool interp__builtin_cmp_mask(InterpState &S, CodePtr OpPC,
unsigned VectorLen = LHS.getNumElems();
PrimType ElemT = LHS.getFieldDesc()->getPrimType();
- for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
- INT_TYPE_SWITCH_NO_BOOL(ElemT, {
- const APSInt &A = LHS.elem<T>(ElemNum).toAPSInt();
- const APSInt &B = RHS.elem<T>(ElemNum).toAPSInt();
- bool Result = false;
- switch (Opcode.getExtValue() & 0x7) {
- case 0x00: // _MM_CMPINT_EQ
- Result = (A == B);
- break;
- case 0x01: // _MM_CMPINT_LT
- Result = IsUnsigned ? A.ult(B) : A.slt(B);
- break;
- case 0x02: // _MM_CMPINT_LE
- Result = IsUnsigned ? A.ule(B) : A.sle(B);
- break;
- case 0x03: // _MM_CMPINT_FALSE
- Result = false;
- break;
- case 0x04: // _MM_CMPINT_NE
- Result = (A != B);
- break;
- case 0x05: // _MM_CMPINT_NLT (>=)
- Result = IsUnsigned ? A.uge(B) : A.sge(B);
- break;
- case 0x06: // _MM_CMPINT_NLE (>)
- Result = IsUnsigned ? A.ugt(B) : A.sgt(B);
- break;
- case 0x07: // _MM_CMPINT_TRUE
- Result = true;
- break;
- }
+ for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ const APSInt &A = LHS.elem<T>(ElemNum).toAPSInt();
+ const APSInt &B = RHS.elem<T>(ElemNum).toAPSInt();
+ bool Result = false;
+ switch (Opcode.getExtValue() & 0x7) {
+ case 0x00: // _MM_CMPINT_EQ
+ Result = (A == B);
+ break;
+ case 0x01: // _MM_CMPINT_LT
+ Result = IsUnsigned ? A.ult(B) : A.slt(B);
+ break;
+ case 0x02: // _MM_CMPINT_LE
+ Result = IsUnsigned ? A.ule(B) : A.sle(B);
+ break;
+ case 0x03: // _MM_CMPINT_FALSE
+ Result = false;
+ break;
+ case 0x04: // _MM_CMPINT_NE
+ Result = (A != B);
+ break;
+ case 0x05: // _MM_CMPINT_NLT (>=)
+ Result = IsUnsigned ? A.uge(B) : A.sge(B);
+ break;
+ case 0x06: // _MM_CMPINT_NLE (>)
+ Result = IsUnsigned ? A.ugt(B) : A.sgt(B);
+ break;
+ case 0x07: // _MM_CMPINT_TRUE
+ Result = true;
+ break;
+ }
- RetMask.setBitVal(ElemNum, Mask[ElemNum] && Result);
- });
- }
+ RetMask.setBitVal(ElemNum, Mask[ElemNum] && Result);
+ });
+ }
pushInteger(S, RetMask, Call->getType());
return true;
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 4508029ed2494..6846241e92c80 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -73,667 +73,651 @@
using namespace clang;
using llvm::APFixedPoint;
+using llvm::APFloat;
using llvm::APInt;
using llvm::APSInt;
-using llvm::APFloat;
using llvm::FixedPointSemantics;
namespace {
- struct LValue;
- class CallStackFrame;
- class EvalInfo;
+struct LValue;
+class CallStackFrame;
+class EvalInfo;
- using SourceLocExprScopeGuard =
- CurrentSourceLocExprScope::SourceLocExprScopeGuard;
+using SourceLocExprScopeGuard =
+ CurrentSourceLocExprScope::SourceLocExprScopeGuard;
- static QualType getType(APValue::LValueBase B) {
- return B.getType();
- }
+static QualType getType(APValue::LValueBase B) { return B.getType(); }
- /// Get an LValue path entry, which is known to not be an array index, as a
- /// field declaration.
- static const FieldDecl *getAsField(APValue::LValuePathEntry E) {
- return dyn_cast_or_null<FieldDecl>(E.getAsBaseOrMember().getPointer());
- }
- /// Get an LValue path entry, which is known to not be an array index, as a
- /// base class declaration.
- static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) {
- return dyn_cast_or_null<CXXRecordDecl>(E.getAsBaseOrMember().getPointer());
- }
- /// Determine whether this LValue path entry for a base class names a virtual
- /// base class.
- static bool isVirtualBaseClass(APValue::LValuePathEntry E) {
- return E.getAsBaseOrMember().getInt();
- }
+/// Get an LValue path entry, which is known to not be an array index, as a
+/// field declaration.
+static const FieldDecl *getAsField(APValue::LValuePathEntry E) {
+ return dyn_cast_or_null<FieldDecl>(E.getAsBaseOrMember().getPointer());
+}
+/// Get an LValue path entry, which is known to not be an array index, as a
+/// base class declaration.
+static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) {
+ return dyn_cast_or_null<CXXRecordDecl>(E.getAsBaseOrMember().getPointer());
+}
+/// Determine whether this LValue path entry for a base class names a virtual
+/// base class.
+static bool isVirtualBaseClass(APValue::LValuePathEntry E) {
+ return E.getAsBaseOrMember().getInt();
+}
- /// Given an expression, determine the type used to store the result of
- /// evaluating that expression.
- static QualType getStorageType(const ASTContext &Ctx, const Expr *E) {
- if (E->isPRValue())
- return E->getType();
- return Ctx.getLValueReferenceType(E->getType());
- }
+/// Given an expression, determine the type used to store the result of
+/// evaluating that expression.
+static QualType getStorageType(const ASTContext &Ctx, const Expr *E) {
+ if (E->isPRValue())
+ return E->getType();
+ return Ctx.getLValueReferenceType(E->getType());
+}
- /// Attempts to unwrap a CallExpr (with an alloc_size attribute) from an Expr.
- /// This will look through a single cast.
- ///
- /// Returns null if we couldn't unwrap a function with alloc_size.
- static const CallExpr *tryUnwrapAllocSizeCall(const Expr *E) {
- if (!E->getType()->isPointerType())
- return nullptr;
+/// Attempts to unwrap a CallExpr (with an alloc_size attribute) from an Expr.
+/// This will look through a single cast.
+///
+/// Returns null if we couldn't unwrap a function with alloc_size.
+static const CallExpr *tryUnwrapAllocSizeCall(const Expr *E) {
+ if (!E->getType()->isPointerType())
+ return nullptr;
- E = E->IgnoreParens();
- // If we're doing a variable assignment from e.g. malloc(N), there will
- // probably be a cast of some kind. In exotic cases, we might also see a
- // top-level ExprWithCleanups. Ignore them either way.
- if (const auto *FE = dyn_cast<FullExpr>(E))
- E = FE->getSubExpr()->IgnoreParens();
+ E = E->IgnoreParens();
+ // If we're doing a variable assignment from e.g. malloc(N), there will
+ // probably be a cast of some kind. In exotic cases, we might also see a
+ // top-level ExprWithCleanups. Ignore them either way.
+ if (const auto *FE = dyn_cast<FullExpr>(E))
+ E = FE->getSubExpr()->IgnoreParens();
- if (const auto *Cast = dyn_cast<CastExpr>(E))
- E = Cast->getSubExpr()->IgnoreParens();
+ if (const auto *Cast = dyn_cast<CastExpr>(E))
+ E = Cast->getSubExpr()->IgnoreParens();
- if (const auto *CE = dyn_cast<CallExpr>(E))
- return CE->getCalleeAllocSizeAttr() ? CE : nullptr;
- return nullptr;
- }
+ if (const auto *CE = dyn_cast<CallExpr>(E))
+ return CE->getCalleeAllocSizeAttr() ? CE : nullptr;
+ return nullptr;
+}
- /// Determines whether or not the given Base contains a call to a function
- /// with the alloc_size attribute.
- static bool isBaseAnAllocSizeCall(APValue::LValueBase Base) {
- const auto *E = Base.dyn_cast<const Expr *>();
- return E && E->getType()->isPointerType() && tryUnwrapAllocSizeCall(E);
- }
+/// Determines whether or not the given Base contains a call to a function
+/// with the alloc_size attribute.
+static bool isBaseAnAllocSizeCall(APValue::LValueBase Base) {
+ const auto *E = Base.dyn_cast<const Expr *>();
+ return E && E->getType()->isPointerType() && tryUnwrapAllocSizeCall(E);
+}
- /// Determines whether the given kind of constant expression is only ever
- /// used for name mangling. If so, it's permitted to reference things that we
- /// can't generate code for (in particular, dllimported functions).
- static bool isForManglingOnly(ConstantExprKind Kind) {
- switch (Kind) {
- case ConstantExprKind::Normal:
- case ConstantExprKind::ClassTemplateArgument:
- case ConstantExprKind::ImmediateInvocation:
- // Note that non-type template arguments of class type are emitted as
- // template parameter objects.
- return false;
+/// Determines whether the given kind of constant expression is only ever
+/// used for name mangling. If so, it's permitted to reference things that we
+/// can't generate code for (in particular, dllimported functions).
+static bool isForManglingOnly(ConstantExprKind Kind) {
+ switch (Kind) {
+ case ConstantExprKind::Normal:
+ case ConstantExprKind::ClassTemplateArgument:
+ case ConstantExprKind::ImmediateInvocation:
+ // Note that non-type template arguments of class type are emitted as
+ // template parameter objects.
+ return false;
- case ConstantExprKind::NonClassTemplateArgument:
- return true;
- }
- llvm_unreachable("unknown ConstantExprKind");
+ case ConstantExprKind::NonClassTemplateArgument:
+ return true;
}
+ llvm_unreachable("unknown ConstantExprKind");
+}
- static bool isTemplateArgument(ConstantExprKind Kind) {
- switch (Kind) {
- case ConstantExprKind::Normal:
- case ConstantExprKind::ImmediateInvocation:
- return false;
+static bool isTemplateArgument(ConstantExprKind Kind) {
+ switch (Kind) {
+ case ConstantExprKind::Normal:
+ case ConstantExprKind::ImmediateInvocation:
+ return false;
- case ConstantExprKind::ClassTemplateArgument:
- case ConstantExprKind::NonClassTemplateArgument:
- return true;
- }
- llvm_unreachable("unknown ConstantExprKind");
- }
-
- /// The bound to claim that an array of unknown bound has.
- /// The value in MostDerivedArraySize is undefined in this case. So, set it
- /// to an arbitrary value that's likely to loudly break things if it's used.
- static const uint64_t AssumedSizeForUnsizedArray =
- std::numeric_limits<uint64_t>::max() / 2;
-
- /// Determines if an LValue with the given LValueBase will have an unsized
- /// array in its designator.
- /// Find the path length and type of the most-derived subobject in the given
- /// path, and find the size of the containing array, if any.
- static unsigned
- findMostDerivedSubobject(const ASTContext &Ctx, APValue::LValueBase Base,
- ArrayRef<APValue::LValuePathEntry> Path,
- uint64_t &ArraySize, QualType &Type, bool &IsArray,
- bool &FirstEntryIsUnsizedArray) {
- // This only accepts LValueBases from APValues, and APValues don't support
- // arrays that lack size info.
- assert(!isBaseAnAllocSizeCall(Base) &&
- "Unsized arrays shouldn't appear here");
- unsigned MostDerivedLength = 0;
- // The type of Base is a reference type if the base is a constexpr-unknown
- // variable. In that case, look through the reference type.
- Type = getType(Base).getNonReferenceType();
-
- for (unsigned I = 0, N = Path.size(); I != N; ++I) {
- if (Type->isArrayType()) {
- const ArrayType *AT = Ctx.getAsArrayType(Type);
- Type = AT->getElementType();
- MostDerivedLength = I + 1;
- IsArray = true;
-
- if (auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
- ArraySize = CAT->getZExtSize();
- } else {
- assert(I == 0 && "unexpected unsized array designator");
- FirstEntryIsUnsizedArray = true;
- ArraySize = AssumedSizeForUnsizedArray;
- }
- } else if (Type->isAnyComplexType()) {
- const ComplexType *CT = Type->castAs<ComplexType>();
- Type = CT->getElementType();
- ArraySize = 2;
- MostDerivedLength = I + 1;
- IsArray = true;
- } else if (const auto *VT = Type->getAs<VectorType>()) {
- Type = VT->getElementType();
- ArraySize = VT->getNumElements();
- MostDerivedLength = I + 1;
- IsArray = true;
- } else if (const FieldDecl *FD = getAsField(Path[I])) {
- Type = FD->getType();
- ArraySize = 0;
- MostDerivedLength = I + 1;
- IsArray = false;
+ case ConstantExprKind::ClassTemplateArgument:
+ case ConstantExprKind::NonClassTemplateArgument:
+ return true;
+ }
+ llvm_unreachable("unknown ConstantExprKind");
+}
+
+/// The bound we claim an array of unknown bound has.
+/// The value in MostDerivedArraySize is undefined in this case, so set it
+/// to an arbitrary value that's likely to loudly break things if it's used.
+static const uint64_t AssumedSizeForUnsizedArray =
+ std::numeric_limits<uint64_t>::max() / 2;
+
+/// Determines if an LValue with the given LValueBase will have an unsized
+/// array in its designator.
+/// Find the path length and type of the most-derived subobject in the given
+/// path, and find the size of the containing array, if any.
+static unsigned
+findMostDerivedSubobject(const ASTContext &Ctx, APValue::LValueBase Base,
+ ArrayRef<APValue::LValuePathEntry> Path,
+ uint64_t &ArraySize, QualType &Type, bool &IsArray,
+ bool &FirstEntryIsUnsizedArray) {
+ // This only accepts LValueBases from APValues, and APValues don't support
+ // arrays that lack size info.
+ assert(!isBaseAnAllocSizeCall(Base) &&
+ "Unsized arrays shouldn't appear here");
+ unsigned MostDerivedLength = 0;
+ // The type of Base is a reference type if the base is a constexpr-unknown
+ // variable. In that case, look through the reference type.
+ Type = getType(Base).getNonReferenceType();
+
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ if (Type->isArrayType()) {
+ const ArrayType *AT = Ctx.getAsArrayType(Type);
+ Type = AT->getElementType();
+ MostDerivedLength = I + 1;
+ IsArray = true;
+
+ if (auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
+ ArraySize = CAT->getZExtSize();
} else {
- // Path[I] describes a base class.
- ArraySize = 0;
- IsArray = false;
- }
- }
- return MostDerivedLength;
- }
-
- /// A path from a glvalue to a subobject of that glvalue.
- struct SubobjectDesignator {
- /// True if the subobject was named in a manner not supported by C++11. Such
- /// lvalues can still be folded, but they are not core constant expressions
- /// and we cannot perform lvalue-to-rvalue conversions on them.
- LLVM_PREFERRED_TYPE(bool)
- unsigned Invalid : 1;
-
- /// Is this a pointer one past the end of an object?
- LLVM_PREFERRED_TYPE(bool)
- unsigned IsOnePastTheEnd : 1;
-
- /// Indicator of whether the first entry is an unsized array.
- LLVM_PREFERRED_TYPE(bool)
- unsigned FirstEntryIsAnUnsizedArray : 1;
-
- /// Indicator of whether the most-derived object is an array element.
- LLVM_PREFERRED_TYPE(bool)
- unsigned MostDerivedIsArrayElement : 1;
-
- /// The length of the path to the most-derived object of which this is a
- /// subobject.
- unsigned MostDerivedPathLength : 28;
-
- /// The size of the array of which the most-derived object is an element.
- /// This will always be 0 if the most-derived object is not an array
- /// element. 0 is not an indicator of whether or not the most-derived object
- /// is an array, however, because 0-length arrays are allowed.
- ///
- /// If the current array is an unsized array, the value of this is
- /// undefined.
- uint64_t MostDerivedArraySize;
- /// The type of the most derived object referred to by this address.
- QualType MostDerivedType;
-
- typedef APValue::LValuePathEntry PathEntry;
-
- /// The entries on the path from the glvalue to the designated subobject.
- SmallVector<PathEntry, 8> Entries;
-
- SubobjectDesignator() : Invalid(true) {}
-
- explicit SubobjectDesignator(QualType T)
- : Invalid(false), IsOnePastTheEnd(false),
- FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
- MostDerivedPathLength(0), MostDerivedArraySize(0),
- MostDerivedType(T.isNull() ? QualType() : T.getNonReferenceType()) {}
-
- SubobjectDesignator(const ASTContext &Ctx, const APValue &V)
- : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
- FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
- MostDerivedPathLength(0), MostDerivedArraySize(0) {
- assert(V.isLValue() && "Non-LValue used to make an LValue designator?");
- if (!Invalid) {
- IsOnePastTheEnd = V.isLValueOnePastTheEnd();
- llvm::append_range(Entries, V.getLValuePath());
- if (V.getLValueBase()) {
- bool IsArray = false;
- bool FirstIsUnsizedArray = false;
- MostDerivedPathLength = findMostDerivedSubobject(
- Ctx, V.getLValueBase(), V.getLValuePath(), MostDerivedArraySize,
- MostDerivedType, IsArray, FirstIsUnsizedArray);
- MostDerivedIsArrayElement = IsArray;
- FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
- }
+ assert(I == 0 && "unexpected unsized array designator");
+ FirstEntryIsUnsizedArray = true;
+ ArraySize = AssumedSizeForUnsizedArray;
}
+ } else if (Type->isAnyComplexType()) {
+ const ComplexType *CT = Type->castAs<ComplexType>();
+ Type = CT->getElementType();
+ ArraySize = 2;
+ MostDerivedLength = I + 1;
+ IsArray = true;
+ } else if (const auto *VT = Type->getAs<VectorType>()) {
+ Type = VT->getElementType();
+ ArraySize = VT->getNumElements();
+ MostDerivedLength = I + 1;
+ IsArray = true;
+ } else if (const FieldDecl *FD = getAsField(Path[I])) {
+ Type = FD->getType();
+ ArraySize = 0;
+ MostDerivedLength = I + 1;
+ IsArray = false;
+ } else {
+ // Path[I] describes a base class.
+ ArraySize = 0;
+ IsArray = false;
}
+ }
+ return MostDerivedLength;
+}
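+
+// For example (illustrative): with a base of type 'int[3][4]' and the path
+// {1, 2} designating element [1][2], the loop above walks both array levels
+// and returns MostDerivedLength = 2, with Type = 'int', ArraySize = 4 and
+// IsArray = true.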
- void truncate(ASTContext &Ctx, APValue::LValueBase Base,
- unsigned NewLength) {
- if (Invalid)
- return;
+/// A path from a glvalue to a subobject of that glvalue.
+struct SubobjectDesignator {
+ /// True if the subobject was named in a manner not supported by C++11. Such
+ /// lvalues can still be folded, but they are not core constant expressions
+ /// and we cannot perform lvalue-to-rvalue conversions on them.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned Invalid : 1;
- assert(Base && "cannot truncate path for null pointer");
- assert(NewLength <= Entries.size() && "not a truncation");
+ /// Is this a pointer one past the end of an object?
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned IsOnePastTheEnd : 1;
- if (NewLength == Entries.size())
- return;
- Entries.resize(NewLength);
+ /// Indicator of whether the first entry is an unsized array.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned FirstEntryIsAnUnsizedArray : 1;
- bool IsArray = false;
- bool FirstIsUnsizedArray = false;
- MostDerivedPathLength = findMostDerivedSubobject(
- Ctx, Base, Entries, MostDerivedArraySize, MostDerivedType, IsArray,
- FirstIsUnsizedArray);
- MostDerivedIsArrayElement = IsArray;
- FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
- }
+ /// Indicator of whether the most-derived object is an array element.
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned MostDerivedIsArrayElement : 1;
- void setInvalid() {
- Invalid = true;
- Entries.clear();
- }
+ /// The length of the path to the most-derived object of which this is a
+ /// subobject.
+ unsigned MostDerivedPathLength : 28;
- /// Determine whether the most derived subobject is an array without a
- /// known bound.
- bool isMostDerivedAnUnsizedArray() const {
- assert(!Invalid && "Calling this makes no sense on invalid designators");
- return Entries.size() == 1 && FirstEntryIsAnUnsizedArray;
+ /// The size of the array of which the most-derived object is an element.
+ /// This will always be 0 if the most-derived object is not an array
+ /// element. 0 is not an indicator of whether or not the most-derived object
+ /// is an array, however, because 0-length arrays are allowed.
+ ///
+ /// If the current array is an unsized array, the value of this is
+ /// undefined.
+ uint64_t MostDerivedArraySize;
+ /// The type of the most derived object referred to by this address.
+ QualType MostDerivedType;
+
+ typedef APValue::LValuePathEntry PathEntry;
+
+ /// The entries on the path from the glvalue to the designated subobject.
+ SmallVector<PathEntry, 8> Entries;
+
+ SubobjectDesignator() : Invalid(true) {}
+
+ explicit SubobjectDesignator(QualType T)
+ : Invalid(false), IsOnePastTheEnd(false),
+ FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
+ MostDerivedPathLength(0), MostDerivedArraySize(0),
+ MostDerivedType(T.isNull() ? QualType() : T.getNonReferenceType()) {}
+
+ SubobjectDesignator(const ASTContext &Ctx, const APValue &V)
+ : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
+ FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
+ MostDerivedPathLength(0), MostDerivedArraySize(0) {
+ assert(V.isLValue() && "Non-LValue used to make an LValue designator?");
+ if (!Invalid) {
+ IsOnePastTheEnd = V.isLValueOnePastTheEnd();
+ llvm::append_range(Entries, V.getLValuePath());
+ if (V.getLValueBase()) {
+ bool IsArray = false;
+ bool FirstIsUnsizedArray = false;
+ MostDerivedPathLength = findMostDerivedSubobject(
+ Ctx, V.getLValueBase(), V.getLValuePath(), MostDerivedArraySize,
+ MostDerivedType, IsArray, FirstIsUnsizedArray);
+ MostDerivedIsArrayElement = IsArray;
+ FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
+ }
}
+ }
- /// Determine what the most derived array's size is. Results in an assertion
- /// failure if the most derived array lacks a size.
- uint64_t getMostDerivedArraySize() const {
- assert(!isMostDerivedAnUnsizedArray() && "Unsized array has no size");
- return MostDerivedArraySize;
- }
+ void truncate(ASTContext &Ctx, APValue::LValueBase Base, unsigned NewLength) {
+ if (Invalid)
+ return;
- /// Determine whether this is a one-past-the-end pointer.
- bool isOnePastTheEnd() const {
- assert(!Invalid);
- if (IsOnePastTheEnd)
- return true;
- if (!isMostDerivedAnUnsizedArray() && MostDerivedIsArrayElement &&
- Entries[MostDerivedPathLength - 1].getAsArrayIndex() ==
- MostDerivedArraySize)
- return true;
- return false;
- }
+ assert(Base && "cannot truncate path for null pointer");
+ assert(NewLength <= Entries.size() && "not a truncation");
- /// Get the range of valid index adjustments in the form
- /// {maximum value that can be subtracted from this pointer,
- /// maximum value that can be added to this pointer}
- std::pair<uint64_t, uint64_t> validIndexAdjustments() {
- if (Invalid || isMostDerivedAnUnsizedArray())
- return {0, 0};
+ if (NewLength == Entries.size())
+ return;
+ Entries.resize(NewLength);
- // [expr.add]p4: For the purposes of these operators, a pointer to a
- // nonarray object behaves the same as a pointer to the first element of
- // an array of length one with the type of the object as its element type.
- bool IsArray = MostDerivedPathLength == Entries.size() &&
- MostDerivedIsArrayElement;
- uint64_t ArrayIndex = IsArray ? Entries.back().getAsArrayIndex()
- : (uint64_t)IsOnePastTheEnd;
- uint64_t ArraySize =
- IsArray ? getMostDerivedArraySize() : (uint64_t)1;
- return {ArrayIndex, ArraySize - ArrayIndex};
- }
+ bool IsArray = false;
+ bool FirstIsUnsizedArray = false;
+ MostDerivedPathLength =
+ findMostDerivedSubobject(Ctx, Base, Entries, MostDerivedArraySize,
+ MostDerivedType, IsArray, FirstIsUnsizedArray);
+ MostDerivedIsArrayElement = IsArray;
+ FirstEntryIsAnUnsizedArray = FirstIsUnsizedArray;
+ }
- /// Check that this refers to a valid subobject.
- bool isValidSubobject() const {
- if (Invalid)
- return false;
- return !isOnePastTheEnd();
- }
- /// Check that this refers to a valid subobject, and if not, produce a
- /// relevant diagnostic and set the designator as invalid.
- bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK);
+ void setInvalid() {
+ Invalid = true;
+ Entries.clear();
+ }
- /// Get the type of the designated object.
- QualType getType(ASTContext &Ctx) const {
- assert(!Invalid && "invalid designator has no subobject type");
- return MostDerivedPathLength == Entries.size()
- ? MostDerivedType
- : Ctx.getCanonicalTagType(getAsBaseClass(Entries.back()));
- }
+ /// Determine whether the most derived subobject is an array without a
+ /// known bound.
+ bool isMostDerivedAnUnsizedArray() const {
+ assert(!Invalid && "Calling this makes no sense on invalid designators");
+ return Entries.size() == 1 && FirstEntryIsAnUnsizedArray;
+ }
- /// Update this designator to refer to the first element within this array.
- void addArrayUnchecked(const ConstantArrayType *CAT) {
- Entries.push_back(PathEntry::ArrayIndex(0));
+ /// Determine what the most derived array's size is. Results in an assertion
+ /// failure if the most derived array lacks a size.
+ uint64_t getMostDerivedArraySize() const {
+ assert(!isMostDerivedAnUnsizedArray() && "Unsized array has no size");
+ return MostDerivedArraySize;
+ }
- // This is a most-derived object.
- MostDerivedType = CAT->getElementType();
- MostDerivedIsArrayElement = true;
- MostDerivedArraySize = CAT->getZExtSize();
- MostDerivedPathLength = Entries.size();
- }
- /// Update this designator to refer to the first element within the array of
- /// elements of type T. This is an array of unknown size.
- void addUnsizedArrayUnchecked(QualType ElemTy) {
- Entries.push_back(PathEntry::ArrayIndex(0));
-
- MostDerivedType = ElemTy;
- MostDerivedIsArrayElement = true;
- // The value in MostDerivedArraySize is undefined in this case. So, set it
- // to an arbitrary value that's likely to loudly break things if it's
- // used.
- MostDerivedArraySize = AssumedSizeForUnsizedArray;
- MostDerivedPathLength = Entries.size();
- }
- /// Update this designator to refer to the given base or member of this
- /// object.
- void addDeclUnchecked(const Decl *D, bool Virtual = false) {
- Entries.push_back(APValue::BaseOrMemberType(D, Virtual));
-
- // If this isn't a base class, it's a new most-derived object.
- if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
- MostDerivedType = FD->getType();
- MostDerivedIsArrayElement = false;
- MostDerivedArraySize = 0;
- MostDerivedPathLength = Entries.size();
- }
- }
- /// Update this designator to refer to the given complex component.
- void addComplexUnchecked(QualType EltTy, bool Imag) {
- Entries.push_back(PathEntry::ArrayIndex(Imag));
+ /// Determine whether this is a one-past-the-end pointer.
+ bool isOnePastTheEnd() const {
+ assert(!Invalid);
+ if (IsOnePastTheEnd)
+ return true;
+ if (!isMostDerivedAnUnsizedArray() && MostDerivedIsArrayElement &&
+ Entries[MostDerivedPathLength - 1].getAsArrayIndex() ==
+ MostDerivedArraySize)
+ return true;
+ return false;
+ }
- // This is technically a most-derived object, though in practice this
- // is unlikely to matter.
- MostDerivedType = EltTy;
- MostDerivedIsArrayElement = true;
- MostDerivedArraySize = 2;
+ /// Get the range of valid index adjustments in the form
+ /// {maximum value that can be subtracted from this pointer,
+ /// maximum value that can be added to this pointer}
+ std::pair<uint64_t, uint64_t> validIndexAdjustments() {
+ if (Invalid || isMostDerivedAnUnsizedArray())
+ return {0, 0};
+
+ // [expr.add]p4: For the purposes of these operators, a pointer to a
+ // nonarray object behaves the same as a pointer to the first element of
+ // an array of length one with the type of the object as its element type.
+ bool IsArray =
+ MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement;
+ uint64_t ArrayIndex =
+ IsArray ? Entries.back().getAsArrayIndex() : (uint64_t)IsOnePastTheEnd;
+ uint64_t ArraySize = IsArray ? getMostDerivedArraySize() : (uint64_t)1;
+ return {ArrayIndex, ArraySize - ArrayIndex};
+ }
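+
+  // For example (illustrative): a designator for index 2 of a most-derived
+  // 'int[5]' yields {2, 3}: at most 2 elements backwards and at most 3
+  // forwards (landing one past the end).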
+
+ /// Check that this refers to a valid subobject.
+ bool isValidSubobject() const {
+ if (Invalid)
+ return false;
+ return !isOnePastTheEnd();
+ }
+ /// Check that this refers to a valid subobject, and if not, produce a
+ /// relevant diagnostic and set the designator as invalid.
+ bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK);
+
+ /// Get the type of the designated object.
+ QualType getType(ASTContext &Ctx) const {
+ assert(!Invalid && "invalid designator has no subobject type");
+ return MostDerivedPathLength == Entries.size()
+ ? MostDerivedType
+ : Ctx.getCanonicalTagType(getAsBaseClass(Entries.back()));
+ }
+
+ /// Update this designator to refer to the first element within this array.
+ void addArrayUnchecked(const ConstantArrayType *CAT) {
+ Entries.push_back(PathEntry::ArrayIndex(0));
+
+ // This is a most-derived object.
+ MostDerivedType = CAT->getElementType();
+ MostDerivedIsArrayElement = true;
+ MostDerivedArraySize = CAT->getZExtSize();
+ MostDerivedPathLength = Entries.size();
+ }
+ /// Update this designator to refer to the first element within the array of
+ /// elements of type T. This is an array of unknown size.
+ void addUnsizedArrayUnchecked(QualType ElemTy) {
+ Entries.push_back(PathEntry::ArrayIndex(0));
+
+ MostDerivedType = ElemTy;
+ MostDerivedIsArrayElement = true;
+    // The value in MostDerivedArraySize is undefined in this case, so set
+    // it to an arbitrary value that's likely to loudly break things if
+    // it's used.
+ MostDerivedArraySize = AssumedSizeForUnsizedArray;
+ MostDerivedPathLength = Entries.size();
+ }
+ /// Update this designator to refer to the given base or member of this
+ /// object.
+ void addDeclUnchecked(const Decl *D, bool Virtual = false) {
+ Entries.push_back(APValue::BaseOrMemberType(D, Virtual));
+
+ // If this isn't a base class, it's a new most-derived object.
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+ MostDerivedType = FD->getType();
+ MostDerivedIsArrayElement = false;
+ MostDerivedArraySize = 0;
MostDerivedPathLength = Entries.size();
}
+ }
+ /// Update this designator to refer to the given complex component.
+ void addComplexUnchecked(QualType EltTy, bool Imag) {
+ Entries.push_back(PathEntry::ArrayIndex(Imag));
- void addVectorElementUnchecked(QualType EltTy, uint64_t Size,
- uint64_t Idx) {
- Entries.push_back(PathEntry::ArrayIndex(Idx));
- MostDerivedType = EltTy;
- MostDerivedPathLength = Entries.size();
- MostDerivedArraySize = 0;
- MostDerivedIsArrayElement = false;
- }
+ // This is technically a most-derived object, though in practice this
+ // is unlikely to matter.
+ MostDerivedType = EltTy;
+ MostDerivedIsArrayElement = true;
+ MostDerivedArraySize = 2;
+ MostDerivedPathLength = Entries.size();
+ }
- void diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info, const Expr *E);
- void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E,
- const APSInt &N);
- /// Add N to the address of this subobject.
- void adjustIndex(EvalInfo &Info, const Expr *E, APSInt N, const LValue &LV);
- };
+ void addVectorElementUnchecked(QualType EltTy, uint64_t Size, uint64_t Idx) {
+ Entries.push_back(PathEntry::ArrayIndex(Idx));
+ MostDerivedType = EltTy;
+ MostDerivedPathLength = Entries.size();
+ MostDerivedArraySize = 0;
+ MostDerivedIsArrayElement = false;
+ }
- /// A scope at the end of which an object can need to be destroyed.
- enum class ScopeKind {
- Block,
- FullExpression,
- Call
- };
+ void diagnoseUnsizedArrayPointerArithmetic(EvalInfo &Info, const Expr *E);
+ void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E,
+ const APSInt &N);
+ /// Add N to the address of this subobject.
+ void adjustIndex(EvalInfo &Info, const Expr *E, APSInt N, const LValue &LV);
+};
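+
+// For example (illustrative): for 'struct S { int arr[4]; } s;', the glvalue
+// 's.arr[2]' is described by the entries {arr, 2}, with MostDerivedType
+// 'int', MostDerivedIsArrayElement true and MostDerivedArraySize 4.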
- /// A reference to a particular call and its arguments.
- struct CallRef {
- CallRef() : OrigCallee(), CallIndex(0), Version() {}
- CallRef(const FunctionDecl *Callee, unsigned CallIndex, unsigned Version)
- : OrigCallee(Callee), CallIndex(CallIndex), Version(Version) {}
-
- explicit operator bool() const { return OrigCallee; }
-
- /// Get the parameter that the caller initialized, corresponding to the
- /// given parameter in the callee.
- const ParmVarDecl *getOrigParam(const ParmVarDecl *PVD) const {
- return OrigCallee ? OrigCallee->getParamDecl(PVD->getFunctionScopeIndex())
- : PVD;
- }
-
- /// The callee at the point where the arguments were evaluated. This might
- /// be different from the actual callee (a different redeclaration, or a
- /// virtual override), but this function's parameters are the ones that
- /// appear in the parameter map.
- const FunctionDecl *OrigCallee;
- /// The call index of the frame that holds the argument values.
- unsigned CallIndex;
- /// The version of the parameters corresponding to this call.
- unsigned Version;
- };
+/// A scope at the end of which an object can need to be destroyed.
+enum class ScopeKind { Block, FullExpression, Call };
+
+/// A reference to a particular call and its arguments.
+struct CallRef {
+ CallRef() : OrigCallee(), CallIndex(0), Version() {}
+ CallRef(const FunctionDecl *Callee, unsigned CallIndex, unsigned Version)
+ : OrigCallee(Callee), CallIndex(CallIndex), Version(Version) {}
+
+ explicit operator bool() const { return OrigCallee; }
+
+ /// Get the parameter that the caller initialized, corresponding to the
+ /// given parameter in the callee.
+ const ParmVarDecl *getOrigParam(const ParmVarDecl *PVD) const {
+ return OrigCallee ? OrigCallee->getParamDecl(PVD->getFunctionScopeIndex())
+ : PVD;
+ }
+
+ /// The callee at the point where the arguments were evaluated. This might
+ /// be different from the actual callee (a different redeclaration, or a
+ /// virtual override), but this function's parameters are the ones that
+ /// appear in the parameter map.
+ const FunctionDecl *OrigCallee;
+ /// The call index of the frame that holds the argument values.
+ unsigned CallIndex;
+ /// The version of the parameters corresponding to this call.
+ unsigned Version;
+};
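+
+// Illustrative note: argument values live in the caller's frame, keyed by
+// (ParmVarDecl*, Version), so a CallRef records just enough (callee, call
+// index and version) for getParamSlot() to locate them later.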
- /// A stack frame in the constexpr call stack.
- class CallStackFrame : public interp::Frame {
- public:
- EvalInfo &Info;
+/// A stack frame in the constexpr call stack.
+class CallStackFrame : public interp::Frame {
+public:
+ EvalInfo &Info;
- /// Parent - The caller of this stack frame.
- CallStackFrame *Caller;
+ /// Parent - The caller of this stack frame.
+ CallStackFrame *Caller;
- /// Callee - The function which was called.
- const FunctionDecl *Callee;
+ /// Callee - The function which was called.
+ const FunctionDecl *Callee;
- /// This - The binding for the this pointer in this call, if any.
- const LValue *This;
+ /// This - The binding for the this pointer in this call, if any.
+ const LValue *This;
- /// CallExpr - The syntactical structure of member function calls
- const Expr *CallExpr;
+ /// CallExpr - The syntactical structure of member function calls
+ const Expr *CallExpr;
- /// Information on how to find the arguments to this call. Our arguments
- /// are stored in our parent's CallStackFrame, using the ParmVarDecl* as a
- /// key and this value as the version.
- CallRef Arguments;
+ /// Information on how to find the arguments to this call. Our arguments
+ /// are stored in our parent's CallStackFrame, using the ParmVarDecl* as a
+ /// key and this value as the version.
+ CallRef Arguments;
- /// Source location information about the default argument or default
- /// initializer expression we're evaluating, if any.
- CurrentSourceLocExprScope CurSourceLocExprScope;
+ /// Source location information about the default argument or default
+ /// initializer expression we're evaluating, if any.
+ CurrentSourceLocExprScope CurSourceLocExprScope;
- // Note that we intentionally use std::map here so that references to
- // values are stable.
- typedef std::pair<const void *, unsigned> MapKeyTy;
- typedef std::map<MapKeyTy, APValue> MapTy;
- /// Temporaries - Temporary lvalues materialized within this stack frame.
- MapTy Temporaries;
+ // Note that we intentionally use std::map here so that references to
+ // values are stable.
+ typedef std::pair<const void *, unsigned> MapKeyTy;
+ typedef std::map<MapKeyTy, APValue> MapTy;
+ /// Temporaries - Temporary lvalues materialized within this stack frame.
+ MapTy Temporaries;
- /// CallRange - The source range of the call expression for this call.
- SourceRange CallRange;
+ /// CallRange - The source range of the call expression for this call.
+ SourceRange CallRange;
- /// Index - The call index of this call.
- unsigned Index;
+ /// Index - The call index of this call.
+ unsigned Index;
- /// The stack of integers for tracking version numbers for temporaries.
- SmallVector<unsigned, 2> TempVersionStack = {1};
- unsigned CurTempVersion = TempVersionStack.back();
+ /// The stack of integers for tracking version numbers for temporaries.
+ SmallVector<unsigned, 2> TempVersionStack = {1};
+ unsigned CurTempVersion = TempVersionStack.back();
- unsigned getTempVersion() const { return TempVersionStack.back(); }
+ unsigned getTempVersion() const { return TempVersionStack.back(); }
- void pushTempVersion() {
- TempVersionStack.push_back(++CurTempVersion);
- }
+ void pushTempVersion() { TempVersionStack.push_back(++CurTempVersion); }
- void popTempVersion() {
- TempVersionStack.pop_back();
- }
+ void popTempVersion() { TempVersionStack.pop_back(); }
- CallRef createCall(const FunctionDecl *Callee) {
- return {Callee, Index, ++CurTempVersion};
- }
+ CallRef createCall(const FunctionDecl *Callee) {
+ return {Callee, Index, ++CurTempVersion};
+ }
- // FIXME: Adding this to every 'CallStackFrame' may have a nontrivial impact
- // on the overall stack usage of deeply-recursing constexpr evaluations.
- // (We should cache this map rather than recomputing it repeatedly.)
- // But let's try this and see how it goes; we can look into caching the map
- // as a later change.
+ // FIXME: Adding this to every 'CallStackFrame' may have a nontrivial impact
+ // on the overall stack usage of deeply-recursing constexpr evaluations.
+ // (We should cache this map rather than recomputing it repeatedly.)
+ // But let's try this and see how it goes; we can look into caching the map
+ // as a later change.
- /// LambdaCaptureFields - Mapping from captured variables/this to
- /// corresponding data members in the closure class.
- llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
- FieldDecl *LambdaThisCaptureField = nullptr;
+ /// LambdaCaptureFields - Mapping from captured variables/this to
+ /// corresponding data members in the closure class.
+ llvm::DenseMap<const ValueDecl *, FieldDecl *> LambdaCaptureFields;
+ FieldDecl *LambdaThisCaptureField = nullptr;
- CallStackFrame(EvalInfo &Info, SourceRange CallRange,
- const FunctionDecl *Callee, const LValue *This,
- const Expr *CallExpr, CallRef Arguments);
- ~CallStackFrame();
+ CallStackFrame(EvalInfo &Info, SourceRange CallRange,
+ const FunctionDecl *Callee, const LValue *This,
+ const Expr *CallExpr, CallRef Arguments);
+ ~CallStackFrame();
- // Return the temporary for Key whose version number is Version.
- APValue *getTemporary(const void *Key, unsigned Version) {
- MapKeyTy KV(Key, Version);
- auto LB = Temporaries.lower_bound(KV);
- if (LB != Temporaries.end() && LB->first == KV)
- return &LB->second;
- return nullptr;
- }
+ // Return the temporary for Key whose version number is Version.
+ APValue *getTemporary(const void *Key, unsigned Version) {
+ MapKeyTy KV(Key, Version);
+ auto LB = Temporaries.lower_bound(KV);
+ if (LB != Temporaries.end() && LB->first == KV)
+ return &LB->second;
+ return nullptr;
+ }
- // Return the current temporary for Key in the map.
- APValue *getCurrentTemporary(const void *Key) {
- auto UB = Temporaries.upper_bound(MapKeyTy(Key, UINT_MAX));
- if (UB != Temporaries.begin() && std::prev(UB)->first.first == Key)
- return &std::prev(UB)->second;
- return nullptr;
- }
+ // Return the current temporary for Key in the map.
+ APValue *getCurrentTemporary(const void *Key) {
+ auto UB = Temporaries.upper_bound(MapKeyTy(Key, UINT_MAX));
+ if (UB != Temporaries.begin() && std::prev(UB)->first.first == Key)
+ return &std::prev(UB)->second;
+ return nullptr;
+ }
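+
+  // Illustrative note: Temporaries is ordered by (Key, Version), so the
+  // upper_bound({Key, UINT_MAX}) lookup above lands just past the highest
+  // version stored for Key; std::prev of that, if it still refers to Key,
+  // is the current temporary.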
- // Return the version number of the current temporary for Key.
- unsigned getCurrentTemporaryVersion(const void *Key) const {
- auto UB = Temporaries.upper_bound(MapKeyTy(Key, UINT_MAX));
- if (UB != Temporaries.begin() && std::prev(UB)->first.first == Key)
- return std::prev(UB)->first.second;
- return 0;
- }
+ // Return the version number of the current temporary for Key.
+ unsigned getCurrentTemporaryVersion(const void *Key) const {
+ auto UB = Temporaries.upper_bound(MapKeyTy(Key, UINT_MAX));
+ if (UB != Temporaries.begin() && std::prev(UB)->first.first == Key)
+ return std::prev(UB)->first.second;
+ return 0;
+ }
- /// Allocate storage for an object of type T in this stack frame.
- /// Populates LV with a handle to the created object. Key identifies
- /// the temporary within the stack frame, and must not be reused without
- /// bumping the temporary version number.
- template<typename KeyT>
- APValue &createTemporary(const KeyT *Key, QualType T,
- ScopeKind Scope, LValue &LV);
+ /// Allocate storage for an object of type T in this stack frame.
+ /// Populates LV with a handle to the created object. Key identifies
+ /// the temporary within the stack frame, and must not be reused without
+ /// bumping the temporary version number.
+ template <typename KeyT>
+ APValue &createTemporary(const KeyT *Key, QualType T, ScopeKind Scope,
+ LValue &LV);
- /// Allocate storage for a parameter of a function call made in this frame.
- APValue &createParam(CallRef Args, const ParmVarDecl *PVD, LValue &LV);
+ /// Allocate storage for a parameter of a function call made in this frame.
+ APValue &createParam(CallRef Args, const ParmVarDecl *PVD, LValue &LV);
- void describe(llvm::raw_ostream &OS) const override;
+ void describe(llvm::raw_ostream &OS) const override;
- Frame *getCaller() const override { return Caller; }
- SourceRange getCallRange() const override { return CallRange; }
- const FunctionDecl *getCallee() const override { return Callee; }
+ Frame *getCaller() const override { return Caller; }
+ SourceRange getCallRange() const override { return CallRange; }
+ const FunctionDecl *getCallee() const override { return Callee; }
- bool isStdFunction() const {
- for (const DeclContext *DC = Callee; DC; DC = DC->getParent())
- if (DC->isStdNamespace())
- return true;
- return false;
- }
+ bool isStdFunction() const {
+ for (const DeclContext *DC = Callee; DC; DC = DC->getParent())
+ if (DC->isStdNamespace())
+ return true;
+ return false;
+ }
- /// Whether we're in a context where [[msvc::constexpr]] evaluation is
- /// permitted. See MSConstexprDocs for description of permitted contexts.
- bool CanEvalMSConstexpr = false;
+ /// Whether we're in a context where [[msvc::constexpr]] evaluation is
+ /// permitted. See MSConstexprDocs for description of permitted contexts.
+ bool CanEvalMSConstexpr = false;
- private:
- APValue &createLocal(APValue::LValueBase Base, const void *Key, QualType T,
- ScopeKind Scope);
- };
+private:
+ APValue &createLocal(APValue::LValueBase Base, const void *Key, QualType T,
+ ScopeKind Scope);
+};
- /// Temporarily override 'this'.
- class ThisOverrideRAII {
- public:
- ThisOverrideRAII(CallStackFrame &Frame, const LValue *NewThis, bool Enable)
- : Frame(Frame), OldThis(Frame.This) {
- if (Enable)
- Frame.This = NewThis;
- }
- ~ThisOverrideRAII() {
- Frame.This = OldThis;
- }
- private:
- CallStackFrame &Frame;
- const LValue *OldThis;
- };
+/// Temporarily override 'this'.
+class ThisOverrideRAII {
+public:
+ ThisOverrideRAII(CallStackFrame &Frame, const LValue *NewThis, bool Enable)
+ : Frame(Frame), OldThis(Frame.This) {
+ if (Enable)
+ Frame.This = NewThis;
+ }
+ ~ThisOverrideRAII() { Frame.This = OldThis; }
- // A shorthand time trace scope struct, prints source range, for example
- // {"name":"EvaluateAsRValue","args":{"detail":"<test.cc:8:21, col:25>"}}}
- class ExprTimeTraceScope {
- public:
- ExprTimeTraceScope(const Expr *E, const ASTContext &Ctx, StringRef Name)
- : TimeScope(Name, [E, &Ctx] {
- return E->getSourceRange().printToString(Ctx.getSourceManager());
- }) {}
+private:
+ CallStackFrame &Frame;
+ const LValue *OldThis;
+};
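+
+// Typical use (illustrative): while evaluating a member's default
+// initializer, the evaluator temporarily makes the object under
+// construction the current 'this', e.g.
+//   ThisOverrideRAII ThisOverride(*Info.CurrentCall, &ThisLV, /*Enable=*/true);
+// where ThisLV is a hypothetical LValue naming that object.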
- private:
- llvm::TimeTraceScope TimeScope;
- };
+// A shorthand time trace scope struct that prints the source range, e.g.
+// {"name":"EvaluateAsRValue","args":{"detail":"<test.cc:8:21, col:25>"}}}
+class ExprTimeTraceScope {
+public:
+ ExprTimeTraceScope(const Expr *E, const ASTContext &Ctx, StringRef Name)
+ : TimeScope(Name, [E, &Ctx] {
+ return E->getSourceRange().printToString(Ctx.getSourceManager());
+ }) {}
- /// RAII object used to change the current ability of
- /// [[msvc::constexpr]] evaulation.
- struct MSConstexprContextRAII {
- CallStackFrame &Frame;
- bool OldValue;
- explicit MSConstexprContextRAII(CallStackFrame &Frame, bool Value)
- : Frame(Frame), OldValue(Frame.CanEvalMSConstexpr) {
- Frame.CanEvalMSConstexpr = Value;
- }
+private:
+ llvm::TimeTraceScope TimeScope;
+};
- ~MSConstexprContextRAII() { Frame.CanEvalMSConstexpr = OldValue; }
- };
-}
+/// RAII object used to change whether [[msvc::constexpr]] evaluation is
+/// currently permitted.
+struct MSConstexprContextRAII {
+ CallStackFrame &Frame;
+ bool OldValue;
+ explicit MSConstexprContextRAII(CallStackFrame &Frame, bool Value)
+ : Frame(Frame), OldValue(Frame.CanEvalMSConstexpr) {
+ Frame.CanEvalMSConstexpr = Value;
+ }
-static bool HandleDestruction(EvalInfo &Info, const Expr *E,
- const LValue &This, QualType ThisType);
+ ~MSConstexprContextRAII() { Frame.CanEvalMSConstexpr = OldValue; }
+};
+} // namespace
+
+static bool HandleDestruction(EvalInfo &Info, const Expr *E, const LValue &This,
+ QualType ThisType);
static bool HandleDestruction(EvalInfo &Info, SourceLocation Loc,
APValue::LValueBase LVBase, APValue &Value,
QualType T);
namespace {
- /// A cleanup, and a flag indicating whether it is lifetime-extended.
- class Cleanup {
- llvm::PointerIntPair<APValue*, 2, ScopeKind> Value;
- APValue::LValueBase Base;
- QualType T;
+/// A cleanup, and a flag indicating whether it is lifetime-extended.
+class Cleanup {
+ llvm::PointerIntPair<APValue *, 2, ScopeKind> Value;
+ APValue::LValueBase Base;
+ QualType T;
- public:
- Cleanup(APValue *Val, APValue::LValueBase Base, QualType T,
- ScopeKind Scope)
- : Value(Val, Scope), Base(Base), T(T) {}
-
- /// Determine whether this cleanup should be performed at the end of the
- /// given kind of scope.
- bool isDestroyedAtEndOf(ScopeKind K) const {
- return (int)Value.getInt() >= (int)K;
- }
- bool endLifetime(EvalInfo &Info, bool RunDestructors) {
- if (RunDestructors) {
- SourceLocation Loc;
- if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
- Loc = VD->getLocation();
- else if (const Expr *E = Base.dyn_cast<const Expr*>())
- Loc = E->getExprLoc();
- return HandleDestruction(Info, Loc, Base, *Value.getPointer(), T);
- }
- *Value.getPointer() = APValue();
- return true;
- }
+public:
+ Cleanup(APValue *Val, APValue::LValueBase Base, QualType T, ScopeKind Scope)
+ : Value(Val, Scope), Base(Base), T(T) {}
+
+ /// Determine whether this cleanup should be performed at the end of the
+ /// given kind of scope.
+ bool isDestroyedAtEndOf(ScopeKind K) const {
+ return (int)Value.getInt() >= (int)K;
+ }
+ bool endLifetime(EvalInfo &Info, bool RunDestructors) {
+ if (RunDestructors) {
+ SourceLocation Loc;
+ if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl *>())
+ Loc = VD->getLocation();
+ else if (const Expr *E = Base.dyn_cast<const Expr *>())
+ Loc = E->getExprLoc();
+ return HandleDestruction(Info, Loc, Base, *Value.getPointer(), T);
+ }
+ *Value.getPointer() = APValue();
+ return true;
+ }
- bool hasSideEffect() {
- return T.isDestructedType();
- }
- };
+ bool hasSideEffect() { return T.isDestructedType(); }
+};
- /// A reference to an object whose construction we are currently evaluating.
- struct ObjectUnderConstruction {
- APValue::LValueBase Base;
- ArrayRef<APValue::LValuePathEntry> Path;
- friend bool operator==(const ObjectUnderConstruction &LHS,
- const ObjectUnderConstruction &RHS) {
- return LHS.Base == RHS.Base && LHS.Path == RHS.Path;
- }
- friend llvm::hash_code hash_value(const ObjectUnderConstruction &Obj) {
- return llvm::hash_combine(Obj.Base, Obj.Path);
- }
- };
- enum class ConstructionPhase {
- None,
- Bases,
- AfterBases,
- AfterFields,
- Destroying,
- DestroyingBases
- };
-}
+/// A reference to an object whose construction we are currently evaluating.
+struct ObjectUnderConstruction {
+ APValue::LValueBase Base;
+ ArrayRef<APValue::LValuePathEntry> Path;
+ friend bool operator==(const ObjectUnderConstruction &LHS,
+ const ObjectUnderConstruction &RHS) {
+ return LHS.Base == RHS.Base && LHS.Path == RHS.Path;
+ }
+ friend llvm::hash_code hash_value(const ObjectUnderConstruction &Obj) {
+ return llvm::hash_combine(Obj.Base, Obj.Path);
+ }
+};
+enum class ConstructionPhase {
+ None,
+ Bases,
+ AfterBases,
+ AfterFields,
+ Destroying,
+ DestroyingBases
+};
+} // namespace
namespace llvm {
-template<> struct DenseMapInfo<ObjectUnderConstruction> {
+template <> struct DenseMapInfo<ObjectUnderConstruction> {
using Base = DenseMapInfo<APValue::LValueBase>;
static ObjectUnderConstruction getEmptyKey() {
- return {Base::getEmptyKey(), {}}; }
+ return {Base::getEmptyKey(), {}};
+ }
static ObjectUnderConstruction getTombstoneKey() {
return {Base::getTombstoneKey(), {}};
}
@@ -745,655 +729,645 @@ template<> struct DenseMapInfo<ObjectUnderConstruction> {
return LHS == RHS;
}
};
-}
+} // namespace llvm
namespace {
- /// A dynamically-allocated heap object.
- struct DynAlloc {
- /// The value of this heap-allocated object.
- APValue Value;
- /// The allocating expression; used for diagnostics. Either a CXXNewExpr
- /// or a CallExpr (the latter is for direct calls to operator new inside
- /// std::allocator<T>::allocate).
- const Expr *AllocExpr = nullptr;
-
- enum Kind {
- New,
- ArrayNew,
- StdAllocator
- };
+/// A dynamically-allocated heap object.
+struct DynAlloc {
+ /// The value of this heap-allocated object.
+ APValue Value;
+ /// The allocating expression; used for diagnostics. Either a CXXNewExpr
+ /// or a CallExpr (the latter is for direct calls to operator new inside
+ /// std::allocator<T>::allocate).
+ const Expr *AllocExpr = nullptr;
+
+ enum Kind { New, ArrayNew, StdAllocator };
+
+ /// Get the kind of the allocation. This must match between allocation
+ /// and deallocation.
+ Kind getKind() const {
+ if (auto *NE = dyn_cast<CXXNewExpr>(AllocExpr))
+ return NE->isArray() ? ArrayNew : New;
+ assert(isa<CallExpr>(AllocExpr));
+ return StdAllocator;
+ }
+};
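+
+// For example (illustrative): storage obtained from 'new int[n]' has kind
+// ArrayNew and must be released with 'delete[]'; getKind() is what lets the
+// evaluator diagnose a mismatched plain 'delete'.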
- /// Get the kind of the allocation. This must match between allocation
- /// and deallocation.
- Kind getKind() const {
- if (auto *NE = dyn_cast<CXXNewExpr>(AllocExpr))
- return NE->isArray() ? ArrayNew : New;
- assert(isa<CallExpr>(AllocExpr));
- return StdAllocator;
- }
- };
+struct DynAllocOrder {
+ bool operator()(DynamicAllocLValue L, DynamicAllocLValue R) const {
+ return L.getIndex() < R.getIndex();
+ }
+};
- struct DynAllocOrder {
- bool operator()(DynamicAllocLValue L, DynamicAllocLValue R) const {
- return L.getIndex() < R.getIndex();
- }
- };
+/// EvalInfo - This is a private struct used by the evaluator to capture
+/// information about a subexpression as it is folded. It retains information
+/// about the AST context, but also maintains information about the folded
+/// expression.
+///
+/// If an expression could be evaluated, it is still possible it is not a C
+/// "integer constant expression" or constant expression. If not, this struct
+/// captures information about how and why not.
+///
+/// One bit of information passed *into* the request for constant folding
+/// indicates whether the subexpression is "evaluated" or not according to C
+/// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
+/// evaluate the expression regardless of what the RHS is, but C only allows
+/// certain things in certain situations.
+class EvalInfo : public interp::State {
+public:
+ ASTContext &Ctx;
- /// EvalInfo - This is a private struct used by the evaluator to capture
- /// information about a subexpression as it is folded. It retains information
- /// about the AST context, but also maintains information about the folded
- /// expression.
- ///
- /// If an expression could be evaluated, it is still possible it is not a C
- /// "integer constant expression" or constant expression. If not, this struct
- /// captures information about how and why not.
- ///
- /// One bit of information passed *into* the request for constant folding
- /// indicates whether the subexpression is "evaluated" or not according to C
- /// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
- /// evaluate the expression regardless of what the RHS is, but C only allows
- /// certain things in certain situations.
- class EvalInfo : public interp::State {
- public:
- ASTContext &Ctx;
+ /// EvalStatus - Contains information about the evaluation.
+ Expr::EvalStatus &EvalStatus;
- /// EvalStatus - Contains information about the evaluation.
- Expr::EvalStatus &EvalStatus;
+ /// CurrentCall - The top of the constexpr call stack.
+ CallStackFrame *CurrentCall;
- /// CurrentCall - The top of the constexpr call stack.
- CallStackFrame *CurrentCall;
+ /// CallStackDepth - The number of calls in the call stack right now.
+ unsigned CallStackDepth;
- /// CallStackDepth - The number of calls in the call stack right now.
- unsigned CallStackDepth;
+ /// NextCallIndex - The next call index to assign.
+ unsigned NextCallIndex;
- /// NextCallIndex - The next call index to assign.
- unsigned NextCallIndex;
+ /// StepsLeft - The remaining number of evaluation steps we're permitted
+ /// to perform. This is essentially a limit for the number of statements
+ /// we will evaluate.
+ unsigned StepsLeft;
- /// StepsLeft - The remaining number of evaluation steps we're permitted
- /// to perform. This is essentially a limit for the number of statements
- /// we will evaluate.
- unsigned StepsLeft;
+ /// Enable the experimental new constant interpreter. If an expression is
+ /// not supported by the interpreter, an error is triggered.
+ bool EnableNewConstInterp;
- /// Enable the experimental new constant interpreter. If an expression is
- /// not supported by the interpreter, an error is triggered.
- bool EnableNewConstInterp;
+ /// BottomFrame - The frame in which evaluation started. This must be
+ /// initialized after CurrentCall and CallStackDepth.
+ CallStackFrame BottomFrame;
- /// BottomFrame - The frame in which evaluation started. This must be
- /// initialized after CurrentCall and CallStackDepth.
- CallStackFrame BottomFrame;
+ /// A stack of values whose lifetimes end at the end of some surrounding
+ /// evaluation frame.
+ llvm::SmallVector<Cleanup, 16> CleanupStack;
- /// A stack of values whose lifetimes end at the end of some surrounding
- /// evaluation frame.
- llvm::SmallVector<Cleanup, 16> CleanupStack;
+ /// EvaluatingDecl - This is the declaration whose initializer is being
+ /// evaluated, if any.
+ APValue::LValueBase EvaluatingDecl;
- /// EvaluatingDecl - This is the declaration whose initializer is being
- /// evaluated, if any.
- APValue::LValueBase EvaluatingDecl;
+ enum class EvaluatingDeclKind {
+ None,
+ /// We're evaluating the construction of EvaluatingDecl.
+ Ctor,
+ /// We're evaluating the destruction of EvaluatingDecl.
+ Dtor,
+ };
+ EvaluatingDeclKind IsEvaluatingDecl = EvaluatingDeclKind::None;
+
+ /// EvaluatingDeclValue - This is the value being constructed for the
+ /// declaration whose initializer is being evaluated, if any.
+ APValue *EvaluatingDeclValue;
+
+ /// Stack of loops and 'switch' statements which we're currently
+ /// breaking/continuing; null entries are used to mark unlabeled
+ /// break/continue.
+ SmallVector<const Stmt *> BreakContinueStack;
+
+ /// Set of objects that are currently being constructed.
+ llvm::DenseMap<ObjectUnderConstruction, ConstructionPhase>
+ ObjectsUnderConstruction;
+
+ /// Current heap allocations, along with the location where each was
+ /// allocated. We use std::map here because we need stable addresses
+ /// for the stored APValues.
+ std::map<DynamicAllocLValue, DynAlloc, DynAllocOrder> HeapAllocs;
+
+ /// The number of heap allocations performed so far in this evaluation.
+ unsigned NumHeapAllocs = 0;
+
+ struct EvaluatingConstructorRAII {
+ EvalInfo &EI;
+ ObjectUnderConstruction Object;
+ bool DidInsert;
+ EvaluatingConstructorRAII(EvalInfo &EI, ObjectUnderConstruction Object,
+ bool HasBases)
+ : EI(EI), Object(Object) {
+ DidInsert =
+ EI.ObjectsUnderConstruction
+ .insert({Object, HasBases ? ConstructionPhase::Bases
+ : ConstructionPhase::AfterBases})
+ .second;
+ }
+ void finishedConstructingBases() {
+ EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterBases;
+ }
+ void finishedConstructingFields() {
+ EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterFields;
+ }
+ ~EvaluatingConstructorRAII() {
+ if (DidInsert)
+ EI.ObjectsUnderConstruction.erase(Object);
+ }
+ };
- enum class EvaluatingDeclKind {
- None,
- /// We're evaluating the construction of EvaluatingDecl.
- Ctor,
- /// We're evaluating the destruction of EvaluatingDecl.
- Dtor,
- };
- EvaluatingDeclKind IsEvaluatingDecl = EvaluatingDeclKind::None;
-
- /// EvaluatingDeclValue - This is the value being constructed for the
- /// declaration whose initializer is being evaluated, if any.
- APValue *EvaluatingDeclValue;
-
- /// Stack of loops and 'switch' statements which we're currently
- /// breaking/continuing; null entries are used to mark unlabeled
- /// break/continue.
- SmallVector<const Stmt *> BreakContinueStack;
-
- /// Set of objects that are currently being constructed.
- llvm::DenseMap<ObjectUnderConstruction, ConstructionPhase>
- ObjectsUnderConstruction;
-
- /// Current heap allocations, along with the location where each was
- /// allocated. We use std::map here because we need stable addresses
- /// for the stored APValues.
- std::map<DynamicAllocLValue, DynAlloc, DynAllocOrder> HeapAllocs;
-
- /// The number of heap allocations performed so far in this evaluation.
- unsigned NumHeapAllocs = 0;
-
- struct EvaluatingConstructorRAII {
- EvalInfo &EI;
- ObjectUnderConstruction Object;
- bool DidInsert;
- EvaluatingConstructorRAII(EvalInfo &EI, ObjectUnderConstruction Object,
- bool HasBases)
- : EI(EI), Object(Object) {
- DidInsert =
- EI.ObjectsUnderConstruction
- .insert({Object, HasBases ? ConstructionPhase::Bases
- : ConstructionPhase::AfterBases})
- .second;
- }
- void finishedConstructingBases() {
- EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterBases;
- }
- void finishedConstructingFields() {
- EI.ObjectsUnderConstruction[Object] = ConstructionPhase::AfterFields;
- }
- ~EvaluatingConstructorRAII() {
- if (DidInsert) EI.ObjectsUnderConstruction.erase(Object);
- }
- };
+ struct EvaluatingDestructorRAII {
+ EvalInfo &EI;
+ ObjectUnderConstruction Object;
+ bool DidInsert;
+ EvaluatingDestructorRAII(EvalInfo &EI, ObjectUnderConstruction Object)
+ : EI(EI), Object(Object) {
+ DidInsert = EI.ObjectsUnderConstruction
+ .insert({Object, ConstructionPhase::Destroying})
+ .second;
+ }
+ void startedDestroyingBases() {
+ EI.ObjectsUnderConstruction[Object] = ConstructionPhase::DestroyingBases;
+ }
+ ~EvaluatingDestructorRAII() {
+ if (DidInsert)
+ EI.ObjectsUnderConstruction.erase(Object);
+ }
+ };
- struct EvaluatingDestructorRAII {
- EvalInfo &EI;
- ObjectUnderConstruction Object;
- bool DidInsert;
- EvaluatingDestructorRAII(EvalInfo &EI, ObjectUnderConstruction Object)
- : EI(EI), Object(Object) {
- DidInsert = EI.ObjectsUnderConstruction
- .insert({Object, ConstructionPhase::Destroying})
- .second;
- }
- void startedDestroyingBases() {
- EI.ObjectsUnderConstruction[Object] =
- ConstructionPhase::DestroyingBases;
- }
- ~EvaluatingDestructorRAII() {
- if (DidInsert)
- EI.ObjectsUnderConstruction.erase(Object);
- }
- };
+ ConstructionPhase
+ isEvaluatingCtorDtor(APValue::LValueBase Base,
+ ArrayRef<APValue::LValuePathEntry> Path) {
+ return ObjectsUnderConstruction.lookup({Base, Path});
+ }
- ConstructionPhase
- isEvaluatingCtorDtor(APValue::LValueBase Base,
- ArrayRef<APValue::LValuePathEntry> Path) {
- return ObjectsUnderConstruction.lookup({Base, Path});
- }
+ /// If we're currently speculatively evaluating, the outermost call stack
+ /// depth at which we can mutate state, otherwise 0.
+ unsigned SpeculativeEvaluationDepth = 0;
- /// If we're currently speculatively evaluating, the outermost call stack
- /// depth at which we can mutate state, otherwise 0.
- unsigned SpeculativeEvaluationDepth = 0;
+ /// The current array initialization index, if we're performing array
+ /// initialization.
+ uint64_t ArrayInitIndex = -1;
- /// The current array initialization index, if we're performing array
- /// initialization.
- uint64_t ArrayInitIndex = -1;
+ /// HasActiveDiagnostic - Was the previous diagnostic stored? If so, further
+ /// notes attached to it will also be stored, otherwise they will not be.
+ bool HasActiveDiagnostic;
- /// HasActiveDiagnostic - Was the previous diagnostic stored? If so, further
- /// notes attached to it will also be stored, otherwise they will not be.
- bool HasActiveDiagnostic;
+ /// Have we emitted a diagnostic explaining why we couldn't constant
+ /// fold (not just why it's not strictly a constant expression)?
+ bool HasFoldFailureDiagnostic;
- /// Have we emitted a diagnostic explaining why we couldn't constant
- /// fold (not just why it's not strictly a constant expression)?
- bool HasFoldFailureDiagnostic;
+ EvalInfo(const ASTContext &C, Expr::EvalStatus &S, EvaluationMode Mode)
+ : Ctx(const_cast<ASTContext &>(C)), EvalStatus(S), CurrentCall(nullptr),
+ CallStackDepth(0), NextCallIndex(1),
+ StepsLeft(C.getLangOpts().ConstexprStepLimit),
+ EnableNewConstInterp(C.getLangOpts().EnableNewConstInterp),
+ BottomFrame(*this, SourceLocation(), /*Callee=*/nullptr,
+ /*This=*/nullptr,
+ /*CallExpr=*/nullptr, CallRef()),
+ EvaluatingDecl((const ValueDecl *)nullptr),
+ EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
+ HasFoldFailureDiagnostic(false) {
+ EvalMode = Mode;
+ }
- EvalInfo(const ASTContext &C, Expr::EvalStatus &S, EvaluationMode Mode)
- : Ctx(const_cast<ASTContext &>(C)), EvalStatus(S), CurrentCall(nullptr),
- CallStackDepth(0), NextCallIndex(1),
- StepsLeft(C.getLangOpts().ConstexprStepLimit),
- EnableNewConstInterp(C.getLangOpts().EnableNewConstInterp),
- BottomFrame(*this, SourceLocation(), /*Callee=*/nullptr,
- /*This=*/nullptr,
- /*CallExpr=*/nullptr, CallRef()),
- EvaluatingDecl((const ValueDecl *)nullptr),
- EvaluatingDeclValue(nullptr), HasActiveDiagnostic(false),
- HasFoldFailureDiagnostic(false) {
- EvalMode = Mode;
- }
+ ~EvalInfo() { discardCleanups(); }
- ~EvalInfo() {
- discardCleanups();
- }
+ ASTContext &getASTContext() const override { return Ctx; }
+ const LangOptions &getLangOpts() const { return Ctx.getLangOpts(); }
- ASTContext &getASTContext() const override { return Ctx; }
- const LangOptions &getLangOpts() const { return Ctx.getLangOpts(); }
+ void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value,
+ EvaluatingDeclKind EDK = EvaluatingDeclKind::Ctor) {
+ EvaluatingDecl = Base;
+ IsEvaluatingDecl = EDK;
+ EvaluatingDeclValue = &Value;
+ }
- void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value,
- EvaluatingDeclKind EDK = EvaluatingDeclKind::Ctor) {
- EvaluatingDecl = Base;
- IsEvaluatingDecl = EDK;
- EvaluatingDeclValue = &Value;
+ bool CheckCallLimit(SourceLocation Loc) {
+ // Don't perform any constexpr calls (other than the call we're checking)
+ // when checking a potential constant expression.
+ if (checkingPotentialConstantExpression() && CallStackDepth > 1)
+ return false;
+ if (NextCallIndex == 0) {
+ // NextCallIndex has wrapped around.
+ FFDiag(Loc, diag::note_constexpr_call_limit_exceeded);
+ return false;
}
-
- bool CheckCallLimit(SourceLocation Loc) {
- // Don't perform any constexpr calls (other than the call we're checking)
- // when checking a potential constant expression.
- if (checkingPotentialConstantExpression() && CallStackDepth > 1)
- return false;
- if (NextCallIndex == 0) {
- // NextCallIndex has wrapped around.
- FFDiag(Loc, diag::note_constexpr_call_limit_exceeded);
- return false;
- }
- if (CallStackDepth <= getLangOpts().ConstexprCallDepth)
- return true;
- FFDiag(Loc, diag::note_constexpr_depth_limit_exceeded)
+ if (CallStackDepth <= getLangOpts().ConstexprCallDepth)
+ return true;
+ FFDiag(Loc, diag::note_constexpr_depth_limit_exceeded)
<< getLangOpts().ConstexprCallDepth;
+ return false;
+ }
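+
+  // Illustrative note: ConstexprCallDepth corresponds to the
+  // -fconstexpr-depth=N option, so a recursive chain such as
+  //   constexpr int f(int n) { return n ? f(n - 1) : 0; }
+  // fails here once more than N constexpr frames are active.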
+
+ bool CheckArraySize(SourceLocation Loc, unsigned BitWidth, uint64_t ElemCount,
+ bool Diag) {
+ // FIXME: GH63562
+    // APValue stores array extents as unsigned, so anything greater than
+    // the maximum unsigned value would overflow when constructing the
+    // array; we catch this here.
+ if (BitWidth > ConstantArrayType::getMaxSizeBits(Ctx) ||
+ ElemCount > uint64_t(std::numeric_limits<unsigned>::max())) {
+ if (Diag)
+ FFDiag(Loc, diag::note_constexpr_new_too_large) << ElemCount;
return false;
}
- bool CheckArraySize(SourceLocation Loc, unsigned BitWidth,
- uint64_t ElemCount, bool Diag) {
- // FIXME: GH63562
- // APValue stores array extents as unsigned,
- // so anything that is greater that unsigned would overflow when
- // constructing the array, we catch this here.
- if (BitWidth > ConstantArrayType::getMaxSizeBits(Ctx) ||
- ElemCount > uint64_t(std::numeric_limits<unsigned>::max())) {
- if (Diag)
- FFDiag(Loc, diag::note_constexpr_new_too_large) << ElemCount;
- return false;
- }
-
- // FIXME: GH63562
- // Arrays allocate an APValue per element.
- // We use the number of constexpr steps as a proxy for the maximum size
- // of arrays to avoid exhausting the system resources, as initialization
- // of each element is likely to take some number of steps anyway.
- uint64_t Limit = Ctx.getLangOpts().ConstexprStepLimit;
- if (Limit != 0 && ElemCount > Limit) {
- if (Diag)
- FFDiag(Loc, diag::note_constexpr_new_exceeds_limits)
- << ElemCount << Limit;
- return false;
- }
- return true;
+ // FIXME: GH63562
+ // Arrays allocate an APValue per element.
+ // We use the number of constexpr steps as a proxy for the maximum size
+ // of arrays to avoid exhausting the system resources, as initialization
+ // of each element is likely to take some number of steps anyway.
+ uint64_t Limit = Ctx.getLangOpts().ConstexprStepLimit;
+ if (Limit != 0 && ElemCount > Limit) {
+ if (Diag)
+ FFDiag(Loc, diag::note_constexpr_new_exceeds_limits)
+ << ElemCount << Limit;
+ return false;
}
+ return true;
+ }
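+
+  // For example (illustrative): with the default step limit, a constexpr
+  // 'new int[1000000000]' is rejected here rather than attempting to build
+  // a billion per-element APValues.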
- std::pair<CallStackFrame *, unsigned>
- getCallFrameAndDepth(unsigned CallIndex) {
- assert(CallIndex && "no call index in getCallFrameAndDepth");
- // We will eventually hit BottomFrame, which has Index 1, so Frame can't
- // be null in this loop.
- unsigned Depth = CallStackDepth;
- CallStackFrame *Frame = CurrentCall;
- while (Frame->Index > CallIndex) {
- Frame = Frame->Caller;
- --Depth;
- }
- if (Frame->Index == CallIndex)
- return {Frame, Depth};
- return {nullptr, 0};
+ std::pair<CallStackFrame *, unsigned>
+ getCallFrameAndDepth(unsigned CallIndex) {
+ assert(CallIndex && "no call index in getCallFrameAndDepth");
+ // We will eventually hit BottomFrame, which has Index 1, so Frame can't
+ // be null in this loop.
+ unsigned Depth = CallStackDepth;
+ CallStackFrame *Frame = CurrentCall;
+ while (Frame->Index > CallIndex) {
+ Frame = Frame->Caller;
+ --Depth;
}
+ if (Frame->Index == CallIndex)
+ return {Frame, Depth};
+ return {nullptr, 0};
+ }
- bool nextStep(const Stmt *S) {
- if (Ctx.getLangOpts().ConstexprStepLimit == 0)
- return true;
-
- if (!StepsLeft) {
- FFDiag(S->getBeginLoc(), diag::note_constexpr_step_limit_exceeded);
- return false;
- }
- --StepsLeft;
+ bool nextStep(const Stmt *S) {
+ if (Ctx.getLangOpts().ConstexprStepLimit == 0)
return true;
- }
- APValue *createHeapAlloc(const Expr *E, QualType T, LValue &LV);
-
- std::optional<DynAlloc *> lookupDynamicAlloc(DynamicAllocLValue DA) {
- std::optional<DynAlloc *> Result;
- auto It = HeapAllocs.find(DA);
- if (It != HeapAllocs.end())
- Result = &It->second;
- return Result;
- }
-
- /// Get the allocated storage for the given parameter of the given call.
- APValue *getParamSlot(CallRef Call, const ParmVarDecl *PVD) {
- CallStackFrame *Frame = getCallFrameAndDepth(Call.CallIndex).first;
- return Frame ? Frame->getTemporary(Call.getOrigParam(PVD), Call.Version)
- : nullptr;
+ if (!StepsLeft) {
+ FFDiag(S->getBeginLoc(), diag::note_constexpr_step_limit_exceeded);
+ return false;
}
+ --StepsLeft;
+ return true;
+ }
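
(Illustrative only, not from the patch: a loop that exhausts nextStep's
budget under the default -fconstexpr-steps=1048576.)

    constexpr long spin() {
      long N = 0;
      for (long I = 0; I < 100'000'000; ++I) // each statement costs a step
        N += I;
      return N;
    }
    constexpr long V = spin(); // rejected: step limit exceeded
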
- /// Information about a stack frame for std::allocator<T>::[de]allocate.
- struct StdAllocatorCaller {
- unsigned FrameIndex;
- QualType ElemType;
- const Expr *Call;
- explicit operator bool() const { return FrameIndex != 0; };
- };
+ APValue *createHeapAlloc(const Expr *E, QualType T, LValue &LV);
- StdAllocatorCaller getStdAllocatorCaller(StringRef FnName) const {
- for (const CallStackFrame *Call = CurrentCall; Call != &BottomFrame;
- Call = Call->Caller) {
- const auto *MD = dyn_cast_or_null<CXXMethodDecl>(Call->Callee);
- if (!MD)
- continue;
- const IdentifierInfo *FnII = MD->getIdentifier();
- if (!FnII || !FnII->isStr(FnName))
- continue;
+ std::optional<DynAlloc *> lookupDynamicAlloc(DynamicAllocLValue DA) {
+ std::optional<DynAlloc *> Result;
+ auto It = HeapAllocs.find(DA);
+ if (It != HeapAllocs.end())
+ Result = &It->second;
+ return Result;
+ }
- const auto *CTSD =
- dyn_cast<ClassTemplateSpecializationDecl>(MD->getParent());
- if (!CTSD)
- continue;
+ /// Get the allocated storage for the given parameter of the given call.
+ APValue *getParamSlot(CallRef Call, const ParmVarDecl *PVD) {
+ CallStackFrame *Frame = getCallFrameAndDepth(Call.CallIndex).first;
+ return Frame ? Frame->getTemporary(Call.getOrigParam(PVD), Call.Version)
+ : nullptr;
+ }
- const IdentifierInfo *ClassII = CTSD->getIdentifier();
- const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
- if (CTSD->isInStdNamespace() && ClassII &&
- ClassII->isStr("allocator") && TAL.size() >= 1 &&
- TAL[0].getKind() == TemplateArgument::Type)
- return {Call->Index, TAL[0].getAsType(), Call->CallExpr};
- }
+ /// Information about a stack frame for std::allocator<T>::[de]allocate.
+ struct StdAllocatorCaller {
+ unsigned FrameIndex;
+ QualType ElemType;
+ const Expr *Call;
+    explicit operator bool() const { return FrameIndex != 0; }
+ };
- return {};
- }
+ StdAllocatorCaller getStdAllocatorCaller(StringRef FnName) const {
+ for (const CallStackFrame *Call = CurrentCall; Call != &BottomFrame;
+ Call = Call->Caller) {
+ const auto *MD = dyn_cast_or_null<CXXMethodDecl>(Call->Callee);
+ if (!MD)
+ continue;
+ const IdentifierInfo *FnII = MD->getIdentifier();
+ if (!FnII || !FnII->isStr(FnName))
+ continue;
- void performLifetimeExtension() {
- // Disable the cleanups for lifetime-extended temporaries.
- llvm::erase_if(CleanupStack, [](Cleanup &C) {
- return !C.isDestroyedAtEndOf(ScopeKind::FullExpression);
- });
- }
+ const auto *CTSD =
+ dyn_cast<ClassTemplateSpecializationDecl>(MD->getParent());
+ if (!CTSD)
+ continue;
- /// Throw away any remaining cleanups at the end of evaluation. If any
- /// cleanups would have had a side-effect, note that as an unmodeled
- /// side-effect and return false. Otherwise, return true.
- bool discardCleanups() {
- for (Cleanup &C : CleanupStack) {
- if (C.hasSideEffect() && !noteSideEffect()) {
- CleanupStack.clear();
- return false;
- }
- }
- CleanupStack.clear();
- return true;
+ const IdentifierInfo *ClassII = CTSD->getIdentifier();
+ const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
+ if (CTSD->isInStdNamespace() && ClassII && ClassII->isStr("allocator") &&
+ TAL.size() >= 1 && TAL[0].getKind() == TemplateArgument::Type)
+ return {Call->Index, TAL[0].getAsType(), Call->CallExpr};
}
- private:
- interp::Frame *getCurrentFrame() override { return CurrentCall; }
- const interp::Frame *getBottomFrame() const override { return &BottomFrame; }
+ return {};
+ }
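
(Illustrative only, not from the patch: the C++20 pattern
getStdAllocatorCaller recognizes; constexpr allocation is permitted when
the active frame is std::allocator<T>::allocate or deallocate.)

    #include <memory>
    constexpr int viaAllocator() {
      std::allocator<int> A;
      int *P = A.allocate(4); // accepted: the caller frame is
                              // std::allocator<int>::allocate
      P[0] = 42;
      int R = P[0];
      A.deallocate(P, 4);
      return R;
    }
    static_assert(viaAllocator() == 42);
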
- bool hasActiveDiagnostic() override { return HasActiveDiagnostic; }
- void setActiveDiagnostic(bool Flag) override { HasActiveDiagnostic = Flag; }
+ void performLifetimeExtension() {
+ // Disable the cleanups for lifetime-extended temporaries.
+ llvm::erase_if(CleanupStack, [](Cleanup &C) {
+ return !C.isDestroyedAtEndOf(ScopeKind::FullExpression);
+ });
+ }
- void setFoldFailureDiagnostic(bool Flag) override {
- HasFoldFailureDiagnostic = Flag;
+ /// Throw away any remaining cleanups at the end of evaluation. If any
+ /// cleanups would have had a side-effect, note that as an unmodeled
+ /// side-effect and return false. Otherwise, return true.
+ bool discardCleanups() {
+ for (Cleanup &C : CleanupStack) {
+ if (C.hasSideEffect() && !noteSideEffect()) {
+ CleanupStack.clear();
+ return false;
+ }
}
+ CleanupStack.clear();
+ return true;
+ }
- Expr::EvalStatus &getEvalStatus() const override { return EvalStatus; }
+private:
+ interp::Frame *getCurrentFrame() override { return CurrentCall; }
+ const interp::Frame *getBottomFrame() const override { return &BottomFrame; }
- // If we have a prior diagnostic, it will be noting that the expression
- // isn't a constant expression. This diagnostic is more important,
- // unless we require this evaluation to produce a constant expression.
- //
- // FIXME: We might want to show both diagnostics to the user in
- // EvaluationMode::ConstantFold mode.
- bool hasPriorDiagnostic() override {
- if (!EvalStatus.Diag->empty()) {
- switch (EvalMode) {
- case EvaluationMode::ConstantFold:
- case EvaluationMode::IgnoreSideEffects:
- if (!HasFoldFailureDiagnostic)
- break;
- // We've already failed to fold something. Keep that diagnostic.
- [[fallthrough]];
- case EvaluationMode::ConstantExpression:
- case EvaluationMode::ConstantExpressionUnevaluated:
- setActiveDiagnostic(false);
- return true;
- }
- }
- return false;
- }
+ bool hasActiveDiagnostic() override { return HasActiveDiagnostic; }
+ void setActiveDiagnostic(bool Flag) override { HasActiveDiagnostic = Flag; }
- unsigned getCallStackDepth() override { return CallStackDepth; }
+ void setFoldFailureDiagnostic(bool Flag) override {
+ HasFoldFailureDiagnostic = Flag;
+ }
- public:
- /// Should we continue evaluation after encountering a side-effect that we
- /// couldn't model?
- bool keepEvaluatingAfterSideEffect() const override {
+ Expr::EvalStatus &getEvalStatus() const override { return EvalStatus; }
+
+ // If we have a prior diagnostic, it will be noting that the expression
+ // isn't a constant expression. This diagnostic is more important,
+ // unless we require this evaluation to produce a constant expression.
+ //
+ // FIXME: We might want to show both diagnostics to the user in
+ // EvaluationMode::ConstantFold mode.
+ bool hasPriorDiagnostic() override {
+ if (!EvalStatus.Diag->empty()) {
switch (EvalMode) {
+ case EvaluationMode::ConstantFold:
case EvaluationMode::IgnoreSideEffects:
- return true;
-
+ if (!HasFoldFailureDiagnostic)
+ break;
+ // We've already failed to fold something. Keep that diagnostic.
+ [[fallthrough]];
case EvaluationMode::ConstantExpression:
case EvaluationMode::ConstantExpressionUnevaluated:
- case EvaluationMode::ConstantFold:
- // By default, assume any side effect might be valid in some other
- // evaluation of this expression from a different context.
- return checkingPotentialConstantExpression() ||
- checkingForUndefinedBehavior();
+ setActiveDiagnostic(false);
+ return true;
}
- llvm_unreachable("Missed EvalMode case");
}
+ return false;
+ }
- /// Note that we have had a side-effect, and determine whether we should
- /// keep evaluating.
- bool noteSideEffect() override {
- EvalStatus.HasSideEffects = true;
- return keepEvaluatingAfterSideEffect();
- }
+ unsigned getCallStackDepth() override { return CallStackDepth; }
- /// Should we continue evaluation after encountering undefined behavior?
- bool keepEvaluatingAfterUndefinedBehavior() {
- switch (EvalMode) {
- case EvaluationMode::IgnoreSideEffects:
- case EvaluationMode::ConstantFold:
- return true;
+public:
+ /// Should we continue evaluation after encountering a side-effect that we
+ /// couldn't model?
+ bool keepEvaluatingAfterSideEffect() const override {
+ switch (EvalMode) {
+ case EvaluationMode::IgnoreSideEffects:
+ return true;
- case EvaluationMode::ConstantExpression:
- case EvaluationMode::ConstantExpressionUnevaluated:
- return checkingForUndefinedBehavior();
- }
- llvm_unreachable("Missed EvalMode case");
+ case EvaluationMode::ConstantExpression:
+ case EvaluationMode::ConstantExpressionUnevaluated:
+ case EvaluationMode::ConstantFold:
+ // By default, assume any side effect might be valid in some other
+ // evaluation of this expression from a different context.
+ return checkingPotentialConstantExpression() ||
+ checkingForUndefinedBehavior();
}
+ llvm_unreachable("Missed EvalMode case");
+ }
- /// Note that we hit something that was technically undefined behavior, but
- /// that we can evaluate past it (such as signed overflow or floating-point
- /// division by zero.)
- bool noteUndefinedBehavior() override {
- EvalStatus.HasUndefinedBehavior = true;
- return keepEvaluatingAfterUndefinedBehavior();
+ /// Note that we have had a side-effect, and determine whether we should
+ /// keep evaluating.
+ bool noteSideEffect() override {
+ EvalStatus.HasSideEffects = true;
+ return keepEvaluatingAfterSideEffect();
+ }
+
+ /// Should we continue evaluation after encountering undefined behavior?
+ bool keepEvaluatingAfterUndefinedBehavior() {
+ switch (EvalMode) {
+ case EvaluationMode::IgnoreSideEffects:
+ case EvaluationMode::ConstantFold:
+ return true;
+
+ case EvaluationMode::ConstantExpression:
+ case EvaluationMode::ConstantExpressionUnevaluated:
+ return checkingForUndefinedBehavior();
}
+ llvm_unreachable("Missed EvalMode case");
+ }
- /// Should we continue evaluation as much as possible after encountering a
- /// construct which can't be reduced to a value?
- bool keepEvaluatingAfterFailure() const override {
- uint64_t Limit = Ctx.getLangOpts().ConstexprStepLimit;
- if (Limit != 0 && !StepsLeft)
- return false;
+  /// Note that we hit something that was technically undefined behavior, but
+  /// that we can evaluate past (such as signed overflow or floating-point
+  /// division by zero).
+ bool noteUndefinedBehavior() override {
+ EvalStatus.HasUndefinedBehavior = true;
+ return keepEvaluatingAfterUndefinedBehavior();
+ }
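
(A sketch, not part of the patch: the fold-versus-constant-expression
split these two hooks implement.)

    constexpr int C = 2147483647 + 1; // rejected: signed overflow is UB in
                                      // a constant expression
    int R = 2147483647 + 1;           // mere constant folding: warn about
                                      // the UB and keep evaluating
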
- switch (EvalMode) {
- case EvaluationMode::ConstantExpression:
- case EvaluationMode::ConstantExpressionUnevaluated:
- case EvaluationMode::ConstantFold:
- case EvaluationMode::IgnoreSideEffects:
- return checkingPotentialConstantExpression() ||
- checkingForUndefinedBehavior();
- }
- llvm_unreachable("Missed EvalMode case");
- }
-
- /// Notes that we failed to evaluate an expression that other expressions
- /// directly depend on, and determine if we should keep evaluating. This
- /// should only be called if we actually intend to keep evaluating.
- ///
- /// Call noteSideEffect() instead if we may be able to ignore the value that
- /// we failed to evaluate, e.g. if we failed to evaluate Foo() in:
- ///
- /// (Foo(), 1) // use noteSideEffect
- /// (Foo() || true) // use noteSideEffect
- /// Foo() + 1 // use noteFailure
- [[nodiscard]] bool noteFailure() {
- // Failure when evaluating some expression often means there is some
- // subexpression whose evaluation was skipped. Therefore, (because we
- // don't track whether we skipped an expression when unwinding after an
- // evaluation failure) every evaluation failure that bubbles up from a
- // subexpression implies that a side-effect has potentially happened. We
- // skip setting the HasSideEffects flag to true until we decide to
- // continue evaluating after that point, which happens here.
- bool KeepGoing = keepEvaluatingAfterFailure();
- EvalStatus.HasSideEffects |= KeepGoing;
- return KeepGoing;
- }
-
- class ArrayInitLoopIndex {
- EvalInfo &Info;
- uint64_t OuterIndex;
+ /// Should we continue evaluation as much as possible after encountering a
+ /// construct which can't be reduced to a value?
+ bool keepEvaluatingAfterFailure() const override {
+ uint64_t Limit = Ctx.getLangOpts().ConstexprStepLimit;
+ if (Limit != 0 && !StepsLeft)
+ return false;
- public:
- ArrayInitLoopIndex(EvalInfo &Info)
- : Info(Info), OuterIndex(Info.ArrayInitIndex) {
- Info.ArrayInitIndex = 0;
- }
- ~ArrayInitLoopIndex() { Info.ArrayInitIndex = OuterIndex; }
+ switch (EvalMode) {
+ case EvaluationMode::ConstantExpression:
+ case EvaluationMode::ConstantExpressionUnevaluated:
+ case EvaluationMode::ConstantFold:
+ case EvaluationMode::IgnoreSideEffects:
+ return checkingPotentialConstantExpression() ||
+ checkingForUndefinedBehavior();
+ }
+ llvm_unreachable("Missed EvalMode case");
+ }
- operator uint64_t&() { return Info.ArrayInitIndex; }
- };
+ /// Notes that we failed to evaluate an expression that other expressions
+ /// directly depend on, and determine if we should keep evaluating. This
+ /// should only be called if we actually intend to keep evaluating.
+ ///
+ /// Call noteSideEffect() instead if we may be able to ignore the value that
+ /// we failed to evaluate, e.g. if we failed to evaluate Foo() in:
+ ///
+ /// (Foo(), 1) // use noteSideEffect
+ /// (Foo() || true) // use noteSideEffect
+ /// Foo() + 1 // use noteFailure
+ [[nodiscard]] bool noteFailure() {
+ // Failure when evaluating some expression often means there is some
+ // subexpression whose evaluation was skipped. Therefore, (because we
+ // don't track whether we skipped an expression when unwinding after an
+ // evaluation failure) every evaluation failure that bubbles up from a
+ // subexpression implies that a side-effect has potentially happened. We
+ // skip setting the HasSideEffects flag to true until we decide to
+ // continue evaluating after that point, which happens here.
+ bool KeepGoing = keepEvaluatingAfterFailure();
+ EvalStatus.HasSideEffects |= KeepGoing;
+ return KeepGoing;
+ }
+
+ class ArrayInitLoopIndex {
+ EvalInfo &Info;
+ uint64_t OuterIndex;
+
+ public:
+ ArrayInitLoopIndex(EvalInfo &Info)
+ : Info(Info), OuterIndex(Info.ArrayInitIndex) {
+ Info.ArrayInitIndex = 0;
+ }
+ ~ArrayInitLoopIndex() { Info.ArrayInitIndex = OuterIndex; }
+
+ operator uint64_t &() { return Info.ArrayInitIndex; }
};
+};
- /// Object used to treat all foldable expressions as constant expressions.
- struct FoldConstant {
- EvalInfo &Info;
- bool Enabled;
- bool HadNoPriorDiags;
- EvaluationMode OldMode;
+/// Object used to treat all foldable expressions as constant expressions.
+struct FoldConstant {
+ EvalInfo &Info;
+ bool Enabled;
+ bool HadNoPriorDiags;
+ EvaluationMode OldMode;
- explicit FoldConstant(EvalInfo &Info, bool Enabled)
- : Info(Info),
- Enabled(Enabled),
- HadNoPriorDiags(Info.EvalStatus.Diag &&
- Info.EvalStatus.Diag->empty() &&
+ explicit FoldConstant(EvalInfo &Info, bool Enabled)
+ : Info(Info), Enabled(Enabled),
+ HadNoPriorDiags(Info.EvalStatus.Diag && Info.EvalStatus.Diag->empty() &&
!Info.EvalStatus.HasSideEffects),
OldMode(Info.EvalMode) {
- if (Enabled)
- Info.EvalMode = EvaluationMode::ConstantFold;
- }
- void keepDiagnostics() { Enabled = false; }
- ~FoldConstant() {
- if (Enabled && HadNoPriorDiags && !Info.EvalStatus.Diag->empty() &&
- !Info.EvalStatus.HasSideEffects)
- Info.EvalStatus.Diag->clear();
- Info.EvalMode = OldMode;
- }
- };
+ if (Enabled)
+ Info.EvalMode = EvaluationMode::ConstantFold;
+ }
+ void keepDiagnostics() { Enabled = false; }
+ ~FoldConstant() {
+ if (Enabled && HadNoPriorDiags && !Info.EvalStatus.Diag->empty() &&
+ !Info.EvalStatus.HasSideEffects)
+ Info.EvalStatus.Diag->clear();
+ Info.EvalMode = OldMode;
+ }
+};
- /// RAII object used to set the current evaluation mode to ignore
- /// side-effects.
- struct IgnoreSideEffectsRAII {
- EvalInfo &Info;
- EvaluationMode OldMode;
- explicit IgnoreSideEffectsRAII(EvalInfo &Info)
- : Info(Info), OldMode(Info.EvalMode) {
- Info.EvalMode = EvaluationMode::IgnoreSideEffects;
- }
+/// RAII object used to set the current evaluation mode to ignore
+/// side-effects.
+struct IgnoreSideEffectsRAII {
+ EvalInfo &Info;
+ EvaluationMode OldMode;
+ explicit IgnoreSideEffectsRAII(EvalInfo &Info)
+ : Info(Info), OldMode(Info.EvalMode) {
+ Info.EvalMode = EvaluationMode::IgnoreSideEffects;
+ }
- ~IgnoreSideEffectsRAII() { Info.EvalMode = OldMode; }
- };
+ ~IgnoreSideEffectsRAII() { Info.EvalMode = OldMode; }
+};
- /// RAII object used to optionally suppress diagnostics and side-effects from
- /// a speculative evaluation.
- class SpeculativeEvaluationRAII {
- EvalInfo *Info = nullptr;
- Expr::EvalStatus OldStatus;
- unsigned OldSpeculativeEvaluationDepth = 0;
+/// RAII object used to optionally suppress diagnostics and side-effects from
+/// a speculative evaluation.
+class SpeculativeEvaluationRAII {
+ EvalInfo *Info = nullptr;
+ Expr::EvalStatus OldStatus;
+ unsigned OldSpeculativeEvaluationDepth = 0;
- void moveFromAndCancel(SpeculativeEvaluationRAII &&Other) {
- Info = Other.Info;
- OldStatus = Other.OldStatus;
- OldSpeculativeEvaluationDepth = Other.OldSpeculativeEvaluationDepth;
- Other.Info = nullptr;
- }
+ void moveFromAndCancel(SpeculativeEvaluationRAII &&Other) {
+ Info = Other.Info;
+ OldStatus = Other.OldStatus;
+ OldSpeculativeEvaluationDepth = Other.OldSpeculativeEvaluationDepth;
+ Other.Info = nullptr;
+ }
- void maybeRestoreState() {
- if (!Info)
- return;
+ void maybeRestoreState() {
+ if (!Info)
+ return;
- Info->EvalStatus = OldStatus;
- Info->SpeculativeEvaluationDepth = OldSpeculativeEvaluationDepth;
- }
+ Info->EvalStatus = OldStatus;
+ Info->SpeculativeEvaluationDepth = OldSpeculativeEvaluationDepth;
+ }
- public:
- SpeculativeEvaluationRAII() = default;
+public:
+ SpeculativeEvaluationRAII() = default;
- SpeculativeEvaluationRAII(
- EvalInfo &Info, SmallVectorImpl<PartialDiagnosticAt> *NewDiag = nullptr)
- : Info(&Info), OldStatus(Info.EvalStatus),
- OldSpeculativeEvaluationDepth(Info.SpeculativeEvaluationDepth) {
- Info.EvalStatus.Diag = NewDiag;
- Info.SpeculativeEvaluationDepth = Info.CallStackDepth + 1;
- }
+ SpeculativeEvaluationRAII(
+ EvalInfo &Info, SmallVectorImpl<PartialDiagnosticAt> *NewDiag = nullptr)
+ : Info(&Info), OldStatus(Info.EvalStatus),
+ OldSpeculativeEvaluationDepth(Info.SpeculativeEvaluationDepth) {
+ Info.EvalStatus.Diag = NewDiag;
+ Info.SpeculativeEvaluationDepth = Info.CallStackDepth + 1;
+ }
- SpeculativeEvaluationRAII(const SpeculativeEvaluationRAII &Other) = delete;
- SpeculativeEvaluationRAII(SpeculativeEvaluationRAII &&Other) {
- moveFromAndCancel(std::move(Other));
- }
+ SpeculativeEvaluationRAII(const SpeculativeEvaluationRAII &Other) = delete;
+ SpeculativeEvaluationRAII(SpeculativeEvaluationRAII &&Other) {
+ moveFromAndCancel(std::move(Other));
+ }
- SpeculativeEvaluationRAII &operator=(SpeculativeEvaluationRAII &&Other) {
- maybeRestoreState();
- moveFromAndCancel(std::move(Other));
- return *this;
- }
+ SpeculativeEvaluationRAII &operator=(SpeculativeEvaluationRAII &&Other) {
+ maybeRestoreState();
+ moveFromAndCancel(std::move(Other));
+ return *this;
+ }
- ~SpeculativeEvaluationRAII() { maybeRestoreState(); }
- };
+ ~SpeculativeEvaluationRAII() { maybeRestoreState(); }
+};
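
(Illustrative only, not from the patch: one user of this RAII is
__builtin_constant_p, whose operand is evaluated speculatively so a failed
evaluation produces no diagnostics.)

    int N = 5;
    bool K = __builtin_constant_p(N * 2); // speculation fails quietly;
                                          // K is simply false
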
- /// RAII object wrapping a full-expression or block scope, and handling
- /// the ending of the lifetime of temporaries created within it.
- template<ScopeKind Kind>
- class ScopeRAII {
- EvalInfo &Info;
- unsigned OldStackSize;
- public:
- ScopeRAII(EvalInfo &Info)
- : Info(Info), OldStackSize(Info.CleanupStack.size()) {
- // Push a new temporary version. This is needed to distinguish between
- // temporaries created in different iterations of a loop.
- Info.CurrentCall->pushTempVersion();
- }
- bool destroy(bool RunDestructors = true) {
- bool OK = cleanup(Info, RunDestructors, OldStackSize);
- OldStackSize = std::numeric_limits<unsigned>::max();
- return OK;
- }
- ~ScopeRAII() {
- if (OldStackSize != std::numeric_limits<unsigned>::max())
- destroy(false);
- // Body moved to a static method to encourage the compiler to inline away
- // instances of this class.
- Info.CurrentCall->popTempVersion();
- }
- private:
- static bool cleanup(EvalInfo &Info, bool RunDestructors,
- unsigned OldStackSize) {
- assert(OldStackSize <= Info.CleanupStack.size() &&
- "running cleanups out of order?");
+/// RAII object wrapping a full-expression or block scope, and handling
+/// the ending of the lifetime of temporaries created within it.
+template <ScopeKind Kind> class ScopeRAII {
+ EvalInfo &Info;
+ unsigned OldStackSize;
- // Run all cleanups for a block scope, and non-lifetime-extended cleanups
- // for a full-expression scope.
- bool Success = true;
- for (unsigned I = Info.CleanupStack.size(); I > OldStackSize; --I) {
- if (Info.CleanupStack[I - 1].isDestroyedAtEndOf(Kind)) {
- if (!Info.CleanupStack[I - 1].endLifetime(Info, RunDestructors)) {
- Success = false;
- break;
- }
+public:
+ ScopeRAII(EvalInfo &Info)
+ : Info(Info), OldStackSize(Info.CleanupStack.size()) {
+ // Push a new temporary version. This is needed to distinguish between
+ // temporaries created in different iterations of a loop.
+ Info.CurrentCall->pushTempVersion();
+ }
+ bool destroy(bool RunDestructors = true) {
+ bool OK = cleanup(Info, RunDestructors, OldStackSize);
+ OldStackSize = std::numeric_limits<unsigned>::max();
+ return OK;
+ }
+ ~ScopeRAII() {
+ if (OldStackSize != std::numeric_limits<unsigned>::max())
+ destroy(false);
+ // Body moved to a static method to encourage the compiler to inline away
+ // instances of this class.
+ Info.CurrentCall->popTempVersion();
+ }
+
+private:
+ static bool cleanup(EvalInfo &Info, bool RunDestructors,
+ unsigned OldStackSize) {
+ assert(OldStackSize <= Info.CleanupStack.size() &&
+ "running cleanups out of order?");
+
+ // Run all cleanups for a block scope, and non-lifetime-extended cleanups
+ // for a full-expression scope.
+ bool Success = true;
+ for (unsigned I = Info.CleanupStack.size(); I > OldStackSize; --I) {
+ if (Info.CleanupStack[I - 1].isDestroyedAtEndOf(Kind)) {
+ if (!Info.CleanupStack[I - 1].endLifetime(Info, RunDestructors)) {
+ Success = false;
+ break;
}
}
-
- // Compact any retained cleanups.
- auto NewEnd = Info.CleanupStack.begin() + OldStackSize;
- if (Kind != ScopeKind::Block)
- NewEnd =
- std::remove_if(NewEnd, Info.CleanupStack.end(), [](Cleanup &C) {
- return C.isDestroyedAtEndOf(Kind);
- });
- Info.CleanupStack.erase(NewEnd, Info.CleanupStack.end());
- return Success;
}
- };
- typedef ScopeRAII<ScopeKind::Block> BlockScopeRAII;
- typedef ScopeRAII<ScopeKind::FullExpression> FullExpressionRAII;
- typedef ScopeRAII<ScopeKind::Call> CallScopeRAII;
-}
+
+ // Compact any retained cleanups.
+ auto NewEnd = Info.CleanupStack.begin() + OldStackSize;
+ if (Kind != ScopeKind::Block)
+ NewEnd = std::remove_if(NewEnd, Info.CleanupStack.end(), [](Cleanup &C) {
+ return C.isDestroyedAtEndOf(Kind);
+ });
+ Info.CleanupStack.erase(NewEnd, Info.CleanupStack.end());
+ return Success;
+ }
+};
+typedef ScopeRAII<ScopeKind::Block> BlockScopeRAII;
+typedef ScopeRAII<ScopeKind::FullExpression> FullExpressionRAII;
+typedef ScopeRAII<ScopeKind::Call> CallScopeRAII;
+} // namespace
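
(A sketch, not part of the patch: the per-iteration temporary versions
pushed by ScopeRAII are what keep loop temporaries distinct.)

    constexpr int sum() {
      int Total = 0;
      for (int I = 0; I < 3; ++I) {
        const int &R = I * 2; // a fresh temporary each iteration, cleaned
                              // up by the block scope's destroy()
        Total += R;
      }
      return Total;
    }
    static_assert(sum() == 6);
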
bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
CheckSubobjectKind CSK) {
if (Invalid)
return false;
if (isOnePastTheEnd()) {
- Info.CCEDiag(E, diag::note_constexpr_past_end_subobject)
- << CSK;
+ Info.CCEDiag(E, diag::note_constexpr_past_end_subobject) << CSK;
setInvalid();
return false;
}
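
(Illustrative only, not from the patch: the one-past-the-end access this
rejects.)

    constexpr int A[3] = {1, 2, 3};
    constexpr const int *P = A + 3; // accepted: one-past-the-end pointer
    constexpr int V = *P;           // rejected: reads past the end of A
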
@@ -1417,11 +1391,9 @@ void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
// the most derived array.
if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement)
Info.CCEDiag(E, diag::note_constexpr_array_index)
- << N << /*array*/ 0
- << static_cast<unsigned>(getMostDerivedArraySize());
+ << N << /*array*/ 0 << static_cast<unsigned>(getMostDerivedArraySize());
else
- Info.CCEDiag(E, diag::note_constexpr_array_index)
- << N << /*non-array*/ 1;
+ Info.CCEDiag(E, diag::note_constexpr_array_index) << N << /*non-array*/ 1;
setInvalid();
}
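
(A sketch, not part of the patch: the out-of-bounds arithmetic this
diagnoses.)

    constexpr int A[4] = {};
    constexpr const int *P = A + 7; // rejected: cannot refer to element 7
                                    // of an array of 4 elements
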
@@ -1504,322 +1476,314 @@ static bool isValidIndeterminateAccess(AccessKinds AK) {
}
namespace {
- struct ComplexValue {
- private:
- bool IsInt;
+struct ComplexValue {
+private:
+ bool IsInt;
- public:
- APSInt IntReal, IntImag;
- APFloat FloatReal, FloatImag;
+public:
+ APSInt IntReal, IntImag;
+ APFloat FloatReal, FloatImag;
- ComplexValue() : FloatReal(APFloat::Bogus()), FloatImag(APFloat::Bogus()) {}
+ ComplexValue() : FloatReal(APFloat::Bogus()), FloatImag(APFloat::Bogus()) {}
- void makeComplexFloat() { IsInt = false; }
- bool isComplexFloat() const { return !IsInt; }
- APFloat &getComplexFloatReal() { return FloatReal; }
- APFloat &getComplexFloatImag() { return FloatImag; }
+ void makeComplexFloat() { IsInt = false; }
+ bool isComplexFloat() const { return !IsInt; }
+ APFloat &getComplexFloatReal() { return FloatReal; }
+ APFloat &getComplexFloatImag() { return FloatImag; }
- void makeComplexInt() { IsInt = true; }
- bool isComplexInt() const { return IsInt; }
- APSInt &getComplexIntReal() { return IntReal; }
- APSInt &getComplexIntImag() { return IntImag; }
+ void makeComplexInt() { IsInt = true; }
+ bool isComplexInt() const { return IsInt; }
+ APSInt &getComplexIntReal() { return IntReal; }
+ APSInt &getComplexIntImag() { return IntImag; }
- void moveInto(APValue &v) const {
- if (isComplexFloat())
- v = APValue(FloatReal, FloatImag);
- else
- v = APValue(IntReal, IntImag);
- }
- void setFrom(const APValue &v) {
- assert(v.isComplexFloat() || v.isComplexInt());
- if (v.isComplexFloat()) {
- makeComplexFloat();
- FloatReal = v.getComplexFloatReal();
- FloatImag = v.getComplexFloatImag();
- } else {
- makeComplexInt();
- IntReal = v.getComplexIntReal();
- IntImag = v.getComplexIntImag();
- }
+ void moveInto(APValue &v) const {
+ if (isComplexFloat())
+ v = APValue(FloatReal, FloatImag);
+ else
+ v = APValue(IntReal, IntImag);
+ }
+ void setFrom(const APValue &v) {
+ assert(v.isComplexFloat() || v.isComplexInt());
+ if (v.isComplexFloat()) {
+ makeComplexFloat();
+ FloatReal = v.getComplexFloatReal();
+ FloatImag = v.getComplexFloatImag();
+ } else {
+ makeComplexInt();
+ IntReal = v.getComplexIntReal();
+ IntImag = v.getComplexIntImag();
}
- };
+ }
+};
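
(Illustrative only, not from the patch: the values ComplexValue carries,
using Clang's _Complex extension in C++ and the __builtin_complex builtin.)

    constexpr _Complex double Z = __builtin_complex(1.0, 2.0);
    static_assert(__real__(Z) == 1.0 && __imag__(Z) == 2.0);
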
- struct LValue {
- APValue::LValueBase Base;
- CharUnits Offset;
- SubobjectDesignator Designator;
- bool IsNullPtr : 1;
- bool InvalidBase : 1;
- // P2280R4 track if we have an unknown reference or pointer.
- bool AllowConstexprUnknown = false;
-
- const APValue::LValueBase getLValueBase() const { return Base; }
- bool allowConstexprUnknown() const { return AllowConstexprUnknown; }
- CharUnits &getLValueOffset() { return Offset; }
- const CharUnits &getLValueOffset() const { return Offset; }
- SubobjectDesignator &getLValueDesignator() { return Designator; }
- const SubobjectDesignator &getLValueDesignator() const { return Designator;}
- bool isNullPointer() const { return IsNullPtr;}
-
- unsigned getLValueCallIndex() const { return Base.getCallIndex(); }
- unsigned getLValueVersion() const { return Base.getVersion(); }
-
- void moveInto(APValue &V) const {
- if (Designator.Invalid)
- V = APValue(Base, Offset, APValue::NoLValuePath(), IsNullPtr);
- else {
- assert(!InvalidBase && "APValues can't handle invalid LValue bases");
- V = APValue(Base, Offset, Designator.Entries,
- Designator.IsOnePastTheEnd, IsNullPtr);
- }
- if (AllowConstexprUnknown)
- V.setConstexprUnknown();
- }
- void setFrom(const ASTContext &Ctx, const APValue &V) {
- assert(V.isLValue() && "Setting LValue from a non-LValue?");
- Base = V.getLValueBase();
- Offset = V.getLValueOffset();
- InvalidBase = false;
- Designator = SubobjectDesignator(Ctx, V);
- IsNullPtr = V.isNullPointer();
- AllowConstexprUnknown = V.allowConstexprUnknown();
+struct LValue {
+ APValue::LValueBase Base;
+ CharUnits Offset;
+ SubobjectDesignator Designator;
+ bool IsNullPtr : 1;
+ bool InvalidBase : 1;
+ // P2280R4 track if we have an unknown reference or pointer.
+ bool AllowConstexprUnknown = false;
+
+ const APValue::LValueBase getLValueBase() const { return Base; }
+ bool allowConstexprUnknown() const { return AllowConstexprUnknown; }
+ CharUnits &getLValueOffset() { return Offset; }
+ const CharUnits &getLValueOffset() const { return Offset; }
+ SubobjectDesignator &getLValueDesignator() { return Designator; }
+ const SubobjectDesignator &getLValueDesignator() const { return Designator; }
+ bool isNullPointer() const { return IsNullPtr; }
+
+ unsigned getLValueCallIndex() const { return Base.getCallIndex(); }
+ unsigned getLValueVersion() const { return Base.getVersion(); }
+
+ void moveInto(APValue &V) const {
+ if (Designator.Invalid)
+ V = APValue(Base, Offset, APValue::NoLValuePath(), IsNullPtr);
+ else {
+ assert(!InvalidBase && "APValues can't handle invalid LValue bases");
+ V = APValue(Base, Offset, Designator.Entries, Designator.IsOnePastTheEnd,
+ IsNullPtr);
}
+ if (AllowConstexprUnknown)
+ V.setConstexprUnknown();
+ }
+ void setFrom(const ASTContext &Ctx, const APValue &V) {
+ assert(V.isLValue() && "Setting LValue from a non-LValue?");
+ Base = V.getLValueBase();
+ Offset = V.getLValueOffset();
+ InvalidBase = false;
+ Designator = SubobjectDesignator(Ctx, V);
+ IsNullPtr = V.isNullPointer();
+ AllowConstexprUnknown = V.allowConstexprUnknown();
+ }
- void set(APValue::LValueBase B, bool BInvalid = false) {
+ void set(APValue::LValueBase B, bool BInvalid = false) {
#ifndef NDEBUG
- // We only allow a few types of invalid bases. Enforce that here.
- if (BInvalid) {
- const auto *E = B.get<const Expr *>();
- assert((isa<MemberExpr>(E) || tryUnwrapAllocSizeCall(E)) &&
- "Unexpected type of invalid base");
- }
+ // We only allow a few types of invalid bases. Enforce that here.
+ if (BInvalid) {
+ const auto *E = B.get<const Expr *>();
+ assert((isa<MemberExpr>(E) || tryUnwrapAllocSizeCall(E)) &&
+ "Unexpected type of invalid base");
+ }
#endif
- Base = B;
- Offset = CharUnits::fromQuantity(0);
- InvalidBase = BInvalid;
- Designator = SubobjectDesignator(getType(B));
- IsNullPtr = false;
- AllowConstexprUnknown = false;
- }
+ Base = B;
+ Offset = CharUnits::fromQuantity(0);
+ InvalidBase = BInvalid;
+ Designator = SubobjectDesignator(getType(B));
+ IsNullPtr = false;
+ AllowConstexprUnknown = false;
+ }
- void setNull(ASTContext &Ctx, QualType PointerTy) {
- Base = (const ValueDecl *)nullptr;
- Offset =
- CharUnits::fromQuantity(Ctx.getTargetNullPointerValue(PointerTy));
- InvalidBase = false;
- Designator = SubobjectDesignator(PointerTy->getPointeeType());
- IsNullPtr = true;
- AllowConstexprUnknown = false;
- }
+ void setNull(ASTContext &Ctx, QualType PointerTy) {
+ Base = (const ValueDecl *)nullptr;
+ Offset = CharUnits::fromQuantity(Ctx.getTargetNullPointerValue(PointerTy));
+ InvalidBase = false;
+ Designator = SubobjectDesignator(PointerTy->getPointeeType());
+ IsNullPtr = true;
+ AllowConstexprUnknown = false;
+ }
- void setInvalid(APValue::LValueBase B, unsigned I = 0) {
- set(B, true);
- }
+ void setInvalid(APValue::LValueBase B, unsigned I = 0) { set(B, true); }
- std::string toString(ASTContext &Ctx, QualType T) const {
- APValue Printable;
- moveInto(Printable);
- return Printable.getAsString(Ctx, T);
- }
+ std::string toString(ASTContext &Ctx, QualType T) const {
+ APValue Printable;
+ moveInto(Printable);
+ return Printable.getAsString(Ctx, T);
+ }
- private:
- // Check that this LValue is not based on a null pointer. If it is, produce
- // a diagnostic and mark the designator as invalid.
- template <typename GenDiagType>
- bool checkNullPointerDiagnosingWith(const GenDiagType &GenDiag) {
- if (Designator.Invalid)
- return false;
- if (IsNullPtr) {
- GenDiag();
- Designator.setInvalid();
- return false;
- }
- return true;
+private:
+ // Check that this LValue is not based on a null pointer. If it is, produce
+ // a diagnostic and mark the designator as invalid.
+ template <typename GenDiagType>
+ bool checkNullPointerDiagnosingWith(const GenDiagType &GenDiag) {
+ if (Designator.Invalid)
+ return false;
+ if (IsNullPtr) {
+ GenDiag();
+ Designator.setInvalid();
+ return false;
}
+ return true;
+ }
- public:
- bool checkNullPointer(EvalInfo &Info, const Expr *E,
- CheckSubobjectKind CSK) {
- return checkNullPointerDiagnosingWith([&Info, E, CSK] {
- Info.CCEDiag(E, diag::note_constexpr_null_subobject) << CSK;
- });
- }
+public:
+ bool checkNullPointer(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
+ return checkNullPointerDiagnosingWith([&Info, E, CSK] {
+ Info.CCEDiag(E, diag::note_constexpr_null_subobject) << CSK;
+ });
+ }
- bool checkNullPointerForFoldAccess(EvalInfo &Info, const Expr *E,
- AccessKinds AK) {
- return checkNullPointerDiagnosingWith([&Info, E, AK] {
- if (AK == AccessKinds::AK_Dereference)
- Info.FFDiag(E, diag::note_constexpr_dereferencing_null);
- else
- Info.FFDiag(E, diag::note_constexpr_access_null) << AK;
- });
- }
+ bool checkNullPointerForFoldAccess(EvalInfo &Info, const Expr *E,
+ AccessKinds AK) {
+ return checkNullPointerDiagnosingWith([&Info, E, AK] {
+ if (AK == AccessKinds::AK_Dereference)
+ Info.FFDiag(E, diag::note_constexpr_dereferencing_null);
+ else
+ Info.FFDiag(E, diag::note_constexpr_access_null) << AK;
+ });
+ }
- // Check this LValue refers to an object. If not, set the designator to be
- // invalid and emit a diagnostic.
- bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
- return (CSK == CSK_ArrayToPointer || checkNullPointer(Info, E, CSK)) &&
- Designator.checkSubobject(Info, E, CSK);
- }
+ // Check this LValue refers to an object. If not, set the designator to be
+ // invalid and emit a diagnostic.
+ bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
+ return (CSK == CSK_ArrayToPointer || checkNullPointer(Info, E, CSK)) &&
+ Designator.checkSubobject(Info, E, CSK);
+ }
- void addDecl(EvalInfo &Info, const Expr *E,
- const Decl *D, bool Virtual = false) {
- if (checkSubobject(Info, E, isa<FieldDecl>(D) ? CSK_Field : CSK_Base))
- Designator.addDeclUnchecked(D, Virtual);
- }
- void addUnsizedArray(EvalInfo &Info, const Expr *E, QualType ElemTy) {
- if (!Designator.Entries.empty()) {
- Info.CCEDiag(E, diag::note_constexpr_unsupported_unsized_array);
- Designator.setInvalid();
- return;
- }
- if (checkSubobject(Info, E, CSK_ArrayToPointer)) {
- assert(getType(Base).getNonReferenceType()->isPointerType() ||
- getType(Base).getNonReferenceType()->isArrayType());
- Designator.FirstEntryIsAnUnsizedArray = true;
- Designator.addUnsizedArrayUnchecked(ElemTy);
- }
- }
- void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
- if (checkSubobject(Info, E, CSK_ArrayToPointer))
- Designator.addArrayUnchecked(CAT);
- }
- void addComplex(EvalInfo &Info, const Expr *E, QualType EltTy, bool Imag) {
- if (checkSubobject(Info, E, Imag ? CSK_Imag : CSK_Real))
- Designator.addComplexUnchecked(EltTy, Imag);
- }
- void addVectorElement(EvalInfo &Info, const Expr *E, QualType EltTy,
- uint64_t Size, uint64_t Idx) {
- if (checkSubobject(Info, E, CSK_VectorElement))
- Designator.addVectorElementUnchecked(EltTy, Size, Idx);
+ void addDecl(EvalInfo &Info, const Expr *E, const Decl *D,
+ bool Virtual = false) {
+ if (checkSubobject(Info, E, isa<FieldDecl>(D) ? CSK_Field : CSK_Base))
+ Designator.addDeclUnchecked(D, Virtual);
+ }
+ void addUnsizedArray(EvalInfo &Info, const Expr *E, QualType ElemTy) {
+ if (!Designator.Entries.empty()) {
+ Info.CCEDiag(E, diag::note_constexpr_unsupported_unsized_array);
+ Designator.setInvalid();
+ return;
}
- void clearIsNullPointer() {
- IsNullPtr = false;
+ if (checkSubobject(Info, E, CSK_ArrayToPointer)) {
+ assert(getType(Base).getNonReferenceType()->isPointerType() ||
+ getType(Base).getNonReferenceType()->isArrayType());
+ Designator.FirstEntryIsAnUnsizedArray = true;
+ Designator.addUnsizedArrayUnchecked(ElemTy);
}
- void adjustOffsetAndIndex(EvalInfo &Info, const Expr *E,
- const APSInt &Index, CharUnits ElementSize) {
- // An index of 0 has no effect. (In C, adding 0 to a null pointer is UB,
- // but we're not required to diagnose it and it's valid in C++.)
- if (!Index)
- return;
-
- // Compute the new offset in the appropriate width, wrapping at 64 bits.
- // FIXME: When compiling for a 32-bit target, we should use 32-bit
- // offsets.
- uint64_t Offset64 = Offset.getQuantity();
- uint64_t ElemSize64 = ElementSize.getQuantity();
- uint64_t Index64 = Index.extOrTrunc(64).getZExtValue();
- Offset = CharUnits::fromQuantity(Offset64 + ElemSize64 * Index64);
+ }
+ void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
+ if (checkSubobject(Info, E, CSK_ArrayToPointer))
+ Designator.addArrayUnchecked(CAT);
+ }
+ void addComplex(EvalInfo &Info, const Expr *E, QualType EltTy, bool Imag) {
+ if (checkSubobject(Info, E, Imag ? CSK_Imag : CSK_Real))
+ Designator.addComplexUnchecked(EltTy, Imag);
+ }
+ void addVectorElement(EvalInfo &Info, const Expr *E, QualType EltTy,
+ uint64_t Size, uint64_t Idx) {
+ if (checkSubobject(Info, E, CSK_VectorElement))
+ Designator.addVectorElementUnchecked(EltTy, Size, Idx);
+ }
+ void clearIsNullPointer() { IsNullPtr = false; }
+ void adjustOffsetAndIndex(EvalInfo &Info, const Expr *E, const APSInt &Index,
+ CharUnits ElementSize) {
+ // An index of 0 has no effect. (In C, adding 0 to a null pointer is UB,
+ // but we're not required to diagnose it and it's valid in C++.)
+ if (!Index)
+ return;
- if (checkNullPointer(Info, E, CSK_ArrayIndex))
- Designator.adjustIndex(Info, E, Index, *this);
+ // Compute the new offset in the appropriate width, wrapping at 64 bits.
+ // FIXME: When compiling for a 32-bit target, we should use 32-bit
+ // offsets.
+ uint64_t Offset64 = Offset.getQuantity();
+ uint64_t ElemSize64 = ElementSize.getQuantity();
+ uint64_t Index64 = Index.extOrTrunc(64).getZExtValue();
+ Offset = CharUnits::fromQuantity(Offset64 + ElemSize64 * Index64);
+
+ if (checkNullPointer(Info, E, CSK_ArrayIndex))
+ Designator.adjustIndex(Info, E, Index, *this);
+ clearIsNullPointer();
+ }
+ void adjustOffset(CharUnits N) {
+ Offset += N;
+ if (N.getQuantity())
clearIsNullPointer();
- }
- void adjustOffset(CharUnits N) {
- Offset += N;
- if (N.getQuantity())
- clearIsNullPointer();
- }
- };
+ }
+};
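
(A sketch, not part of the patch: the null-pointer rules encoded in
adjustOffsetAndIndex.)

    constexpr int *NP = nullptr;
    constexpr int *N0 = NP + 0; // accepted: an index of 0 has no effect
    constexpr int *N1 = NP + 1; // rejected: arithmetic on a null pointer
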
- struct MemberPtr {
- MemberPtr() {}
- explicit MemberPtr(const ValueDecl *Decl)
- : DeclAndIsDerivedMember(Decl, false) {}
-
- /// The member or (direct or indirect) field referred to by this member
- /// pointer, or 0 if this is a null member pointer.
- const ValueDecl *getDecl() const {
- return DeclAndIsDerivedMember.getPointer();
- }
- /// Is this actually a member of some type derived from the relevant class?
- bool isDerivedMember() const {
- return DeclAndIsDerivedMember.getInt();
- }
- /// Get the class which the declaration actually lives in.
- const CXXRecordDecl *getContainingRecord() const {
- return cast<CXXRecordDecl>(
- DeclAndIsDerivedMember.getPointer()->getDeclContext());
- }
-
- void moveInto(APValue &V) const {
- V = APValue(getDecl(), isDerivedMember(), Path);
- }
- void setFrom(const APValue &V) {
- assert(V.isMemberPointer());
- DeclAndIsDerivedMember.setPointer(V.getMemberPointerDecl());
- DeclAndIsDerivedMember.setInt(V.isMemberPointerToDerivedMember());
- Path.clear();
- llvm::append_range(Path, V.getMemberPointerPath());
- }
-
- /// DeclAndIsDerivedMember - The member declaration, and a flag indicating
- /// whether the member is a member of some class derived from the class type
- /// of the member pointer.
- llvm::PointerIntPair<const ValueDecl*, 1, bool> DeclAndIsDerivedMember;
- /// Path - The path of base/derived classes from the member declaration's
- /// class (exclusive) to the class type of the member pointer (inclusive).
- SmallVector<const CXXRecordDecl*, 4> Path;
-
- /// Perform a cast towards the class of the Decl (either up or down the
- /// hierarchy).
- bool castBack(const CXXRecordDecl *Class) {
- assert(!Path.empty());
- const CXXRecordDecl *Expected;
- if (Path.size() >= 2)
- Expected = Path[Path.size() - 2];
- else
- Expected = getContainingRecord();
- if (Expected->getCanonicalDecl() != Class->getCanonicalDecl()) {
- // C++11 [expr.static.cast]p12: In a conversion from (D::*) to (B::*),
- // if B does not contain the original member and is not a base or
- // derived class of the class containing the original member, the result
- // of the cast is undefined.
- // C++11 [conv.mem]p2 does not cover this case for a cast from (B::*) to
- // (D::*). We consider that to be a language defect.
- return false;
- }
- Path.pop_back();
- return true;
+struct MemberPtr {
+ MemberPtr() {}
+ explicit MemberPtr(const ValueDecl *Decl)
+ : DeclAndIsDerivedMember(Decl, false) {}
+
+ /// The member or (direct or indirect) field referred to by this member
+ /// pointer, or 0 if this is a null member pointer.
+ const ValueDecl *getDecl() const {
+ return DeclAndIsDerivedMember.getPointer();
+ }
+ /// Is this actually a member of some type derived from the relevant class?
+ bool isDerivedMember() const { return DeclAndIsDerivedMember.getInt(); }
+ /// Get the class which the declaration actually lives in.
+ const CXXRecordDecl *getContainingRecord() const {
+ return cast<CXXRecordDecl>(
+ DeclAndIsDerivedMember.getPointer()->getDeclContext());
+ }
+
+ void moveInto(APValue &V) const {
+ V = APValue(getDecl(), isDerivedMember(), Path);
+ }
+ void setFrom(const APValue &V) {
+ assert(V.isMemberPointer());
+ DeclAndIsDerivedMember.setPointer(V.getMemberPointerDecl());
+ DeclAndIsDerivedMember.setInt(V.isMemberPointerToDerivedMember());
+ Path.clear();
+ llvm::append_range(Path, V.getMemberPointerPath());
+ }
+
+ /// DeclAndIsDerivedMember - The member declaration, and a flag indicating
+ /// whether the member is a member of some class derived from the class type
+ /// of the member pointer.
+ llvm::PointerIntPair<const ValueDecl *, 1, bool> DeclAndIsDerivedMember;
+ /// Path - The path of base/derived classes from the member declaration's
+ /// class (exclusive) to the class type of the member pointer (inclusive).
+ SmallVector<const CXXRecordDecl *, 4> Path;
+
+ /// Perform a cast towards the class of the Decl (either up or down the
+ /// hierarchy).
+ bool castBack(const CXXRecordDecl *Class) {
+ assert(!Path.empty());
+ const CXXRecordDecl *Expected;
+ if (Path.size() >= 2)
+ Expected = Path[Path.size() - 2];
+ else
+ Expected = getContainingRecord();
+ if (Expected->getCanonicalDecl() != Class->getCanonicalDecl()) {
+ // C++11 [expr.static.cast]p12: In a conversion from (D::*) to (B::*),
+ // if B does not contain the original member and is not a base or
+ // derived class of the class containing the original member, the result
+ // of the cast is undefined.
+ // C++11 [conv.mem]p2 does not cover this case for a cast from (B::*) to
+ // (D::*). We consider that to be a language defect.
+ return false;
}
- /// Perform a base-to-derived member pointer cast.
- bool castToDerived(const CXXRecordDecl *Derived) {
- if (!getDecl())
- return true;
- if (!isDerivedMember()) {
- Path.push_back(Derived);
- return true;
- }
- if (!castBack(Derived))
- return false;
- if (Path.empty())
- DeclAndIsDerivedMember.setInt(false);
+ Path.pop_back();
+ return true;
+ }
+ /// Perform a base-to-derived member pointer cast.
+ bool castToDerived(const CXXRecordDecl *Derived) {
+ if (!getDecl())
+ return true;
+ if (!isDerivedMember()) {
+ Path.push_back(Derived);
return true;
}
- /// Perform a derived-to-base member pointer cast.
- bool castToBase(const CXXRecordDecl *Base) {
- if (!getDecl())
- return true;
- if (Path.empty())
- DeclAndIsDerivedMember.setInt(true);
- if (isDerivedMember()) {
- Path.push_back(Base);
- return true;
- }
- return castBack(Base);
- }
- };
-
- /// Compare two member pointers, which are assumed to be of the same type.
- static bool operator==(const MemberPtr &LHS, const MemberPtr &RHS) {
- if (!LHS.getDecl() || !RHS.getDecl())
- return !LHS.getDecl() && !RHS.getDecl();
- if (LHS.getDecl()->getCanonicalDecl() != RHS.getDecl()->getCanonicalDecl())
+ if (!castBack(Derived))
return false;
- return LHS.Path == RHS.Path;
+ if (Path.empty())
+ DeclAndIsDerivedMember.setInt(false);
+ return true;
}
+ /// Perform a derived-to-base member pointer cast.
+ bool castToBase(const CXXRecordDecl *Base) {
+ if (!getDecl())
+ return true;
+ if (Path.empty())
+ DeclAndIsDerivedMember.setInt(true);
+ if (isDerivedMember()) {
+ Path.push_back(Base);
+ return true;
+ }
+ return castBack(Base);
+ }
+};
+
+/// Compare two member pointers, which are assumed to be of the same type.
+static bool operator==(const MemberPtr &LHS, const MemberPtr &RHS) {
+ if (!LHS.getDecl() || !RHS.getDecl())
+ return !LHS.getDecl() && !RHS.getDecl();
+ if (LHS.getDecl()->getCanonicalDecl() != RHS.getDecl()->getCanonicalDecl())
+ return false;
+ return LHS.Path == RHS.Path;
}
+} // namespace
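
(Illustrative only, not from the patch: the casts castToDerived and
castToBase model, with Path growing and shrinking.)

    struct B { int V; };
    struct D : B {};
    constexpr int B::*PB = &B::V;
    constexpr int D::*PD = PB;                         // base-to-derived:
                                                       // Path grows
    constexpr int B::*PB2 = static_cast<int B::*>(PD); // castBack pops the
                                                       // Path entry
    static_assert(PB2 == PB); // compared by the operator== above
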
void SubobjectDesignator::adjustIndex(EvalInfo &Info, const Expr *E, APSInt N,
const LValue &LV) {
@@ -1870,9 +1834,8 @@ void SubobjectDesignator::adjustIndex(EvalInfo &Info, const Expr *E, APSInt N,
}
static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E);
-static bool EvaluateInPlace(APValue &Result, EvalInfo &Info,
- const LValue &This, const Expr *E,
- bool AllowNonLiteralTypes = false);
+static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
+ const Expr *E, bool AllowNonLiteralTypes = false);
static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info,
bool InvalidBaseOK = false);
static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info,
@@ -1914,7 +1877,7 @@ static void negateAsSigned(APSInt &Int) {
Int = -Int;
}
-template<typename KeyT>
+template <typename KeyT>
APValue &CallStackFrame::createTemporary(const KeyT *Key, QualType T,
ScopeKind Scope, LValue &LV) {
unsigned Version = getTempVersion();
@@ -1987,9 +1950,9 @@ void CallStackFrame::describe(raw_ostream &Out) const {
Object->printPretty(Out, /*Helper=*/nullptr, Info.Ctx.getPrintingPolicy(),
/*Indentation=*/0);
if (Object->getType()->isPointerType())
- Out << "->";
+ Out << "->";
else
- Out << ".";
+ Out << ".";
} else if (const auto *OCE =
dyn_cast_if_present<CXXOperatorCallExpr>(CallExpr)) {
OCE->getArg(0)->printPretty(Out, /*Helper=*/nullptr,
@@ -2012,7 +1975,8 @@ void CallStackFrame::describe(raw_ostream &Out) const {
Out << '(';
for (FunctionDecl::param_const_iterator I = Callee->param_begin(),
- E = Callee->param_end(); I != E; ++I, ++ArgIndex) {
+ E = Callee->param_end();
+ I != E; ++I, ++ArgIndex) {
if (ArgIndex > (unsigned)IsMemberCall)
Out << ", ";
@@ -2066,7 +2030,7 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
if (!B)
return true;
- if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl *>()) {
// ... the address of an object with static storage duration,
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VD->hasGlobalStorage();
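
(A sketch, not part of the patch: the static-storage requirement this
enforces.)

    constexpr int G = 1;
    constexpr const int *PG = &G; // accepted: static storage duration
    void F() {
      int L = 0;
      constexpr const int *PL = &L; // rejected: an automatic variable's
                                    // address is not a global lvalue
    }
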
@@ -2081,7 +2045,7 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
if (B.is<TypeInfoLValue>() || B.is<DynamicAllocLValue>())
return true;
- const Expr *E = B.get<const Expr*>();
+ const Expr *E = B.get<const Expr *>();
switch (E->getStmtClass()) {
default:
return false;
@@ -2126,7 +2090,7 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
}
static const ValueDecl *GetLValueBaseDecl(const LValue &LVal) {
- return LVal.Base.dyn_cast<const ValueDecl*>();
+ return LVal.Base.dyn_cast<const ValueDecl *>();
}
// Information about an LValueBase that is some kind of string.
@@ -2243,8 +2207,7 @@ static bool HasSameBase(const LValue &A, const LValue &B) {
if (!B.getLValueBase())
return false;
- if (A.getLValueBase().getOpaqueValue() !=
- B.getLValueBase().getOpaqueValue())
+ if (A.getLValueBase().getOpaqueValue() != B.getLValueBase().getOpaqueValue())
return false;
return A.getLValueCallIndex() == B.getLValueCallIndex() &&
@@ -2253,7 +2216,7 @@ static bool HasSameBase(const LValue &A, const LValue &B) {
static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
assert(Base && "no location for a null lvalue");
- const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
+ const ValueDecl *VD = Base.dyn_cast<const ValueDecl *>();
// For a parameter, find the corresponding call stack frame (if it still
// exists), and point at the parameter of the function definition we actually
@@ -2272,7 +2235,7 @@ static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
if (VD)
Info.Note(VD->getLocation(), diag::note_declared_at);
- else if (const Expr *E = Base.dyn_cast<const Expr*>())
+ else if (const Expr *E = Base.dyn_cast<const Expr *>())
Info.Note(E->getExprLoc(), diag::note_constexpr_temporary_here);
else if (DynamicAllocLValue DA = Base.dyn_cast<DynamicAllocLValue>()) {
// FIXME: Produce a note for dangling pointers too.
@@ -2314,7 +2277,7 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
const SubobjectDesignator &Designator = LVal.getLValueDesignator();
const Expr *BaseE = Base.dyn_cast<const Expr *>();
- const ValueDecl *BaseVD = Base.dyn_cast<const ValueDecl*>();
+ const ValueDecl *BaseVD = Base.dyn_cast<const ValueDecl *>();
// Additional restrictions apply in a template argument. We only enforce the
// C++20 restrictions here; additional syntactic and semantic restrictions
@@ -2475,7 +2438,7 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
// Does this refer one past the end of some object?
if (!Designator.Invalid && Designator.isOnePastTheEnd()) {
Info.FFDiag(Loc, diag::note_constexpr_past_end, 1)
- << !Designator.Entries.empty() << !!BaseVD << BaseVD;
+ << !Designator.Entries.empty() << !!BaseVD << BaseVD;
NoteLValueLocation(Info, Base);
}
@@ -2531,8 +2494,7 @@ static bool CheckLiteralType(EvalInfo &Info, const Expr *E,
// Prvalue constant expressions must be of literal types.
if (Info.getLangOpts().CPlusPlus11)
- Info.FFDiag(E, diag::note_constexpr_nonliteral)
- << E->getType();
+ Info.FFDiag(E, diag::note_constexpr_nonliteral) << E->getType();
else
Info.FFDiag(E, diag::note_invalid_subexpr_in_const_expr);
return false;
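
(Illustrative only, not from the patch: a non-literal prvalue landing in
this diagnostic.)

    struct S { ~S(); };             // non-literal: non-constexpr destructor
    constexpr bool B = (S(), true); // rejected: non-literal type 'S' cannot
                                    // be used in a constant expression
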
@@ -2624,7 +2586,8 @@ static bool CheckEvaluationResult(CheckEvaluationResultKind CERK,
if (Value.isMemberPointer() &&
CERK == CheckEvaluationResultKind::ConstantExpression)
- return CheckMemberPointerConstantExpression(Info, DiagLoc, Type, Value, Kind);
+ return CheckMemberPointerConstantExpression(Info, DiagLoc, Type, Value,
+ Kind);
// Everything else is fine.
return true;
@@ -2682,7 +2645,7 @@ static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
// We have a non-null base. These are generally known to be true, but if it's
// a weak declaration it can be null at runtime.
Result = true;
- const ValueDecl *Decl = Value.getLValueBase().dyn_cast<const ValueDecl*>();
+ const ValueDecl *Decl = Value.getLValueBase().dyn_cast<const ValueDecl *>();
return !Decl || !Decl->isWeak();
}
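
(A sketch, not part of the patch: the weak-declaration escape hatch.)

    extern int W __attribute__((weak));
    constexpr bool HasW = &W != nullptr; // rejected: a weak symbol may
                                         // resolve to null at runtime, so
                                         // the test cannot be folded
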
@@ -2738,11 +2701,10 @@ static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
return HandleConversionToBool(Val, Result);
}
-template<typename T>
-static bool HandleOverflow(EvalInfo &Info, const Expr *E,
- const T &SrcValue, QualType DestType) {
- Info.CCEDiag(E, diag::note_constexpr_overflow)
- << SrcValue << DestType;
+template <typename T>
+static bool HandleOverflow(EvalInfo &Info, const Expr *E, const T &SrcValue,
+ QualType DestType) {
+ Info.CCEDiag(E, diag::note_constexpr_overflow) << SrcValue << DestType;
return Info.noteUndefinedBehavior();
}
@@ -2755,8 +2717,8 @@ static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
Result = APSInt(DestWidth, !DestSigned);
bool ignored;
- if (Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored)
- & APFloat::opInvalidOp)
+ if (Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored) &
+ APFloat::opInvalidOp)
return HandleOverflow(Info, E, Value, DestType);
return true;
}
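
(Illustrative only, not from the patch: an out-of-range float-to-int
conversion caught by the opInvalidOp check.)

    constexpr int I = (int)1.0e10; // rejected via HandleOverflow: 1.0e10
                                   // is outside the range of 'int'
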
@@ -2845,17 +2807,17 @@ static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
}
static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
- const FPOptions FPO,
- QualType SrcType, const APSInt &Value,
- QualType DestType, APFloat &Result) {
+ const FPOptions FPO, QualType SrcType,
+ const APSInt &Value, QualType DestType,
+ APFloat &Result) {
Result = APFloat(Info.Ctx.getFloatTypeSemantics(DestType), 1);
llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
APFloat::opStatus St = Result.convertFromAPInt(Value, Value.isSigned(), RM);
return checkFloatingPointResult(Info, E, St);
}
-static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E,
- APValue &Value, const FieldDecl *FD) {
+static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E, APValue &Value,
+ const FieldDecl *FD) {
assert(FD->isBitField() && "truncateBitfieldValue on non-bitfield");
if (!Value.isInt()) {
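
(A sketch, not part of the patch: the wrap-to-width behavior
truncateBitfieldValue implements.)

    struct S { int B : 3; };
    constexpr int F() {
      S X{};
      X.B = 12;   // only the low 3 bits survive: 0b100
      return X.B; // sign-extended back out as -4
    }
    static_assert(F() == -4);
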
@@ -2878,7 +2840,7 @@ static bool truncateBitfieldValue(EvalInfo &Info, const Expr *E,
/// Perform the given integer operation, which is known to need at most BitWidth
/// bits, and check for overflow in the original type (if that type was not an
/// unsigned type).
-template<typename Operation>
+template <typename Operation>
static bool CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
const APSInt &LHS, const APSInt &RHS,
unsigned BitWidth, Operation Op,
@@ -2920,9 +2882,15 @@ static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E,
case BO_Sub:
return CheckedIntArithmetic(Info, E, LHS, RHS, LHS.getBitWidth() + 1,
std::minus<APSInt>(), Result);
- case BO_And: Result = LHS & RHS; return true;
- case BO_Xor: Result = LHS ^ RHS; return true;
- case BO_Or: Result = LHS | RHS; return true;
+ case BO_And:
+ Result = LHS & RHS;
+ return true;
+ case BO_Xor:
+ Result = LHS ^ RHS;
+ return true;
+ case BO_Or:
+ Result = LHS | RHS;
+ return true;
case BO_Div:
case BO_Rem:
if (RHS == 0) {
@@ -2942,7 +2910,7 @@ static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E,
if (Info.getLangOpts().OpenCL)
// OpenCL 6.3j: shift values are effectively % word size of LHS.
RHS &= APSInt(llvm::APInt(RHS.getBitWidth(),
- static_cast<uint64_t>(LHS.getBitWidth() - 1)),
+ static_cast<uint64_t>(LHS.getBitWidth() - 1)),
RHS.isUnsigned());
else if (RHS.isSigned() && RHS.isNegative()) {
// During constant-folding, a negative shift is an opposite shift. Such
@@ -2956,10 +2924,10 @@ static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E,
shift_left:
// C++11 [expr.shift]p1: Shift width must be less than the bit width of
// the shifted type.
- unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
+ unsigned SA = (unsigned)RHS.getLimitedValue(LHS.getBitWidth() - 1);
if (SA != RHS) {
Info.CCEDiag(E, diag::note_constexpr_large_shift)
- << RHS << E->getType() << LHS.getBitWidth();
+ << RHS << E->getType() << LHS.getBitWidth();
if (!Info.noteUndefinedBehavior())
return false;
} else if (LHS.isSigned() && !Info.getLangOpts().CPlusPlus20) {
@@ -2984,7 +2952,7 @@ static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E,
if (Info.getLangOpts().OpenCL)
// OpenCL 6.3j: shift values are effectively % word size of LHS.
RHS &= APSInt(llvm::APInt(RHS.getBitWidth(),
- static_cast<uint64_t>(LHS.getBitWidth() - 1)),
+ static_cast<uint64_t>(LHS.getBitWidth() - 1)),
RHS.isUnsigned());
else if (RHS.isSigned() && RHS.isNegative()) {
// During constant-folding, a negative shift is an opposite shift. Such a
@@ -2998,10 +2966,10 @@ static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E,
shift_right:
// C++11 [expr.shift]p1: Shift width must be less than the bit width of the
// shifted type.
- unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
+ unsigned SA = (unsigned)RHS.getLimitedValue(LHS.getBitWidth() - 1);
if (SA != RHS) {
Info.CCEDiag(E, diag::note_constexpr_large_shift)
- << RHS << E->getType() << LHS.getBitWidth();
+ << RHS << E->getType() << LHS.getBitWidth();
if (!Info.noteUndefinedBehavior())
return false;
}
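The shift hunks above share one policy: under OpenCL the shift count is masked to
width - 1, while in C/C++ a count that does not fit below the bit width of the
promoted left operand draws note_constexpr_large_shift. A rough sketch of that
decision, assuming a fixed 32-bit unsigned operand instead of APSInt (negative
counts, which the evaluator folds as the opposite shift with a diagnostic, are
simply rejected here for brevity):

constexpr bool shlConst(unsigned lhs, int rhs, bool opencl, unsigned &out) {
  constexpr unsigned Width = 32;
  if (opencl)
    rhs &= Width - 1;                     // OpenCL 6.3j: count mod word size
  else if (rhs < 0 || unsigned(rhs) >= Width)
    return false;                         // over-wide shift: not a constant
  out = lhs << unsigned(rhs);
  return true;
}
static_assert([] { unsigned r = 0; return shlConst(1u, 33, true, r) && r == 2u; }());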
@@ -3010,12 +2978,24 @@ static bool handleIntIntBinOp(EvalInfo &Info, const BinaryOperator *E,
return true;
}
- case BO_LT: Result = LHS < RHS; return true;
- case BO_GT: Result = LHS > RHS; return true;
- case BO_LE: Result = LHS <= RHS; return true;
- case BO_GE: Result = LHS >= RHS; return true;
- case BO_EQ: Result = LHS == RHS; return true;
- case BO_NE: Result = LHS != RHS; return true;
+ case BO_LT:
+ Result = LHS < RHS;
+ return true;
+ case BO_GT:
+ Result = LHS > RHS;
+ return true;
+ case BO_LE:
+ Result = LHS <= RHS;
+ return true;
+ case BO_GE:
+ Result = LHS >= RHS;
+ return true;
+ case BO_EQ:
+ Result = LHS == RHS;
+ return true;
+ case BO_NE:
+ Result = LHS != RHS;
+ return true;
case BO_Cmp:
llvm_unreachable("BO_Cmp should be handled elsewhere");
}
@@ -3233,7 +3213,8 @@ static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result,
// Truncate the path to the subobject, and remove any derived-to-base offsets.
const RecordDecl *RD = TruncatedType;
for (unsigned I = TruncatedElements, N = D.Entries.size(); I != N; ++I) {
- if (RD->isInvalidDecl()) return false;
+ if (RD->isInvalidDecl())
+ return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
const CXXRecordDecl *Base = getAsBaseClass(D.Entries[I]);
if (isVirtualBaseClass(D.Entries[I]))
@@ -3251,7 +3232,8 @@ static bool HandleLValueDirectBase(EvalInfo &Info, const Expr *E, LValue &Obj,
const CXXRecordDecl *Base,
const ASTRecordLayout *RL = nullptr) {
if (!RL) {
- if (Derived->isInvalidDecl()) return false;
+ if (Derived->isInvalidDecl())
+ return false;
RL = &Info.Ctx.getASTRecordLayout(Derived);
}
@@ -3282,7 +3264,8 @@ static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj,
return false;
// Find the virtual base class.
- if (DerivedDecl->isInvalidDecl()) return false;
+ if (DerivedDecl->isInvalidDecl())
+ return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(DerivedDecl);
Obj.addDecl(Info, E, BaseDecl, /*Virtual*/ true);
Obj.getLValueOffset() += Layout.getVBaseClassOffset(BaseDecl);
@@ -3294,8 +3277,7 @@ static bool HandleLValueBasePath(EvalInfo &Info, const CastExpr *E,
for (CastExpr::path_const_iterator PathI = E->path_begin(),
PathE = E->path_end();
PathI != PathE; ++PathI) {
- if (!HandleLValueBase(Info, E, Result, Type->getAsCXXRecordDecl(),
- *PathI))
+ if (!HandleLValueBase(Info, E, Result, Type->getAsCXXRecordDecl(), *PathI))
return false;
Type = (*PathI)->getType();
}
@@ -3323,7 +3305,8 @@ static bool HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal,
const FieldDecl *FD,
const ASTRecordLayout *RL = nullptr) {
if (!RL) {
- if (FD->getParent()->isInvalidDecl()) return false;
+ if (FD->getParent()->isInvalidDecl())
+ return false;
RL = &Info.Ctx.getASTRecordLayout(FD->getParent());
}
@@ -3521,8 +3504,7 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
!Info.CurrentCall->Callee ||
!Info.CurrentCall->Callee->Equals(VD->getDeclContext())) {
if (Info.getLangOpts().CPlusPlus11) {
- Info.FFDiag(E, diag::note_constexpr_function_param_value_unknown)
- << VD;
+ Info.FFDiag(E, diag::note_constexpr_function_param_value_unknown) << VD;
NoteLValueLocation(Info, Base);
} else {
Info.FFDiag(E);
@@ -3546,8 +3528,7 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
// Don't diagnose during potential constant expression checking; an
// initializer might be added later.
if (!Info.checkingPotentialConstantExpression()) {
- Info.FFDiag(E, diag::note_constexpr_var_init_unknown, 1)
- << VD;
+ Info.FFDiag(E, diag::note_constexpr_var_init_unknown, 1) << VD;
NoteLValueLocation(Info, Base);
}
return false;
@@ -3565,9 +3546,11 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
// have been value-dependent too), so diagnose that.
assert(!VD->mightBeUsableInConstantExpressions(Info.Ctx));
if (!Info.checkingPotentialConstantExpression()) {
- Info.FFDiag(E, Info.getLangOpts().CPlusPlus11
- ? diag::note_constexpr_ltor_non_constexpr
- : diag::note_constexpr_ltor_non_integral, 1)
+ Info.FFDiag(E,
+ Info.getLangOpts().CPlusPlus11
+ ? diag::note_constexpr_ltor_non_constexpr
+ : diag::note_constexpr_ltor_non_integral,
+ 1)
<< VD << VD->getType();
NoteLValueLocation(Info, Base);
}
@@ -3631,7 +3614,8 @@ static unsigned getBaseIndex(const CXXRecordDecl *Derived,
Base = Base->getCanonicalDecl();
unsigned Index = 0;
for (CXXRecordDecl::base_class_const_iterator I = Derived->bases_begin(),
- E = Derived->bases_end(); I != E; ++I, ++Index) {
+ E = Derived->bases_end();
+ I != E; ++I, ++Index) {
if (I->getType()->getAsCXXRecordDecl()->getCanonicalDecl() == Base)
return Index;
}
@@ -3656,8 +3640,7 @@ static APSInt extractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit,
if (auto PE = dyn_cast<PredefinedExpr>(Lit))
Lit = PE->getFunctionName();
const StringLiteral *S = cast<StringLiteral>(Lit);
- const ConstantArrayType *CAT =
- Info.Ctx.getAsConstantArrayType(S->getType());
+ const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(S->getType());
assert(CAT && "string literal isn't an array");
QualType CharType = CAT->getElementType();
assert(CharType->isIntegerType() && "unexpected character type");
@@ -3682,8 +3665,8 @@ static void expandStringLiteral(EvalInfo &Info, const StringLiteral *S,
assert(CharType->isIntegerType() && "unexpected character type");
unsigned Elts = CAT->getZExtSize();
- Result = APValue(APValue::UninitArray(),
- std::min(S->getLength(), Elts), Elts);
+ Result =
+ APValue(APValue::UninitArray(), std::min(S->getLength(), Elts), Elts);
APSInt Value(Info.Ctx.getTypeSize(CharType),
CharType->isUnsignedIntegerType());
if (Result.hasArrayFiller())
@@ -3701,7 +3684,7 @@ static void expandArray(APValue &Array, unsigned Index) {
// Always at least double the number of elements for which we store a value.
unsigned OldElts = Array.getArrayInitializedElts();
- unsigned NewElts = std::max(Index+1, OldElts * 2);
+ unsigned NewElts = std::max(Index + 1, OldElts * 2);
NewElts = std::min(Size, std::max(NewElts, 8u));
// Copy the data across.
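expandArray's sizing line is a classic amortized-growth policy: grow to at least
Index + 1, at least double, never below 8 slots, and never past the declared array
size. A direct, self-checking restatement of that arithmetic:

#include <algorithm>

constexpr unsigned nextCapacity(unsigned Index, unsigned OldElts, unsigned Size) {
  unsigned NewElts = std::max(Index + 1, OldElts * 2);
  return std::min(Size, std::max(NewElts, 8u));
}
static_assert(nextCapacity(0, 0, 100) == 8);     // floor of 8
static_assert(nextCapacity(20, 16, 100) == 32);  // doubling wins
static_assert(nextCapacity(90, 64, 100) == 100); // capped at Size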
@@ -3857,7 +3840,7 @@ struct CompleteObject {
if (!Info.getLangOpts().CPlusPlus14 &&
AK != AccessKinds::AK_IsWithinLifetime)
return false;
- return lifetimeStartedInEvaluation(Info, Base, /*MutableSubobject*/true);
+ return lifetimeStartedInEvaluation(Info, Base, /*MutableSubobject*/ true);
}
explicit operator bool() const { return !Type.isNull(); }
@@ -3904,7 +3887,8 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
// Walk the designator's path to find the subobject.
for (unsigned I = 0, N = Sub.Entries.size(); /**/; ++I) {
- // Reading an indeterminate value is undefined, but assigning over one is OK.
+ // Reading an indeterminate value is undefined, but assigning over one is
+ // OK.
if ((O->isAbsent() && !(handler.AccessKind == AK_Construct && I == N)) ||
(O->isIndeterminate() &&
!isValidIndeterminateAccess(handler.AccessKind))) {
@@ -3946,7 +3930,7 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
DiagKind = 2;
Loc = VolatileField->getLocation();
Decl = VolatileField;
- } else if (auto *VD = Obj.Base.dyn_cast<const ValueDecl*>()) {
+ } else if (auto *VD = Obj.Base.dyn_cast<const ValueDecl *>()) {
DiagKind = 1;
Loc = VD->getLocation();
Decl = VD;
@@ -3979,8 +3963,8 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
return false;
// If we modified a bit-field, truncate it to the right width.
- if (isModification(handler.AccessKind) &&
- LastField && LastField->isBitField() &&
+ if (isModification(handler.AccessKind) && LastField &&
+ LastField->isBitField() &&
!truncateBitfieldValue(Info, E, *O, LastField))
return false;
@@ -4000,7 +3984,7 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
// designator which points more than one past the end of the array.
if (Info.getLangOpts().CPlusPlus11)
Info.FFDiag(E, diag::note_constexpr_access_past_end)
- << handler.AccessKind;
+ << handler.AccessKind;
else
Info.FFDiag(E);
return handler.failed();
@@ -4025,7 +4009,7 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
if (Index > 1) {
if (Info.getLangOpts().CPlusPlus11)
Info.FFDiag(E, diag::note_constexpr_access_past_end)
- << handler.AccessKind;
+ << handler.AccessKind;
else
Info.FFDiag(E);
return handler.failed();
@@ -4036,12 +4020,13 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
assert(I == N - 1 && "extracting subobject of scalar?");
if (O->isComplexInt()) {
- return handler.found(Index ? O->getComplexIntImag()
- : O->getComplexIntReal(), ObjType);
+ return handler.found(
+ Index ? O->getComplexIntImag() : O->getComplexIntReal(), ObjType);
} else {
assert(O->isComplexFloat());
return handler.found(Index ? O->getComplexFloatImag()
- : O->getComplexFloatReal(), ObjType);
+ : O->getComplexFloatReal(),
+ ObjType);
}
} else if (const auto *VT = ObjType->getAs<VectorType>()) {
uint64_t Index = Sub.Entries[I].getAsArrayIndex();
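In the complex branch above, the designator's final index selects the component:
0 is the real part, 1 the imaginary part. This is observable from user code with
clang's _Complex support (a GNU extension; the sketch assumes __real/__imag and
braced complex initializers are usable in constant expressions, which is the code
path this handles):

constexpr _Complex double z = {1.0, 2.0};
static_assert(__real z == 1.0);  // component index 0
static_assert(__imag z == 2.0);  // component index 1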
@@ -4068,7 +4053,7 @@ findSubobject(EvalInfo &Info, const Expr *E, const CompleteObject &Obj,
if (Field->isMutable() &&
!Obj.mayAccessMutableMembers(Info, handler.AccessKind)) {
Info.FFDiag(E, diag::note_constexpr_access_mutable, 1)
- << handler.AccessKind << Field;
+ << handler.AccessKind << Field;
Info.Note(Field->getLocation(), diag::note_declared_at);
return handler.failed();
}
@@ -4201,9 +4186,8 @@ const AccessKinds ModifySubobjectHandler::AccessKind;
/// Update the designated sub-object of an rvalue to the given value.
static bool modifySubobject(EvalInfo &Info, const Expr *E,
const CompleteObject &Obj,
- const SubobjectDesignator &Sub,
- APValue &NewVal) {
- ModifySubobjectHandler Handler = { Info, NewVal, E };
+ const SubobjectDesignator &Sub, APValue &NewVal) {
+ ModifySubobjectHandler Handler = {Info, NewVal, E};
return findSubobject(Info, E, Obj, Sub, Handler);
}
@@ -4289,7 +4273,7 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
Info.getCallFrameAndDepth(LVal.getLValueCallIndex());
if (!Frame) {
Info.FFDiag(E, diag::note_constexpr_lifetime_ended, 1)
- << AK << LVal.Base.is<const ValueDecl*>();
+ << AK << LVal.Base.is<const ValueDecl *>();
NoteLValueLocation(Info, LVal.Base);
return CompleteObject();
}
@@ -4304,7 +4288,7 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
if (isFormalAccess(AK) && LValType.isVolatileQualified()) {
if (Info.getLangOpts().CPlusPlus)
Info.FFDiag(E, diag::note_constexpr_access_volatile_type)
- << AK << LValType;
+ << AK << LValType;
else
Info.FFDiag(E);
return CompleteObject();
@@ -4424,9 +4408,11 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
// folding of const floating-point types, in order to make static const
// data members of such types (supported as an extension) more useful.
if (Info.getLangOpts().CPlusPlus) {
- Info.CCEDiag(E, Info.getLangOpts().CPlusPlus11
- ? diag::note_constexpr_ltor_non_constexpr
- : diag::note_constexpr_ltor_non_integral, 1)
+ Info.CCEDiag(E,
+ Info.getLangOpts().CPlusPlus11
+ ? diag::note_constexpr_ltor_non_constexpr
+ : diag::note_constexpr_ltor_non_integral,
+ 1)
<< VD << BaseType;
Info.Note(VD->getLocation(), diag::note_declared_at);
} else {
@@ -4435,9 +4421,11 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
} else {
// Never allow reading a non-const value.
if (Info.getLangOpts().CPlusPlus) {
- Info.FFDiag(E, Info.getLangOpts().CPlusPlus11
- ? diag::note_constexpr_ltor_non_constexpr
- : diag::note_constexpr_ltor_non_integral, 1)
+ Info.FFDiag(E,
+ Info.getLangOpts().CPlusPlus11
+ ? diag::note_constexpr_ltor_non_constexpr
+ : diag::note_constexpr_ltor_non_integral,
+ 1)
<< VD << BaseType;
Info.Note(VD->getLocation(), diag::note_declared_at);
} else {
@@ -4476,7 +4464,7 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
// When binding to a reference, the variable does not need to be
// within its lifetime.
else if (AK != clang::AK_Dereference) {
- const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
+ const Expr *Base = LVal.Base.dyn_cast<const Expr *>();
if (!Frame) {
if (const MaterializeTemporaryExpr *MTE =
@@ -4603,7 +4591,7 @@ handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv, QualType Type,
return false;
// Check for special cases where there is no existing APValue to look at.
- const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
+ const Expr *Base = LVal.Base.dyn_cast<const Expr *>();
AccessKinds AK =
WantObjectRepresentation ? AK_ReadObjectRepresentation : AK_Read;
@@ -4732,8 +4720,7 @@ struct CompoundAssignSubobjectHandler {
Value = HandleIntToIntCast(Info, E, SubobjType, PromotedLHSType, LHS);
return true;
} else if (RHS.isFloat()) {
- const FPOptions FPO = E->getFPFeaturesInEffect(
- Info.Ctx.getLangOpts());
+ const FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
APFloat FValue(0.0);
return HandleIntToFloatCast(Info, E, FPO, SubobjType, Value,
PromotedLHSType, FValue) &&
@@ -4798,8 +4785,8 @@ static bool handleCompoundAssignment(EvalInfo &Info,
}
CompleteObject Obj = findCompleteObject(Info, E, AK_Assign, LVal, LValType);
- CompoundAssignSubobjectHandler Handler = { Info, E, PromotedLValType, Opcode,
- RVal };
+ CompoundAssignSubobjectHandler Handler = {Info, E, PromotedLValType, Opcode,
+ RVal};
return Obj && findSubobject(Info, E, Obj, LVal.Designator, Handler);
}
@@ -4836,13 +4823,15 @@ struct IncDecSubobjectHandler {
case APValue::Float:
return found(Subobj.getFloat(), SubobjType);
case APValue::ComplexInt:
- return found(Subobj.getComplexIntReal(),
- SubobjType->castAs<ComplexType>()->getElementType()
- .withCVRQualifiers(SubobjType.getCVRQualifiers()));
+ return found(
+ Subobj.getComplexIntReal(),
+ SubobjType->castAs<ComplexType>()->getElementType().withCVRQualifiers(
+ SubobjType.getCVRQualifiers()));
case APValue::ComplexFloat:
- return found(Subobj.getComplexFloatReal(),
- SubobjType->castAs<ComplexType>()->getElementType()
- .withCVRQualifiers(SubobjType.getCVRQualifiers()));
+ return found(
+ Subobj.getComplexFloatReal(),
+ SubobjType->castAs<ComplexType>()->getElementType().withCVRQualifiers(
+ SubobjType.getCVRQualifiers()));
case APValue::LValue:
return foundPointer(Subobj, SubobjType);
default:
@@ -4862,7 +4851,8 @@ struct IncDecSubobjectHandler {
return false;
}
- if (Old) *Old = APValue(Value);
+ if (Old)
+ *Old = APValue(Value);
// bool arithmetic promotes to int, and the conversion back to bool
// doesn't reduce mod 2^n, so special-case it.
@@ -4879,7 +4869,7 @@ struct IncDecSubobjectHandler {
++Value;
if (!WasNegative && Value.isNegative() && E->canOverflow()) {
- APSInt ActualValue(Value, /*IsUnsigned*/true);
+ APSInt ActualValue(Value, /*IsUnsigned*/ true);
return HandleOverflow(Info, E, ActualValue, SubobjType);
}
} else {
@@ -4887,7 +4877,7 @@ struct IncDecSubobjectHandler {
if (WasNegative && !Value.isNegative() && E->canOverflow()) {
unsigned BitWidth = Value.getBitWidth();
- APSInt ActualValue(Value.sext(BitWidth + 1), /*IsUnsigned*/false);
+ APSInt ActualValue(Value.sext(BitWidth + 1), /*IsUnsigned*/ false);
ActualValue.setBit(BitWidth);
return HandleOverflow(Info, E, ActualValue, SubobjType);
}
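The WasNegative tests above are how ++/-- overflow is caught without a wider
comparison: incrementing a non-negative signed value must not yield a negative
one, and decrementing a negative value must not yield a non-negative one. A
self-checking sketch over 32-bit values, assuming C++20's two's-complement
conversion rules to model the raw wraparound:

#include <cstdint>

constexpr bool decOverflows(int32_t v) {
  bool wasNegative = v < 0;
  int32_t dec = int32_t(uint32_t(v) - 1u);  // wrapping decrement
  return wasNegative && dec >= 0;           // only INT32_MIN trips this
}
static_assert(decOverflows(INT32_MIN));
static_assert(!decOverflows(0) && !decOverflows(-5));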
@@ -4898,7 +4888,8 @@ struct IncDecSubobjectHandler {
if (!checkConst(SubobjType))
return false;
- if (Old) *Old = APValue(Value);
+ if (Old)
+ *Old = APValue(Value);
APFloat One(Value.getSemantics(), 1);
llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
@@ -4980,8 +4971,7 @@ static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object,
/// \return The field or method declaration to which the member pointer refers,
/// or 0 if evaluation fails.
static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
- QualType LVType,
- LValue &LV,
+ QualType LVType, LValue &LV,
const Expr *RHS,
bool IncludeMember = true) {
MemberPtr MemPtr;
@@ -5011,8 +5001,8 @@ static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
unsigned PathLengthToMember =
LV.Designator.Entries.size() - MemPtr.Path.size();
for (unsigned I = 0, N = MemPtr.Path.size(); I != N; ++I) {
- const CXXRecordDecl *LVDecl = getAsBaseClass(
- LV.Designator.Entries[PathLengthToMember + I]);
+ const CXXRecordDecl *LVDecl =
+ getAsBaseClass(LV.Designator.Entries[PathLengthToMember + I]);
const CXXRecordDecl *MPDecl = MemPtr.Path[I];
if (LVDecl->getCanonicalDecl() != MPDecl->getCanonicalDecl()) {
Info.FFDiag(RHS);
@@ -5071,7 +5061,7 @@ static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
if (!HandleLValueMember(Info, RHS, LV, FD))
return nullptr;
} else if (const IndirectFieldDecl *IFD =
- dyn_cast<IndirectFieldDecl>(MemPtr.getDecl())) {
+ dyn_cast<IndirectFieldDecl>(MemPtr.getDecl())) {
if (!HandleLValueIndirectMember(Info, RHS, LV, IFD))
return nullptr;
} else {
@@ -5342,12 +5332,10 @@ struct TempVersionRAII {
Frame.pushTempVersion();
}
- ~TempVersionRAII() {
- Frame.popTempVersion();
- }
+ ~TempVersionRAII() { Frame.popTempVersion(); }
};
-}
+} // namespace
static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
const Stmt *S,
@@ -5452,8 +5440,8 @@ static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info,
const CaseStmt *CS = cast<CaseStmt>(SC);
APSInt LHS = CS->getLHS()->EvaluateKnownConstInt(Info.Ctx);
- APSInt RHS = CS->getRHS() ? CS->getRHS()->EvaluateKnownConstInt(Info.Ctx)
- : LHS;
+ APSInt RHS =
+ CS->getRHS() ? CS->getRHS()->EvaluateKnownConstInt(Info.Ctx) : LHS;
if (LHS <= Value && Value <= RHS) {
Found = SC;
break;
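The LHS <= Value && Value <= RHS test is what makes GNU case ranges work during
constant evaluation; CS->getRHS() is null for an ordinary case label, hence the
fallback to LHS. For example, assuming the GNU case-range extension
(-std=gnu++17 or later):

constexpr int classify(int c) {
  switch (c) {
  case '0' ... '9': return 0;  // LHS = '0', RHS = '9'
  case 'a':         return 1;  // RHS is null; the range degenerates to LHS..LHS
  default:          return 2;
  }
}
static_assert(classify('5') == 0 && classify('a') == 1 && classify('!') == 2);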
@@ -5591,8 +5579,7 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
}
}
- EvalStmtResult ESR =
- EvaluateLoopBody(Result, Info, FS->getBody(), Case);
+ EvalStmtResult ESR = EvaluateLoopBody(Result, Info, FS->getBody(), Case);
if (ShouldPropagateBreakContinue(Info, FS, /*Scopes=*/{}, ESR))
return ESR;
if (ESR != ESR_Continue)
@@ -5683,10 +5670,9 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
// We know we returned, but we don't know what the value is.
return ESR_Failed;
}
- if (RetExpr &&
- !(Result.Slot
- ? EvaluateInPlace(Result.Value, Info, *Result.Slot, RetExpr)
- : Evaluate(Result.Value, Info, RetExpr)))
+ if (RetExpr && !(Result.Slot ? EvaluateInPlace(Result.Value, Info,
+ *Result.Slot, RetExpr)
+ : Evaluate(Result.Value, Info, RetExpr)))
return ESR_Failed;
return Scope.destroy() ? ESR_Returned : ESR_Failed;
}
@@ -6011,7 +5997,7 @@ static bool CheckTrivialDefaultConstructor(EvalInfo &Info, SourceLocation Loc,
// FIXME: If DiagDecl is an implicitly-declared special member function,
// we should be much more explicit about why it's not constexpr.
Info.CCEDiag(Loc, diag::note_constexpr_invalid_function, 1)
- << /*IsConstexpr*/0 << /*IsConstructor*/1 << CD;
+ << /*IsConstexpr*/ 0 << /*IsConstructor*/ 1 << CD;
Info.Note(CD->getLocation(), diag::note_declared_at);
} else {
Info.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
@@ -6055,7 +6041,7 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
// Can we evaluate this function call?
if (Definition && Body &&
(Definition->isConstexpr() || (Info.CurrentCall->CanEvalMSConstexpr &&
- Definition->hasAttr<MSConstexprAttr>())))
+ Definition->hasAttr<MSConstexprAttr>())))
return true;
const FunctionDecl *DiagDecl = Definition ? Definition : Declaration;
@@ -6088,10 +6074,10 @@ static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
// it's not constexpr.
if (CD && CD->isInheritingConstructor())
Info.FFDiag(CallLoc, diag::note_constexpr_invalid_inhctor, 1)
- << CD->getInheritedConstructor().getConstructor()->getParent();
+ << CD->getInheritedConstructor().getConstructor()->getParent();
else
Info.FFDiag(CallLoc, diag::note_constexpr_invalid_function, 1)
- << DiagDecl->isConstexpr() << (bool)CD << DiagDecl;
+ << DiagDecl->isConstexpr() << (bool)CD << DiagDecl;
Info.Note(DiagDecl->getLocation(), diag::note_declared_at);
} else {
Info.FFDiag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
@@ -6174,8 +6160,8 @@ struct DynamicType {
static const CXXRecordDecl *getBaseClassType(SubobjectDesignator &Designator,
unsigned PathLength) {
- assert(PathLength >= Designator.MostDerivedPathLength && PathLength <=
- Designator.Entries.size() && "invalid path length");
+ assert(PathLength >= Designator.MostDerivedPathLength &&
+ PathLength <= Designator.Entries.size() && "invalid path length");
return (PathLength == Designator.MostDerivedPathLength)
? Designator.MostDerivedType->getAsCXXRecordDecl()
: getAsBaseClass(Designator.Entries[PathLength - 1]);
@@ -6379,7 +6365,7 @@ static bool HandleDynamicCast(EvalInfo &Info, const ExplicitCastExpr *E,
assert(C && "dynamic_cast target is not void pointer nor class");
CanQualType CQT = Info.Ctx.getCanonicalTagType(C);
- auto RuntimeCheckFailed = [&] (CXXBasePaths *Paths) {
+ auto RuntimeCheckFailed = [&](CXXBasePaths *Paths) {
// C++ [expr.dynamic.cast]p9:
if (!E->isGLValue()) {
// The value of a failed cast to pointer type is the null pointer value
@@ -6503,7 +6489,7 @@ static bool MaybeHandleUnionActiveMemberChange(EvalInfo &Info,
if (LHS.InvalidBase || LHS.Designator.Invalid)
return false;
- llvm::SmallVector<std::pair<unsigned, const FieldDecl*>, 4> UnionPathLengths;
+ llvm::SmallVector<std::pair<unsigned, const FieldDecl *>, 4> UnionPathLengths;
// C++ [class.union]p5:
// define the set S(E) of subexpressions of E as follows:
unsigned PathLength = LHS.Designator.Entries.size();
@@ -6529,9 +6515,9 @@ static bool MaybeHandleUnionActiveMemberChange(EvalInfo &Info,
E = ME->getBase();
--PathLength;
- assert(declaresSameEntity(FD,
- LHS.Designator.Entries[PathLength]
- .getAsBaseOrMember().getPointer()));
+ assert(declaresSameEntity(
+ FD,
+ LHS.Designator.Entries[PathLength].getAsBaseOrMember().getPointer()));
// -- If E is of the form A[B] and is interpreted as a built-in array
// subscripting operator, S(E) is [S(the array operand, if any)].
@@ -6564,10 +6550,11 @@ static bool MaybeHandleUnionActiveMemberChange(EvalInfo &Info,
--PathLength;
assert(declaresSameEntity(Elt->getType()->getAsCXXRecordDecl(),
LHS.Designator.Entries[PathLength]
- .getAsBaseOrMember().getPointer()));
+ .getAsBaseOrMember()
+ .getPointer()));
}
- // -- Otherwise, S(E) is empty.
+ // -- Otherwise, S(E) is empty.
} else {
break;
}
@@ -6584,7 +6571,7 @@ static bool MaybeHandleUnionActiveMemberChange(EvalInfo &Info,
if (!Obj)
return false;
for (std::pair<unsigned, const FieldDecl *> LengthAndField :
- llvm::reverse(UnionPathLengths)) {
+ llvm::reverse(UnionPathLengths)) {
// Form a designator for the union object.
SubobjectDesignator D = LHS.Designator;
D.truncate(Info.Ctx, LHS.Base, LengthAndField.first);
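This walk implements C++20's relaxation that lets a constant expression change a
union's active member by assignment: every union found on the left-hand side's
designator gets its active member switched before the store. In user terms
(C++20 assumed):

union U { int i; float f; };

constexpr int switchMember() {
  U u{.i = 1};   // 'i' starts as the active member
  u.f = 2.5f;    // assignment activates 'f' (the change handled above)
  return int(u.f);
}
static_assert(switchMember() == 2);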
@@ -6814,10 +6801,11 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
std::distance(RD->field_begin(), RD->field_end()));
else
// A union starts with no active member.
- Result = APValue((const FieldDecl*)nullptr);
+ Result = APValue((const FieldDecl *)nullptr);
}
- if (RD->isInvalidDecl()) return false;
+ if (RD->isInvalidDecl())
+ return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
// A scope for temporaries lifetime-extended by reference members.
@@ -6976,7 +6964,7 @@ static bool HandleConstructorCall(const Expr *E, const LValue &This,
}
static bool HandleConstructorCall(const Expr *E, const LValue &This,
- ArrayRef<const Expr*> Args,
+ ArrayRef<const Expr *> Args,
const CXXConstructorDecl *Definition,
EvalInfo &Info, APValue &Result) {
CallScopeRAII CallScope(Info);
@@ -7123,7 +7111,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceRange CallRange,
// We don't have a good way to iterate fields in reverse, so collect all the
// fields first and then walk them backwards.
- SmallVector<FieldDecl*, 16> Fields(RD->fields());
+ SmallVector<FieldDecl *, 16> Fields(RD->fields());
for (const FieldDecl *FD : llvm::reverse(Fields)) {
if (FD->isUnnamedBitField())
continue;
@@ -7185,12 +7173,12 @@ struct DestroyObjectHandler {
return false;
}
};
-}
+} // namespace
/// Perform a destructor or pseudo-destructor call on the given object, which
/// might in general not be a complete object.
-static bool HandleDestruction(EvalInfo &Info, const Expr *E,
- const LValue &This, QualType ThisType) {
+static bool HandleDestruction(EvalInfo &Info, const Expr *E, const LValue &This,
+ QualType ThisType) {
CompleteObject Obj = findCompleteObject(Info, E, AK_Destroy, This, ThisType);
DestroyObjectHandler Handler = {Info, E, This, AK_Destroy};
return Obj && findSubobject(Info, E, Obj, This.Designator, Handler);
@@ -7394,8 +7382,8 @@ class BitCastBuffer {
public:
BitCastBuffer(CharUnits Width, bool TargetIsLittleEndian)
- : Bytes(Width.getQuantity()),
- TargetIsLittleEndian(TargetIsLittleEndian) {}
+ : Bytes(Width.getQuantity()), TargetIsLittleEndian(TargetIsLittleEndian) {
+ }
[[nodiscard]] bool readObject(CharUnits Offset, CharUnits Width,
SmallVectorImpl<unsigned char> &Output) const {
@@ -7719,8 +7707,7 @@ class BufferToAPValueConverter {
T->isSpecificBuiltinType(BuiltinType::Char_U));
if (!IsStdByte && !IsUChar) {
QualType DisplayType(EnumSugar ? (const Type *)EnumSugar : T, 0);
- Info.FFDiag(BCE->getExprLoc(),
- diag::note_constexpr_bit_cast_indet_dest)
+ Info.FFDiag(BCE->getExprLoc(), diag::note_constexpr_bit_cast_indet_dest)
<< DisplayType << Info.Ctx.getLangOpts().CharIsSigned;
return std::nullopt;
}
@@ -8074,10 +8061,9 @@ static bool handleLValueToRValueBitCast(EvalInfo &Info, APValue &DestValue,
}
template <class Derived>
-class ExprEvaluatorBase
- : public ConstStmtVisitor<Derived, bool> {
+class ExprEvaluatorBase : public ConstStmtVisitor<Derived, bool> {
private:
- Derived &getDerived() { return static_cast<Derived&>(*this); }
+ Derived &getDerived() { return static_cast<Derived &>(*this); }
bool DerivedSuccess(const APValue &V, const Expr *E) {
return getDerived().Success(V, E);
}
@@ -8088,7 +8074,7 @@ class ExprEvaluatorBase
// Check whether a conditional operator with a non-constant condition is a
// potential constant expression. If neither arm is a potential constant
// expression, then the conditional operator is not either.
- template<typename ConditionalOperator>
+ template <typename ConditionalOperator>
void CheckPotentialConstantConditional(const ConditionalOperator *E) {
assert(Info.checkingPotentialConstantExpression());
@@ -8112,8 +8098,7 @@ class ExprEvaluatorBase
Error(E, diag::note_constexpr_conditional_never_const);
}
-
- template<typename ConditionalOperator>
+ template <typename ConditionalOperator>
bool HandleConditionalOperator(const ConditionalOperator *E) {
bool BoolResult;
if (!EvaluateAsBooleanCondition(E->getCond(), BoolResult, Info)) {
@@ -8167,9 +8152,7 @@ class ExprEvaluatorBase
bool VisitStmt(const Stmt *) {
llvm_unreachable("Expression evaluator should not be called on stmts");
}
- bool VisitExpr(const Expr *E) {
- return Error(E);
- }
+ bool VisitExpr(const Expr *E) { return Error(E); }
bool VisitEmbedExpr(const EmbedExpr *E) {
const auto It = E->begin();
@@ -8186,18 +8169,25 @@ class ExprEvaluatorBase
return StmtVisitorTy::Visit(E->getSubExpr());
}
- bool VisitParenExpr(const ParenExpr *E)
- { return StmtVisitorTy::Visit(E->getSubExpr()); }
- bool VisitUnaryExtension(const UnaryOperator *E)
- { return StmtVisitorTy::Visit(E->getSubExpr()); }
- bool VisitUnaryPlus(const UnaryOperator *E)
- { return StmtVisitorTy::Visit(E->getSubExpr()); }
- bool VisitChooseExpr(const ChooseExpr *E)
- { return StmtVisitorTy::Visit(E->getChosenSubExpr()); }
- bool VisitGenericSelectionExpr(const GenericSelectionExpr *E)
- { return StmtVisitorTy::Visit(E->getResultExpr()); }
- bool VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
- { return StmtVisitorTy::Visit(E->getReplacement()); }
+ bool VisitParenExpr(const ParenExpr *E) {
+ return StmtVisitorTy::Visit(E->getSubExpr());
+ }
+ bool VisitUnaryExtension(const UnaryOperator *E) {
+ return StmtVisitorTy::Visit(E->getSubExpr());
+ }
+ bool VisitUnaryPlus(const UnaryOperator *E) {
+ return StmtVisitorTy::Visit(E->getSubExpr());
+ }
+ bool VisitChooseExpr(const ChooseExpr *E) {
+ return StmtVisitorTy::Visit(E->getChosenSubExpr());
+ }
+ bool VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
+ return StmtVisitorTy::Visit(E->getResultExpr());
+ }
+ bool
+ VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E) {
+ return StmtVisitorTy::Visit(E->getReplacement());
+ }
bool VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E) {
TempVersionRAII RAII(*Info.CurrentCall);
SourceLocExprScopeGuard Guard(E, Info.CurrentCall->CurSourceLocExprScope);
@@ -8226,16 +8216,16 @@ class ExprEvaluatorBase
bool VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) {
CCEDiag(E, diag::note_constexpr_invalid_cast)
<< diag::ConstexprInvalidCastKind::Reinterpret;
- return static_cast<Derived*>(this)->VisitCastExpr(E);
+ return static_cast<Derived *>(this)->VisitCastExpr(E);
}
bool VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
if (!Info.Ctx.getLangOpts().CPlusPlus20)
CCEDiag(E, diag::note_constexpr_invalid_cast)
<< diag::ConstexprInvalidCastKind::Dynamic;
- return static_cast<Derived*>(this)->VisitCastExpr(E);
+ return static_cast<Derived *>(this)->VisitCastExpr(E);
}
bool VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *E) {
- return static_cast<Derived*>(this)->VisitCastExpr(E);
+ return static_cast<Derived *>(this)->VisitCastExpr(E);
}
bool VisitBinaryOperator(const BinaryOperator *E) {
@@ -8285,7 +8275,7 @@ class ExprEvaluatorBase
// side-effects. This is an important GNU extension. See GCC PR38377
// for discussion.
if (const CallExpr *CallCE =
- dyn_cast<CallExpr>(E->getCond()->IgnoreParenCasts()))
+ dyn_cast<CallExpr>(E->getCond()->IgnoreParenCasts()))
if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
IsBcpCall = true;
@@ -8359,7 +8349,7 @@ class ExprEvaluatorBase
}
bool handleCallExpr(const CallExpr *E, APValue &Result,
- const LValue *ResultSlot) {
+ const LValue *ResultSlot) {
CallScopeRAII CallScope(Info);
const Expr *Callee = E->getCallee()->IgnoreParens();
@@ -8421,7 +8411,7 @@ class ExprEvaluatorBase
// Don't call function pointers which have been cast to some other type.
// Per DR (no number yet), the caller and callee can differ in noexcept.
if (!Info.Ctx.hasSameFunctionTypeIgnoringExceptionSpec(
- CalleeType->getPointeeType(), FD->getType())) {
+ CalleeType->getPointeeType(), FD->getType())) {
return Error(E);
}
@@ -8604,7 +8594,8 @@ class ExprEvaluatorBase
QualType BaseTy = E->getBase()->getType();
const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
- if (!FD) return Error(E);
+ if (!FD)
+ return Error(E);
assert(!FD->getType()->isReferenceType() && "prvalue reference?");
assert(BaseTy->castAsCanonical<RecordType>()->getDecl() ==
FD->getParent()->getCanonicalDecl() &&
@@ -8742,7 +8733,7 @@ class ExprEvaluatorBase
}
APValue ReturnValue;
- StmtResult Result = { ReturnValue, nullptr };
+ StmtResult Result = {ReturnValue, nullptr};
EvalStmtResult ESR = EvaluateStmt(Result, Info, *BI);
if (ESR != ESR_Succeeded) {
// FIXME: If the statement-expression terminated due to 'return',
@@ -8763,9 +8754,7 @@ class ExprEvaluatorBase
}
/// Visit a value which is evaluated, but whose value is ignored.
- void VisitIgnoredValue(const Expr *E) {
- EvaluateIgnoredValue(Info, E);
- }
+ void VisitIgnoredValue(const Expr *E) { EvaluateIgnoredValue(Info, E); }
/// Potentially visit a MemberExpr's base expression.
void VisitIgnoredBaseExpression(const Expr *E) {
@@ -8783,9 +8772,8 @@ class ExprEvaluatorBase
// Common base class for lvalue and temporary evaluation.
//===----------------------------------------------------------------------===//
namespace {
-template<class Derived>
-class LValueExprEvaluatorBase
- : public ExprEvaluatorBase<Derived> {
+template <class Derived>
+class LValueExprEvaluatorBase : public ExprEvaluatorBase<Derived> {
protected:
LValue &Result;
bool InvalidBaseOK;
@@ -8885,7 +8873,7 @@ class LValueExprEvaluatorBase
}
}
};
-}
+} // namespace
//===----------------------------------------------------------------------===//
// LValue Evaluation
@@ -8922,10 +8910,10 @@ class LValueExprEvaluatorBase
//===----------------------------------------------------------------------===//
namespace {
class LValueExprEvaluator
- : public LValueExprEvaluatorBase<LValueExprEvaluator> {
+ : public LValueExprEvaluatorBase<LValueExprEvaluator> {
public:
- LValueExprEvaluator(EvalInfo &Info, LValue &Result, bool InvalidBaseOK) :
- LValueExprEvaluatorBaseTy(Info, Result, InvalidBaseOK) {}
+ LValueExprEvaluator(EvalInfo &Info, LValue &Result, bool InvalidBaseOK)
+ : LValueExprEvaluatorBaseTy(Info, Result, InvalidBaseOK) {}
bool VisitVarDecl(const Expr *E, const VarDecl *VD);
bool VisitUnaryPreIncDec(const UnaryOperator *UO);
@@ -9042,7 +9030,8 @@ static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info,
bool InvalidBaseOK) {
assert(!E->isValueDependent());
assert(E->isGLValue() || E->getType()->isFunctionType() ||
- E->getType()->isVoidType() || isa<ObjCSelectorExpr>(E->IgnoreParens()));
+ E->getType()->isVoidType() ||
+ isa<ObjCSelectorExpr>(E->IgnoreParens()));
return LValueExprEvaluator(Info, Result, InvalidBaseOK).Visit(E);
}
@@ -9224,8 +9213,8 @@ bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
return true;
}
-bool
-LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
+bool LValueExprEvaluator::VisitCompoundLiteralExpr(
+ const CompoundLiteralExpr *E) {
assert((!Info.getLangOpts().CPlusPlus || E->isFileScope()) &&
"lvalue compound literal in c++?");
APValue *Lit;
@@ -9264,8 +9253,8 @@ bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
} else {
if (!Info.Ctx.getLangOpts().CPlusPlus20) {
Info.CCEDiag(E, diag::note_constexpr_typeid_polymorphic)
- << E->getExprOperand()->getType()
- << E->getExprOperand()->getSourceRange();
+ << E->getExprOperand()->getType()
+ << E->getExprOperand()->getSourceRange();
}
if (!Visit(E->getExprOperand()))
@@ -9420,9 +9409,8 @@ bool LValueExprEvaluator::VisitUnaryPreIncDec(const UnaryOperator *UO) {
if (!this->Visit(UO->getSubExpr()))
return false;
- return handleIncDec(
- this->Info, UO, Result, UO->getSubExpr()->getType(),
- UO->isIncrementOp(), nullptr);
+ return handleIncDec(this->Info, UO, Result, UO->getSubExpr()->getType(),
+ UO->isIncrementOp(), nullptr);
}
bool LValueExprEvaluator::VisitCompoundAssignOperator(
@@ -9445,8 +9433,8 @@ bool LValueExprEvaluator::VisitCompoundAssignOperator(
return false;
return handleCompoundAssignment(
- this->Info, CAO,
- Result, CAO->getLHS()->getType(), CAO->getComputationLHSType(),
+ this->Info, CAO, Result, CAO->getLHS()->getType(),
+ CAO->getComputationLHSType(),
CAO->getOpForCompoundAssignment(CAO->getOpcode()), RHS);
}
@@ -9534,8 +9522,7 @@ static bool evaluateLValueAsAllocSize(EvalInfo &Info, APValue::LValueBase Base,
}
namespace {
-class PointerExprEvaluator
- : public ExprEvaluatorBase<PointerExprEvaluator> {
+class PointerExprEvaluator : public ExprEvaluatorBase<PointerExprEvaluator> {
LValue &Result;
bool InvalidBaseOK;
@@ -9553,8 +9540,8 @@ class PointerExprEvaluator
}
bool visitNonBuiltinCallExpr(const CallExpr *E);
-public:
+public:
PointerExprEvaluator(EvalInfo &info, LValue &Result, bool InvalidBaseOK)
: ExprEvaluatorBaseTy(info), Result(Result),
InvalidBaseOK(InvalidBaseOK) {}
@@ -9569,10 +9556,9 @@ class PointerExprEvaluator
}
bool VisitBinaryOperator(const BinaryOperator *E);
- bool VisitCastExpr(const CastExpr* E);
+ bool VisitCastExpr(const CastExpr *E);
bool VisitUnaryAddrOf(const UnaryOperator *E);
- bool VisitObjCStringLiteral(const ObjCStringLiteral *E)
- { return Success(E); }
+ bool VisitObjCStringLiteral(const ObjCStringLiteral *E) { return Success(E); }
bool VisitObjCBoxedExpr(const ObjCBoxedExpr *E) {
if (E->isExpressibleAsConstantInitializer())
return Success(E);
@@ -9580,8 +9566,7 @@ class PointerExprEvaluator
EvaluateIgnoredValue(Info, E->getSubExpr());
return Error(E);
}
- bool VisitAddrLabelExpr(const AddrLabelExpr *E)
- { return Success(E); }
+ bool VisitAddrLabelExpr(const AddrLabelExpr *E) { return Success(E); }
bool VisitCallExpr(const CallExpr *E);
bool VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp);
bool VisitBlockExpr(const BlockExpr *E) {
@@ -9671,7 +9656,7 @@ class PointerExprEvaluator
};
} // end anonymous namespace
-static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info,
+static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info,
bool InvalidBaseOK) {
assert(!E->isValueDependent());
assert(E->isPRValue() && E->getType()->hasPointerRepresentation());
@@ -9679,8 +9664,7 @@ static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info,
}
bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
- if (E->getOpcode() != BO_Add &&
- E->getOpcode() != BO_Sub)
+ if (E->getOpcode() != BO_Add && E->getOpcode() != BO_Sub)
return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
const Expr *PExp = E->getLHS();
@@ -9802,9 +9786,10 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
// Now figure out the necessary offset to add to the base LV to get from
// the derived class to the base class.
- return HandleLValueBasePath(Info, E, E->getSubExpr()->getType()->
- castAs<PointerType>()->getPointeeType(),
- Result);
+ return HandleLValueBasePath(
+ Info, E,
+ E->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(),
+ Result);
case CK_BaseToDerived:
if (!Visit(E->getSubExpr()))
@@ -10130,8 +10115,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (!EvaluateInteger(E->getArg(1), Desired, Info))
return false;
uint64_t MaxLength = uint64_t(-1);
- if (BuiltinOp != Builtin::BIstrchr &&
- BuiltinOp != Builtin::BIwcschr &&
+ if (BuiltinOp != Builtin::BIstrchr && BuiltinOp != Builtin::BIwcschr &&
BuiltinOp != Builtin::BI__builtin_strchr &&
BuiltinOp != Builtin::BI__builtin_wcschr) {
APSInt N;
@@ -10148,9 +10132,8 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
QualType CharTy = Result.Designator.getType(Info.Ctx);
bool IsRawByte = BuiltinOp == Builtin::BImemchr ||
BuiltinOp == Builtin::BI__builtin_memchr;
- assert(IsRawByte ||
- Info.Ctx.hasSameUnqualifiedType(
- CharTy, E->getArg(0)->getType()->getPointeeType()));
+ assert(IsRawByte || Info.Ctx.hasSameUnqualifiedType(
+ CharTy, E->getArg(0)->getType()->getPointeeType()));
// Pointers to const void may point to objects of incomplete type.
if (IsRawByte && CharTy->isIncompleteType()) {
Info.FFDiag(E, diag::note_constexpr_ltor_incomplete_type) << CharTy;
@@ -10301,7 +10284,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
llvm::APInt::udivrem(OrigN, TSize, N, Remainder);
if (Remainder) {
Info.FFDiag(E, diag::note_constexpr_memcpy_unsupported)
- << Move << WChar << 0 << T << toString(OrigN, 10, /*Signed*/false)
+ << Move << WChar << 0 << T << toString(OrigN, 10, /*Signed*/ false)
<< (unsigned)TSize;
return false;
}
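The udivrem above enforces that a constant-evaluated memcpy/memmove copies a whole
number of elements of the pointee type T. A small constexpr example that passes
the check (clang's __builtin_memcpy is assumed to be constant-foldable here, as
this code path implements):

constexpr int copyFirst() {
  int src[2] = {7, 8};
  int dst[2] = {};
  __builtin_memcpy(dst, src, 2 * sizeof(int)); // multiple of sizeof(int): OK
  // __builtin_memcpy(dst, src, 5);            // 5 % 4 != 0: rejected above
  return dst[0];
}
static_assert(copyFirst() == 7);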
@@ -10315,7 +10298,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (N.ugt(RemainingSrcSize) || N.ugt(RemainingDestSize)) {
Info.FFDiag(E, diag::note_constexpr_memcpy_unsupported)
<< Move << WChar << (N.ugt(RemainingSrcSize) ? 1 : 2) << T
- << toString(N, 10, /*Signed*/false);
+ << toString(N, 10, /*Signed*/ false);
return false;
}
uint64_t NElems = N.getZExtValue();
@@ -10511,8 +10494,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
AllocType = Info.Ctx.getConstantArrayType(AllocType, ArrayBound, nullptr,
ArraySizeModifier::Normal, 0);
} else {
- assert(!AllocType->isArrayType() &&
- "array allocation with non-array new");
+ assert(!AllocType->isArrayType() && "array allocation with non-array new");
}
APValue *Val;
@@ -10615,24 +10597,24 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
namespace {
class MemberPointerExprEvaluator
- : public ExprEvaluatorBase<MemberPointerExprEvaluator> {
+ : public ExprEvaluatorBase<MemberPointerExprEvaluator> {
MemberPtr &Result;
bool Success(const ValueDecl *D) {
Result = MemberPtr(D);
return true;
}
-public:
+public:
MemberPointerExprEvaluator(EvalInfo &Info, MemberPtr &Result)
- : ExprEvaluatorBaseTy(Info), Result(Result) {}
+ : ExprEvaluatorBaseTy(Info), Result(Result) {}
bool Success(const APValue &V, const Expr *E) {
Result.setFrom(V);
return true;
}
bool ZeroInitialization(const Expr *E) {
- return Success((const ValueDecl*)nullptr);
+ return Success((const ValueDecl *)nullptr);
}
bool VisitCastExpr(const CastExpr *E);
@@ -10683,7 +10665,8 @@ bool MemberPointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
if (!Visit(E->getSubExpr()))
return false;
for (CastExpr::path_const_iterator PathI = E->path_begin(),
- PathE = E->path_end(); PathI != PathE; ++PathI) {
+ PathE = E->path_end();
+ PathI != PathE; ++PathI) {
assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
if (!Result.castToBase(Base))
@@ -10704,42 +10687,41 @@ bool MemberPointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
//===----------------------------------------------------------------------===//
namespace {
- class RecordExprEvaluator
- : public ExprEvaluatorBase<RecordExprEvaluator> {
- const LValue &This;
- APValue &Result;
- public:
+class RecordExprEvaluator : public ExprEvaluatorBase<RecordExprEvaluator> {
+ const LValue &This;
+ APValue &Result;
- RecordExprEvaluator(EvalInfo &info, const LValue &This, APValue &Result)
+public:
+ RecordExprEvaluator(EvalInfo &info, const LValue &This, APValue &Result)
: ExprEvaluatorBaseTy(info), This(This), Result(Result) {}
- bool Success(const APValue &V, const Expr *E) {
- Result = V;
- return true;
- }
- bool ZeroInitialization(const Expr *E) {
- return ZeroInitialization(E, E->getType());
- }
- bool ZeroInitialization(const Expr *E, QualType T);
+ bool Success(const APValue &V, const Expr *E) {
+ Result = V;
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E) {
+ return ZeroInitialization(E, E->getType());
+ }
+ bool ZeroInitialization(const Expr *E, QualType T);
- bool VisitCallExpr(const CallExpr *E) {
- return handleCallExpr(E, Result, &This);
- }
- bool VisitCastExpr(const CastExpr *E);
- bool VisitInitListExpr(const InitListExpr *E);
- bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
- return VisitCXXConstructExpr(E, E->getType());
- }
- bool VisitLambdaExpr(const LambdaExpr *E);
- bool VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
- bool VisitCXXConstructExpr(const CXXConstructExpr *E, QualType T);
- bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E);
- bool VisitBinCmp(const BinaryOperator *E);
- bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E);
- bool VisitCXXParenListOrInitListExpr(const Expr *ExprToVisit,
- ArrayRef<Expr *> Args);
- };
-}
+ bool VisitCallExpr(const CallExpr *E) {
+ return handleCallExpr(E, Result, &This);
+ }
+ bool VisitCastExpr(const CastExpr *E);
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ return VisitCXXConstructExpr(E, E->getType());
+ }
+ bool VisitLambdaExpr(const LambdaExpr *E);
+ bool VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E);
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E, QualType T);
+ bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E);
+ bool VisitBinCmp(const BinaryOperator *E);
+ bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E);
+ bool VisitCXXParenListOrInitListExpr(const Expr *ExprToVisit,
+ ArrayRef<Expr *> Args);
+};
+} // namespace
/// Perform zero-initialization on an object of non-union class type.
/// C++11 [dcl.init]p5:
@@ -10756,13 +10738,15 @@ static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
Result = APValue(APValue::UninitStruct(), CD ? CD->getNumBases() : 0,
std::distance(RD->field_begin(), RD->field_end()));
- if (RD->isInvalidDecl()) return false;
+ if (RD->isInvalidDecl())
+ return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
if (CD) {
unsigned Index = 0;
for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
- End = CD->bases_end(); I != End; ++I, ++Index) {
+ End = CD->bases_end();
+ I != End; ++I, ++Index) {
const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
LValue Subobject = This;
if (!HandleLValueDirectBase(Info, E, Subobject, CD, Base, &Layout))
@@ -10783,8 +10767,8 @@ static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
return false;
ImplicitValueInitExpr VIE(I->getType());
- if (!EvaluateInPlace(
- Result.getStructField(I->getFieldIndex()), Info, Subobject, &VIE))
+ if (!EvaluateInPlace(Result.getStructField(I->getFieldIndex()), Info,
+ Subobject, &VIE))
return false;
}
@@ -10793,7 +10777,8 @@ static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
bool RecordExprEvaluator::ZeroInitialization(const Expr *E, QualType T) {
const auto *RD = T->castAsRecordDecl();
- if (RD->isInvalidDecl()) return false;
+ if (RD->isInvalidDecl())
+ return false;
if (RD->isUnion()) {
// C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
// object's first non-static named data member is zero-initialized
@@ -10801,7 +10786,7 @@ bool RecordExprEvaluator::ZeroInitialization(const Expr *E, QualType T) {
while (I != RD->field_end() && (*I)->isUnnamedBitField())
++I;
if (I == RD->field_end()) {
- Result = APValue((const FieldDecl*)nullptr);
+ Result = APValue((const FieldDecl *)nullptr);
return true;
}
@@ -10841,7 +10826,8 @@ bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) {
APValue *Value = &DerivedObject;
const CXXRecordDecl *RD = E->getSubExpr()->getType()->getAsCXXRecordDecl();
for (CastExpr::path_const_iterator PathI = E->path_begin(),
- PathE = E->path_end(); PathI != PathE; ++PathI) {
+ PathE = E->path_end();
+ PathI != PathE; ++PathI) {
assert(!(*PathI)->isVirtual() && "record rvalue with virtual base");
const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
Value = &Value->getStructBase(getBaseIndex(RD, Base));
@@ -10862,7 +10848,8 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
bool RecordExprEvaluator::VisitCXXParenListOrInitListExpr(
const Expr *ExprToVisit, ArrayRef<Expr *> Args) {
const auto *RD = ExprToVisit->getType()->castAsRecordDecl();
- if (RD->isInvalidDecl()) return false;
+ if (RD->isInvalidDecl())
+ return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
@@ -11005,7 +10992,8 @@ bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
// Note that E's type is not necessarily the type of our class here; we might
// be initializing an array element instead.
const CXXConstructorDecl *FD = E->getConstructor();
- if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl()) return false;
+ if (FD->isInvalidDecl() || FD->getParent()->isInvalidDecl())
+ return false;
bool ZeroInit = E->requiresZeroInitialization();
if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
@@ -11040,9 +11028,8 @@ bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
return false;
auto Args = ArrayRef(E->getArgs(), E->getNumArgs());
- return HandleConstructorCall(E, This, Args,
- cast<CXXConstructorDecl>(Definition), Info,
- Result);
+ return HandleConstructorCall(
+ E, This, Args, cast<CXXConstructorDecl>(Definition), Info, Result);
}
bool RecordExprEvaluator::VisitCXXInheritedCtorInitExpr(
@@ -11129,7 +11116,7 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
"The number of lambda capture initializers should equal the number of "
"fields within the closure type");
- Result = APValue(APValue::UninitStruct(), /*NumBases*/0, NumFields);
+ Result = APValue(APValue::UninitStruct(), /*NumBases*/ 0, NumFields);
// Iterate through all the lambda's closure object's fields and initialize
// them.
auto *CaptureInitIt = E->capture_init_begin();
@@ -11160,8 +11147,8 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
return Success;
}
-static bool EvaluateRecord(const Expr *E, const LValue &This,
- APValue &Result, EvalInfo &Info) {
+static bool EvaluateRecord(const Expr *E, const LValue &This, APValue &Result,
+ EvalInfo &Info) {
assert(!E->isValueDependent());
assert(E->isPRValue() && E->getType()->isRecordType() &&
"can't evaluate expression as a record rvalue");
@@ -11177,10 +11164,10 @@ static bool EvaluateRecord(const Expr *E, const LValue &This,
//===----------------------------------------------------------------------===//
namespace {
class TemporaryExprEvaluator
- : public LValueExprEvaluatorBase<TemporaryExprEvaluator> {
+ : public LValueExprEvaluatorBase<TemporaryExprEvaluator> {
public:
- TemporaryExprEvaluator(EvalInfo &Info, LValue &Result) :
- LValueExprEvaluatorBaseTy(Info, Result, false) {}
+ TemporaryExprEvaluator(EvalInfo &Info, LValue &Result)
+ : LValueExprEvaluatorBaseTy(Info, Result, false) {}
/// Visit an expression which constructs the value of this temporary.
bool VisitConstructExpr(const Expr *E) {
@@ -11204,15 +11191,11 @@ class TemporaryExprEvaluator
bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
return VisitConstructExpr(E);
}
- bool VisitCallExpr(const CallExpr *E) {
- return VisitConstructExpr(E);
- }
+ bool VisitCallExpr(const CallExpr *E) { return VisitConstructExpr(E); }
bool VisitCXXStdInitializerListExpr(const CXXStdInitializerListExpr *E) {
return VisitConstructExpr(E);
}
- bool VisitLambdaExpr(const LambdaExpr *E) {
- return VisitConstructExpr(E);
- }
+ bool VisitLambdaExpr(const LambdaExpr *E) { return VisitConstructExpr(E); }
};
} // end anonymous namespace
@@ -11228,44 +11211,42 @@ static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info) {
//===----------------------------------------------------------------------===//
namespace {
- class VectorExprEvaluator
- : public ExprEvaluatorBase<VectorExprEvaluator> {
- APValue &Result;
- public:
+class VectorExprEvaluator : public ExprEvaluatorBase<VectorExprEvaluator> {
+ APValue &Result;
- VectorExprEvaluator(EvalInfo &info, APValue &Result)
+public:
+ VectorExprEvaluator(EvalInfo &info, APValue &Result)
: ExprEvaluatorBaseTy(info), Result(Result) {}
- bool Success(ArrayRef<APValue> V, const Expr *E) {
- assert(V.size() == E->getType()->castAs<VectorType>()->getNumElements());
- // FIXME: remove this APValue copy.
- Result = APValue(V.data(), V.size());
- return true;
- }
- bool Success(const APValue &V, const Expr *E) {
- assert(V.isVector());
- Result = V;
- return true;
- }
- bool ZeroInitialization(const Expr *E);
-
- bool VisitUnaryReal(const UnaryOperator *E)
- { return Visit(E->getSubExpr()); }
- bool VisitCastExpr(const CastExpr* E);
- bool VisitInitListExpr(const InitListExpr *E);
- bool VisitUnaryImag(const UnaryOperator *E);
- bool VisitBinaryOperator(const BinaryOperator *E);
- bool VisitUnaryOperator(const UnaryOperator *E);
- bool VisitCallExpr(const CallExpr *E);
- bool VisitConvertVectorExpr(const ConvertVectorExpr *E);
- bool VisitShuffleVectorExpr(const ShuffleVectorExpr *E);
-
- // FIXME: Missing: conditional operator (for GNU
- // conditional select), ExtVectorElementExpr
- };
+ bool Success(ArrayRef<APValue> V, const Expr *E) {
+ assert(V.size() == E->getType()->castAs<VectorType>()->getNumElements());
+ // FIXME: remove this APValue copy.
+ Result = APValue(V.data(), V.size());
+ return true;
+ }
+ bool Success(const APValue &V, const Expr *E) {
+ assert(V.isVector());
+ Result = V;
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E);
+
+ bool VisitUnaryReal(const UnaryOperator *E) { return Visit(E->getSubExpr()); }
+ bool VisitCastExpr(const CastExpr *E);
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitUnaryOperator(const UnaryOperator *E);
+ bool VisitCallExpr(const CallExpr *E);
+ bool VisitConvertVectorExpr(const ConvertVectorExpr *E);
+ bool VisitShuffleVectorExpr(const ShuffleVectorExpr *E);
+
+ // FIXME: Missing: conditional operator (for GNU
+ // conditional select), ExtVectorElementExpr
+};
} // end anonymous namespace
-static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) {
+static bool EvaluateVector(const Expr *E, APValue &Result, EvalInfo &Info) {
assert(E->isPRValue() && E->getType()->isVectorType() &&
"not a vector prvalue");
return VectorExprEvaluator(Info, Result).Visit(E);
@@ -11350,8 +11331,7 @@ bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) {
}
}
-bool
-VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+bool VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
const VectorType *VT = E->getType()->castAs<VectorType>();
unsigned NumInits = E->getNumInits();
unsigned NumElements = VT->getNumElements();
@@ -11371,8 +11351,8 @@ VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
unsigned CountInits = 0, CountElts = 0;
while (CountElts < NumElements) {
// Handle nested vector initialization.
- if (CountInits < NumInits
- && E->getInit(CountInits)->getType()->isVectorType()) {
+ if (CountInits < NumInits &&
+ E->getInit(CountInits)->getType()->isVectorType()) {
APValue v;
if (!EvaluateVector(E->getInit(CountInits), v, Info))
return Error(E);
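The nested-vector branch above lets a single initializer fill several elements of
the outer vector. A sketch with clang's ext_vector_type extension (assuming
constexpr vector variables and [] element access are constant-foldable, as the
VectorType subobject path earlier in this file handles):

typedef float float2 __attribute__((ext_vector_type(2)));
typedef float float4 __attribute__((ext_vector_type(4)));

constexpr float2 lo = {1.0f, 2.0f};
constexpr float4 v = {lo, 3.0f, 4.0f}; // the inner vector supplies two elements
static_assert(v[0] == 1.0f && v[3] == 4.0f);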
@@ -11404,8 +11384,7 @@ VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
return Success(Elements, E);
}
-bool
-VectorExprEvaluator::ZeroInitialization(const Expr *E) {
+bool VectorExprEvaluator::ZeroInitialization(const Expr *E) {
const auto *VT = E->getType()->castAs<VectorType>();
QualType EltTy = VT->getElementType();
APValue ZeroElement;
@@ -12876,72 +12855,70 @@ bool VectorExprEvaluator::VisitShuffleVectorExpr(const ShuffleVectorExpr *E) {
//===----------------------------------------------------------------------===//
namespace {
- class ArrayExprEvaluator
- : public ExprEvaluatorBase<ArrayExprEvaluator> {
- const LValue &This;
- APValue &Result;
- public:
+class ArrayExprEvaluator : public ExprEvaluatorBase<ArrayExprEvaluator> {
+ const LValue &This;
+ APValue &Result;
- ArrayExprEvaluator(EvalInfo &Info, const LValue &This, APValue &Result)
+public:
+ ArrayExprEvaluator(EvalInfo &Info, const LValue &This, APValue &Result)
: ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}
- bool Success(const APValue &V, const Expr *E) {
- assert(V.isArray() && "expected array");
- Result = V;
- return true;
- }
-
- bool ZeroInitialization(const Expr *E) {
- const ConstantArrayType *CAT =
- Info.Ctx.getAsConstantArrayType(E->getType());
- if (!CAT) {
- if (E->getType()->isIncompleteArrayType()) {
- // We can be asked to zero-initialize a flexible array member; this
- // is represented as an ImplicitValueInitExpr of incomplete array
- // type. In this case, the array has zero elements.
- Result = APValue(APValue::UninitArray(), 0, 0);
- return true;
- }
- // FIXME: We could handle VLAs here.
- return Error(E);
- }
+ bool Success(const APValue &V, const Expr *E) {
+ assert(V.isArray() && "expected array");
+ Result = V;
+ return true;
+ }
- Result = APValue(APValue::UninitArray(), 0, CAT->getZExtSize());
- if (!Result.hasArrayFiller())
+ bool ZeroInitialization(const Expr *E) {
+ const ConstantArrayType *CAT =
+ Info.Ctx.getAsConstantArrayType(E->getType());
+ if (!CAT) {
+ if (E->getType()->isIncompleteArrayType()) {
+ // We can be asked to zero-initialize a flexible array member; this
+ // is represented as an ImplicitValueInitExpr of incomplete array
+ // type. In this case, the array has zero elements.
+ Result = APValue(APValue::UninitArray(), 0, 0);
return true;
+ }
+ // FIXME: We could handle VLAs here.
+ return Error(E);
+ }
- // Zero-initialize all elements.
- LValue Subobject = This;
- Subobject.addArray(Info, E, CAT);
- ImplicitValueInitExpr VIE(CAT->getElementType());
- return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE);
- }
-
- bool VisitCallExpr(const CallExpr *E) {
- return handleCallExpr(E, Result, &This);
- }
- bool VisitInitListExpr(const InitListExpr *E,
- QualType AllocType = QualType());
- bool VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E);
- bool VisitCXXConstructExpr(const CXXConstructExpr *E);
- bool VisitCXXConstructExpr(const CXXConstructExpr *E,
- const LValue &Subobject,
- APValue *Value, QualType Type);
- bool VisitStringLiteral(const StringLiteral *E,
- QualType AllocType = QualType()) {
- expandStringLiteral(Info, E, Result, AllocType);
+ Result = APValue(APValue::UninitArray(), 0, CAT->getZExtSize());
+ if (!Result.hasArrayFiller())
return true;
- }
- bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E);
- bool VisitCXXParenListOrInitListExpr(const Expr *ExprToVisit,
- ArrayRef<Expr *> Args,
- const Expr *ArrayFiller,
- QualType AllocType = QualType());
- };
+
+ // Zero-initialize all elements.
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+ ImplicitValueInitExpr VIE(CAT->getElementType());
+ return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE);
+ }
+
+ bool VisitCallExpr(const CallExpr *E) {
+ return handleCallExpr(E, Result, &This);
+ }
+ bool VisitInitListExpr(const InitListExpr *E,
+ QualType AllocType = QualType());
+ bool VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E);
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E);
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E, const LValue &Subobject,
+ APValue *Value, QualType Type);
+ bool VisitStringLiteral(const StringLiteral *E,
+ QualType AllocType = QualType()) {
+ expandStringLiteral(Info, E, Result, AllocType);
+ return true;
+ }
+ bool VisitCXXParenListInitExpr(const CXXParenListInitExpr *E);
+ bool VisitCXXParenListOrInitListExpr(const Expr *ExprToVisit,
+ ArrayRef<Expr *> Args,
+ const Expr *ArrayFiller,
+ QualType AllocType = QualType());
+};
} // end anonymous namespace
-static bool EvaluateArray(const Expr *E, const LValue &This,
- APValue &Result, EvalInfo &Info) {
+static bool EvaluateArray(const Expr *E, const LValue &This, APValue &Result,
+ EvalInfo &Info) {
assert(!E->isValueDependent());
assert(E->isPRValue() && E->getType()->isArrayType() &&
"not an array prvalue");
@@ -13152,10 +13129,10 @@ bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
// array element, if any.
FullExpressionRAII Scope(Info);
- if (!EvaluateInPlace(Result.getArrayInitializedElt(Index),
- Info, Subobject, E->getSubExpr()) ||
- !HandleLValueArrayAdjustment(Info, E, Subobject,
- CAT->getElementType(), 1)) {
+ if (!EvaluateInPlace(Result.getArrayInitializedElt(Index), Info, Subobject,
+ E->getSubExpr()) ||
+ !HandleLValueArrayAdjustment(Info, E, Subobject, CAT->getElementType(),
+ 1)) {
if (!Info.noteFailure())
return false;
Success = false;
@@ -13174,17 +13151,16 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
const LValue &Subobject,
- APValue *Value,
- QualType Type) {
+ APValue *Value, QualType Type) {
bool HadZeroInit = Value->hasValue();
if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(Type)) {
unsigned FinalSize = CAT->getZExtSize();
// Preserve the array filler if we had prior zero-initialization.
- APValue Filler =
- HadZeroInit && Value->hasArrayFiller() ? Value->getArrayFiller()
- : APValue();
+ APValue Filler = HadZeroInit && Value->hasArrayFiller()
+ ? Value->getArrayFiller()
+ : APValue();
*Value = APValue(APValue::UninitArray(), 0, FinalSize);
if (FinalSize == 0)
@@ -13246,7 +13222,7 @@ bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E,
return Error(E);
return RecordExprEvaluator(Info, Subobject, *Value)
- .VisitCXXConstructExpr(E, Type);
+ .VisitCXXConstructExpr(E, Type);
}
bool ArrayExprEvaluator::VisitCXXParenListInitExpr(
@@ -13267,9 +13243,9 @@ bool ArrayExprEvaluator::VisitCXXParenListInitExpr(
//===----------------------------------------------------------------------===//
namespace {
-class IntExprEvaluator
- : public ExprEvaluatorBase<IntExprEvaluator> {
+class IntExprEvaluator : public ExprEvaluatorBase<IntExprEvaluator> {
APValue &Result;
+
public:
IntExprEvaluator(EvalInfo &info, APValue &result)
: ExprEvaluatorBaseTy(info), Result(result) {}
@@ -13295,7 +13271,7 @@ class IntExprEvaluator
"Invalid evaluation result.");
Result = APValue(APSInt(I));
Result.getInt().setIsUnsigned(
- E->getType()->isUnsignedIntegerOrEnumerationType());
+ E->getType()->isUnsignedIntegerOrEnumerationType());
return true;
}
bool Success(const llvm::APInt &I, const Expr *E) {
@@ -13365,7 +13341,7 @@ class IntExprEvaluator
bool VisitOffsetOfExpr(const OffsetOfExpr *E);
bool VisitUnaryOperator(const UnaryOperator *E);
- bool VisitCastExpr(const CastExpr* E);
+ bool VisitCastExpr(const CastExpr *E);
bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
@@ -13387,9 +13363,7 @@ class IntExprEvaluator
}
// Note, GNU defines __null as an integer, not a pointer.
- bool VisitGNUNullExpr(const GNUNullExpr *E) {
- return ZeroInitialization(E);
- }
+ bool VisitGNUNullExpr(const GNUNullExpr *E) { return ZeroInitialization(E); }
bool VisitTypeTraitExpr(const TypeTraitExpr *E) {
if (E->isStoredAsBoolean())
@@ -13430,7 +13404,7 @@ class FixedPointExprEvaluator
: public ExprEvaluatorBase<FixedPointExprEvaluator> {
APValue &Result;
- public:
+public:
FixedPointExprEvaluator(EvalInfo &info, APValue &result)
: ExprEvaluatorBaseTy(info), Result(result) {}
@@ -13456,9 +13430,7 @@ class FixedPointExprEvaluator
return true;
}
- bool ZeroInitialization(const Expr *E) {
- return Success(0, E);
- }
+ bool ZeroInitialization(const Expr *E) { return Success(0, E); }
//===--------------------------------------------------------------------===//
// Visitor Methods
@@ -13545,14 +13517,14 @@ static bool EvaluateFixedPointOrInteger(const Expr *E, APFixedPoint &Result,
/// Check whether the given declaration can be directly converted to an integral
/// rvalue. If not, no diagnostic is produced; there are other things we can
/// try.
-bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
+bool IntExprEvaluator::CheckReferencedDecl(const Expr *E, const Decl *D) {
// Enums are integer constant exprs.
if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D)) {
// Check for signedness/width mismatches between E type and ECD value.
- bool SameSign = (ECD->getInitVal().isSigned()
- == E->getType()->isSignedIntegerOrEnumerationType());
- bool SameWidth = (ECD->getInitVal().getBitWidth()
- == Info.Ctx.getIntWidth(E->getType()));
+ bool SameSign = (ECD->getInitVal().isSigned() ==
+ E->getType()->isSignedIntegerOrEnumerationType());
+ bool SameWidth =
+ (ECD->getInitVal().getBitWidth() == Info.Ctx.getIntWidth(E->getType()));
if (SameSign && SameWidth)
return Success(ECD->getInitVal(), E);
else {
@@ -13585,17 +13557,20 @@ GCCTypeClass EvaluateBuiltinClassifyType(QualType T,
#include "clang/AST/TypeNodes.inc"
case Type::Auto:
case Type::DeducedTemplateSpecialization:
- llvm_unreachable("unexpected non-canonical or dependent type");
+ llvm_unreachable("unexpected non-canonical or dependent type");
case Type::Builtin:
- switch (cast<BuiltinType>(CanTy)->getKind()) {
+ switch (cast<BuiltinType>(CanTy)->getKind()) {
#define BUILTIN_TYPE(ID, SINGLETON_ID)
-#define SIGNED_TYPE(ID, SINGLETON_ID) \
- case BuiltinType::ID: return GCCTypeClass::Integer;
-#define FLOATING_TYPE(ID, SINGLETON_ID) \
- case BuiltinType::ID: return GCCTypeClass::RealFloat;
-#define PLACEHOLDER_TYPE(ID, SINGLETON_ID) \
- case BuiltinType::ID: break;
+#define SIGNED_TYPE(ID, SINGLETON_ID) \
+ case BuiltinType::ID: \
+ return GCCTypeClass::Integer;
+#define FLOATING_TYPE(ID, SINGLETON_ID) \
+ case BuiltinType::ID: \
+ return GCCTypeClass::RealFloat;
+#define PLACEHOLDER_TYPE(ID, SINGLETON_ID) \
+ case BuiltinType::ID: \
+ break;
#include "clang/AST/BuiltinTypes.def"
case BuiltinType::Void:
return GCCTypeClass::Void;
@@ -13635,22 +13610,19 @@ GCCTypeClass EvaluateBuiltinClassifyType(QualType T,
case BuiltinType::ObjCId:
case BuiltinType::ObjCClass:
case BuiltinType::ObjCSel:
-#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
- case BuiltinType::Id:
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
+ case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
-#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
- case BuiltinType::Id:
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
case BuiltinType::OCLSampler:
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
case BuiltinType::OCLReserveID:
-#define SVE_TYPE(Name, Id, SingletonId) \
- case BuiltinType::Id:
+#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/AArch64ACLETypes.def"
-#define PPC_VECTOR_TYPE(Name, Id, Size) \
- case BuiltinType::Id:
+#define PPC_VECTOR_TYPE(Name, Id, Size) case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
@@ -13725,8 +13697,8 @@ GCCTypeClass EvaluateBuiltinClassifyType(QualType T,
/// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
/// as GCC.
-static GCCTypeClass
-EvaluateBuiltinClassifyType(const CallExpr *E, const LangOptions &LangOpts) {
+static GCCTypeClass EvaluateBuiltinClassifyType(const CallExpr *E,
+ const LangOptions &LangOpts) {
// If no argument was supplied, default to None. This isn't
// ideal, however it is what gcc does.
if (E->getNumArgs() == 0)
@@ -13814,10 +13786,10 @@ static bool EvaluateBuiltinConstantP(EvalInfo &Info, const Expr *Arg) {
/// Retrieves the "underlying object type" of the given expression,
/// as used by __builtin_object_size.
static QualType getObjectType(APValue::LValueBase B) {
- if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl *>()) {
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VD->getType();
- } else if (const Expr *E = B.dyn_cast<const Expr*>()) {
+ } else if (const Expr *E = B.dyn_cast<const Expr *>()) {
if (isa<CompoundLiteralExpr>(E))
return E->getType();
} else if (B.is<TypeInfoLValue>()) {
@@ -14557,10 +14529,18 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return false;
unsigned Arg;
switch (Val.getCategory()) {
- case APFloat::fcNaN: Arg = 0; break;
- case APFloat::fcInfinity: Arg = 1; break;
- case APFloat::fcNormal: Arg = Val.isDenormal() ? 3 : 2; break;
- case APFloat::fcZero: Arg = 4; break;
+ case APFloat::fcNaN:
+ Arg = 0;
+ break;
+ case APFloat::fcInfinity:
+ Arg = 1;
+ break;
+ case APFloat::fcNormal:
+ Arg = Val.isDenormal() ? 3 : 2;
+ break;
+ case APFloat::fcZero:
+ Arg = 4;
+ break;
}
return Visit(E->getArg(Arg));
}
@@ -14854,8 +14834,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return false;
uint64_t MaxLength = uint64_t(-1);
- if (BuiltinOp != Builtin::BIstrcmp &&
- BuiltinOp != Builtin::BIwcscmp &&
+ if (BuiltinOp != Builtin::BIstrcmp && BuiltinOp != Builtin::BIwcscmp &&
BuiltinOp != Builtin::BI__builtin_strcmp &&
BuiltinOp != Builtin::BI__builtin_wcscmp) {
APSInt N;
@@ -14997,8 +14976,8 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
}
}
- return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
- Success(0, E) : Error(E);
+ return BuiltinOp == Builtin::BI__atomic_always_lock_free ? Success(0, E)
+ : Error(E);
}
case Builtin::BI__builtin_addcb:
case Builtin::BI__builtin_addcs:
@@ -15093,7 +15072,7 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
ResultType->isSignedIntegerOrEnumerationType();
bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
- ResultType->isSignedIntegerOrEnumerationType();
+ ResultType->isSignedIntegerOrEnumerationType();
uint64_t LHSSize = LHS.getBitWidth();
uint64_t RHSSize = RHS.getBitWidth();
uint64_t ResultSize = Info.Ctx.getTypeSize(ResultType);
@@ -15612,7 +15591,7 @@ class DataRecursiveIntBinOpEvaluator {
public:
DataRecursiveIntBinOpEvaluator(IntExprEvaluator &IntEval, APValue &Result)
- : IntEval(IntEval), Info(IntEval.getEvalInfo()), FinalResult(Result) { }
+ : IntEval(IntEval), Info(IntEval.getEvalInfo()), FinalResult(Result) {}
/// True if \param E is a binary operator that we are going to handle
/// data recursively.
@@ -15631,7 +15610,8 @@ class DataRecursiveIntBinOpEvaluator {
while (!Queue.empty())
process(PrevResult);
- if (PrevResult.Failed) return false;
+ if (PrevResult.Failed)
+ return false;
FinalResult.swap(PrevResult.Val);
return true;
@@ -15644,12 +15624,8 @@ class DataRecursiveIntBinOpEvaluator {
bool Success(const APSInt &Value, const Expr *E, APValue &Result) {
return IntEval.Success(Value, E, Result);
}
- bool Error(const Expr *E) {
- return IntEval.Error(E);
- }
- bool Error(const Expr *E, diag::kind D) {
- return IntEval.Error(E, D);
- }
+ bool Error(const Expr *E) { return IntEval.Error(E); }
+ bool Error(const Expr *E, diag::kind D) { return IntEval.Error(E, D); }
OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
return Info.CCEDiag(E, D);
@@ -15672,17 +15648,17 @@ class DataRecursiveIntBinOpEvaluator {
void enqueue(const Expr *E) {
E = E->IgnoreParens();
- Queue.resize(Queue.size()+1);
+ Queue.resize(Queue.size() + 1);
Queue.back().E = E;
Queue.back().Kind = Job::AnyExprKind;
}
};
-}
+} // namespace
-bool DataRecursiveIntBinOpEvaluator::
- VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
- bool &SuppressRHSDiags) {
+bool DataRecursiveIntBinOpEvaluator::VisitBinOpLHSOnly(EvalResult &LHSResult,
+ const BinaryOperator *E,
+ bool &SuppressRHSDiags) {
if (E->getOpcode() == BO_Comma) {
// Ignore LHS but note if we could not evaluate it.
if (LHSResult.Failed)
@@ -15734,13 +15710,14 @@ static void addOrSubLValueAsInteger(APValue &LVal, const APSInt &Index,
CharUnits &Offset = LVal.getLValueOffset();
uint64_t Offset64 = Offset.getQuantity();
uint64_t Index64 = Index.extOrTrunc(64).getZExtValue();
- Offset = CharUnits::fromQuantity(IsSub ? Offset64 - Index64
- : Offset64 + Index64);
+ Offset =
+ CharUnits::fromQuantity(IsSub ? Offset64 - Index64 : Offset64 + Index64);
}
-bool DataRecursiveIntBinOpEvaluator::
- VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
- const BinaryOperator *E, APValue &Result) {
+bool DataRecursiveIntBinOpEvaluator::VisitBinOp(const EvalResult &LHSResult,
+ const EvalResult &RHSResult,
+ const BinaryOperator *E,
+ APValue &Result) {
if (E->getOpcode() == BO_Comma) {
if (RHSResult.Failed)
return false;
@@ -15789,10 +15766,9 @@ bool DataRecursiveIntBinOpEvaluator::
}
// Handle cases like 4 + (unsigned long)&a
- if (E->getOpcode() == BO_Add &&
- RHSVal.isLValue() && LHSVal.isInt()) {
+ if (E->getOpcode() == BO_Add && RHSVal.isLValue() && LHSVal.isInt()) {
Result = RHSVal;
- addOrSubLValueAsInteger(Result, LHSVal.getInt(), /*IsSub*/false);
+ addOrSubLValueAsInteger(Result, LHSVal.getInt(), /*IsSub*/ false);
return true;
}
@@ -15801,8 +15777,8 @@ bool DataRecursiveIntBinOpEvaluator::
if (!LHSVal.getLValueOffset().isZero() ||
!RHSVal.getLValueOffset().isZero())
return false;
- const Expr *LHSExpr = LHSVal.getLValueBase().dyn_cast<const Expr*>();
- const Expr *RHSExpr = RHSVal.getLValueBase().dyn_cast<const Expr*>();
+ const Expr *LHSExpr = LHSVal.getLValueBase().dyn_cast<const Expr *>();
+ const Expr *RHSExpr = RHSVal.getLValueBase().dyn_cast<const Expr *>();
if (!LHSExpr || !RHSExpr)
return false;
const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
@@ -15836,43 +15812,43 @@ void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) {
Job &job = Queue.back();
switch (job.Kind) {
- case Job::AnyExprKind: {
- if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(job.E)) {
- if (shouldEnqueue(Bop)) {
- job.Kind = Job::BinOpKind;
- enqueue(Bop->getLHS());
- return;
- }
- }
-
- EvaluateExpr(job.E, Result);
- Queue.pop_back();
- return;
- }
-
- case Job::BinOpKind: {
- const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
- bool SuppressRHSDiags = false;
- if (!VisitBinOpLHSOnly(Result, Bop, SuppressRHSDiags)) {
- Queue.pop_back();
+ case Job::AnyExprKind: {
+ if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(job.E)) {
+ if (shouldEnqueue(Bop)) {
+ job.Kind = Job::BinOpKind;
+ enqueue(Bop->getLHS());
return;
}
- if (SuppressRHSDiags)
- job.startSpeculativeEval(Info);
- job.LHSResult.swap(Result);
- job.Kind = Job::BinOpVisitedLHSKind;
- enqueue(Bop->getRHS());
- return;
}
- case Job::BinOpVisitedLHSKind: {
- const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
- EvalResult RHS;
- RHS.swap(Result);
- Result.Failed = !VisitBinOp(job.LHSResult, RHS, Bop, Result.Val);
+ EvaluateExpr(job.E, Result);
+ Queue.pop_back();
+ return;
+ }
+
+ case Job::BinOpKind: {
+ const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
+ bool SuppressRHSDiags = false;
+ if (!VisitBinOpLHSOnly(Result, Bop, SuppressRHSDiags)) {
Queue.pop_back();
return;
}
+ if (SuppressRHSDiags)
+ job.startSpeculativeEval(Info);
+ job.LHSResult.swap(Result);
+ job.Kind = Job::BinOpVisitedLHSKind;
+ enqueue(Bop->getRHS());
+ return;
+ }
+
+ case Job::BinOpVisitedLHSKind: {
+ const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
+ EvalResult RHS;
+ RHS.swap(Result);
+ Result.Failed = !VisitBinOp(job.LHSResult, RHS, Bop, Result.Val);
+ Queue.pop_back();
+ return;
+ }
}
llvm_unreachable("Invalid Job::Kind!");
@@ -15968,9 +15944,9 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
if (LHS.isComplexFloat()) {
APFloat::cmpResult CR_r =
- LHS.getComplexFloatReal().compare(RHS.getComplexFloatReal());
+ LHS.getComplexFloatReal().compare(RHS.getComplexFloatReal());
APFloat::cmpResult CR_i =
- LHS.getComplexFloatImag().compare(RHS.getComplexFloatImag());
+ LHS.getComplexFloatImag().compare(RHS.getComplexFloatImag());
bool IsEqual = CR_r == APFloat::cmpEqual && CR_i == APFloat::cmpEqual;
return Success(IsEqual ? CmpResult::Equal : CmpResult::Unequal, E);
} else {
@@ -15981,8 +15957,7 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
}
}
- if (LHSTy->isRealFloatingType() &&
- RHSTy->isRealFloatingType()) {
+ if (LHSTy->isRealFloatingType() && RHSTy->isRealFloatingType()) {
APFloat RHS(0.0), LHS(0.0);
bool LHSOK = EvaluateFloat(E->getRHS(), RHS, Info);
@@ -15994,8 +15969,7 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
assert(E->isComparisonOp() && "Invalid binary operator!");
llvm::APFloatBase::cmpResult APFloatCmpResult = LHS.compare(RHS);
- if (!Info.InConstantContext &&
- APFloatCmpResult == APFloat::cmpUnordered &&
+ if (!Info.InConstantContext && APFloatCmpResult == APFloat::cmpUnordered &&
E->getFPFeaturesInEffect(Info.Ctx.getLangOpts()).isFPConstrained()) {
// Note: Compares may raise invalid in some cases involving NaN or sNaN.
Info.FFDiag(E, diag::note_constexpr_float_arithmetic_strict);
@@ -16035,7 +16009,7 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
if (Info.checkingPotentialConstantExpression() &&
(LHSValue.AllowConstexprUnknown || RHSValue.AllowConstexprUnknown))
return false;
- auto DiagComparison = [&] (unsigned DiagID, bool Reversed = false) {
+ auto DiagComparison = [&](unsigned DiagID, bool Reversed = false) {
std::string LHS = LHSValue.toString(Info.Ctx, E->getLHS()->getType());
std::string RHS = RHSValue.toString(Info.Ctx, E->getRHS()->getType());
Info.FFDiag(E, DiagID)
@@ -16084,7 +16058,7 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
return DiagComparison(diag::note_constexpr_pointer_comparison_past_end,
true);
if (RHSValue.Base && RHSValue.Offset.isZero() &&
- isOnePastTheEndOfCompleteObject(Info.Ctx, LHSValue))
+ isOnePastTheEndOfCompleteObject(Info.Ctx, LHSValue))
return DiagComparison(diag::note_constexpr_pointer_comparison_past_end,
false);
// We can't tell whether an object is at the same address as another
@@ -16313,8 +16287,7 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
auto OnSuccess = [&](CmpResult CR, const BinaryOperator *E) {
assert((CR != CmpResult::Unequal || E->isEqualityOp()) &&
"should only produce Unequal for equality comparisons");
- bool IsEqual = CR == CmpResult::Equal,
- IsLess = CR == CmpResult::Less,
+ bool IsEqual = CR == CmpResult::Equal, IsLess = CR == CmpResult::Less,
IsGreater = CR == CmpResult::Greater;
auto Op = E->getOpcode();
switch (Op) {
@@ -16447,8 +16420,8 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
/// VisitUnaryExprOrTypeTraitExpr - Evaluate a sizeof, alignof or vec_step with
/// a result as the expression's type.
bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
- const UnaryExprOrTypeTraitExpr *E) {
- switch(E->getKind()) {
+ const UnaryExprOrTypeTraitExpr *E) {
+ switch (E->getKind()) {
case UETT_PreferredAlignOf:
case UETT_AlignOf: {
if (E->isArgumentType())
@@ -16499,11 +16472,11 @@ bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
}
case UETT_OpenMPRequiredSimdAlign:
assert(E->isArgumentType());
- return Success(
- Info.Ctx.toCharUnitsFromBits(
- Info.Ctx.getOpenMPDefaultSimdAlign(E->getArgumentType()))
- .getQuantity(),
- E);
+ return Success(Info.Ctx
+ .toCharUnitsFromBits(Info.Ctx.getOpenMPDefaultSimdAlign(
+ E->getArgumentType()))
+ .getQuantity(),
+ E);
case UETT_VectorElements: {
QualType Ty = E->getTypeOfArgument();
// If the vector has a fixed size, we can determine the number of elements
@@ -16594,7 +16567,8 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
const auto *RD = CurrentType->getAsRecordDecl();
if (!RD)
return Error(OOE);
- if (RD->isInvalidDecl()) return false;
+ if (RD->isInvalidDecl())
+ return false;
const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
unsigned i = MemberDecl->getFieldIndex();
assert(i < RL.getFieldCount() && "offsetof field in wrong type");
@@ -16615,7 +16589,8 @@ bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
const auto *RD = CurrentType->getAsCXXRecordDecl();
if (!RD)
return Error(OOE);
- if (RD->isInvalidDecl()) return false;
+ if (RD->isInvalidDecl())
+ return false;
const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
// Find the base class itself.
@@ -16649,7 +16624,8 @@ bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
case UO_Minus: {
if (!Visit(E->getSubExpr()))
return false;
- if (!Result.isInt()) return Error(E);
+ if (!Result.isInt())
+ return Error(E);
const APSInt &Value = Result.getInt();
if (Value.isSigned() && Value.isMinSignedValue() && E->canOverflow()) {
if (Info.checkingForUndefinedBehavior())
@@ -16668,7 +16644,8 @@ bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
case UO_Not: {
if (!Visit(E->getSubExpr()))
return false;
- if (!Result.isInt()) return Error(E);
+ if (!Result.isInt())
+ return Error(E);
return Success(~Result.getInt(), E);
}
case UO_LNot: {
@@ -16835,8 +16812,8 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
}
}
- return Success(HandleIntToIntCast(Info, E, DestType, SrcType,
- Result.getInt()), E);
+ return Success(
+ HandleIntToIntCast(Info, E, DestType, SrcType, Result.getInt()), E);
}
case CK_PointerToIntegral: {
@@ -16934,7 +16911,7 @@ bool IntExprEvaluator::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
}
bool IntExprEvaluator::VisitConceptSpecializationExpr(
- const ConceptSpecializationExpr *E) {
+ const ConceptSpecializationExpr *E) {
return Success(E->isSatisfied(), E);
}
@@ -16944,28 +16921,29 @@ bool IntExprEvaluator::VisitRequiresExpr(const RequiresExpr *E) {
bool FixedPointExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
switch (E->getOpcode()) {
- default:
- // Invalid unary operators
+ default:
+ // Invalid unary operators
+ return Error(E);
+ case UO_Plus:
+ // The result is just the value.
+ return Visit(E->getSubExpr());
+ case UO_Minus: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (!Result.isFixedPoint())
return Error(E);
- case UO_Plus:
- // The result is just the value.
- return Visit(E->getSubExpr());
- case UO_Minus: {
- if (!Visit(E->getSubExpr())) return false;
- if (!Result.isFixedPoint())
- return Error(E);
- bool Overflowed;
- APFixedPoint Negated = Result.getFixedPoint().negate(&Overflowed);
- if (Overflowed && !HandleOverflow(Info, E, Negated, E->getType()))
- return false;
- return Success(Negated, E);
- }
- case UO_LNot: {
- bool bres;
- if (!EvaluateAsBooleanCondition(E->getSubExpr(), bres, Info))
- return false;
- return Success(!bres, E);
- }
+ bool Overflowed;
+ APFixedPoint Negated = Result.getFixedPoint().negate(&Overflowed);
+ if (Overflowed && !HandleOverflow(Info, E, Negated, E->getType()))
+ return false;
+ return Success(Negated, E);
+ }
+ case UO_LNot: {
+ bool bres;
+ if (!EvaluateAsBooleanCondition(E->getSubExpr(), bres, Info))
+ return false;
+ return Success(!bres, E);
+ }
}
}
@@ -16985,9 +16963,9 @@ bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
APFixedPoint Result = Src.convert(DestFXSema, &Overflowed);
if (Overflowed) {
if (Info.checkingForUndefinedBehavior())
- Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
- diag::warn_fixedpoint_constant_overflow)
- << Result.toString() << E->getType();
+ Info.Ctx.getDiagnostics().Report(
+ E->getExprLoc(), diag::warn_fixedpoint_constant_overflow)
+ << Result.toString() << E->getType();
if (!HandleOverflow(Info, E, Result, E->getType()))
return false;
}
@@ -17004,9 +16982,9 @@ bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
if (Overflowed) {
if (Info.checkingForUndefinedBehavior())
- Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
- diag::warn_fixedpoint_constant_overflow)
- << IntResult.toString() << E->getType();
+ Info.Ctx.getDiagnostics().Report(
+ E->getExprLoc(), diag::warn_fixedpoint_constant_overflow)
+ << IntResult.toString() << E->getType();
if (!HandleOverflow(Info, E, IntResult, E->getType()))
return false;
}
@@ -17024,9 +17002,9 @@ bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
if (Overflowed) {
if (Info.checkingForUndefinedBehavior())
- Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
- diag::warn_fixedpoint_constant_overflow)
- << Result.toString() << E->getType();
+ Info.Ctx.getDiagnostics().Report(
+ E->getExprLoc(), diag::warn_fixedpoint_constant_overflow)
+ << Result.toString() << E->getType();
if (!HandleOverflow(Info, E, Result, E->getType()))
return false;
}
@@ -17062,17 +17040,17 @@ bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
switch (E->getOpcode()) {
case BO_Add: {
Result = LHSFX.add(RHSFX, &OpOverflow)
- .convert(ResultFXSema, &ConversionOverflow);
+ .convert(ResultFXSema, &ConversionOverflow);
break;
}
case BO_Sub: {
Result = LHSFX.sub(RHSFX, &OpOverflow)
- .convert(ResultFXSema, &ConversionOverflow);
+ .convert(ResultFXSema, &ConversionOverflow);
break;
}
case BO_Mul: {
Result = LHSFX.mul(RHSFX, &OpOverflow)
- .convert(ResultFXSema, &ConversionOverflow);
+ .convert(ResultFXSema, &ConversionOverflow);
break;
}
case BO_Div: {
@@ -17081,7 +17059,7 @@ bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
return false;
}
Result = LHSFX.div(RHSFX, &OpOverflow)
- .convert(ResultFXSema, &ConversionOverflow);
+ .convert(ResultFXSema, &ConversionOverflow);
break;
}
case BO_Shl:
@@ -17114,7 +17092,7 @@ bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (Info.checkingForUndefinedBehavior())
Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
diag::warn_fixedpoint_constant_overflow)
- << Result.toString() << E->getType();
+ << Result.toString() << E->getType();
if (!HandleOverflow(Info, E, Result, E->getType()))
return false;
}
@@ -17126,12 +17104,12 @@ bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
//===----------------------------------------------------------------------===//
namespace {
-class FloatExprEvaluator
- : public ExprEvaluatorBase<FloatExprEvaluator> {
+class FloatExprEvaluator : public ExprEvaluatorBase<FloatExprEvaluator> {
APFloat &Result;
+
public:
FloatExprEvaluator(EvalInfo &info, APFloat &result)
- : ExprEvaluatorBaseTy(info), Result(result) {}
+ : ExprEvaluatorBaseTy(info), Result(result) {}
bool Success(const APValue &V, const Expr *e) {
Result = V.getFloat();
@@ -17157,19 +17135,18 @@ class FloatExprEvaluator
};
} // end anonymous namespace
-static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
+static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info) {
assert(!E->isValueDependent());
assert(E->isPRValue() && E->getType()->isRealFloatingType());
return FloatExprEvaluator(Info, Result).Visit(E);
}
-static bool TryEvaluateBuiltinNaN(const ASTContext &Context,
- QualType ResultTy,
- const Expr *Arg,
- bool SNaN,
+static bool TryEvaluateBuiltinNaN(const ASTContext &Context, QualType ResultTy,
+ const Expr *Arg, bool SNaN,
llvm::APFloat &Result) {
const StringLiteral *S = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
- if (!S) return false;
+ if (!S)
+ return false;
const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(ResultTy);
@@ -17220,7 +17197,7 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_inff16:
case Builtin::BI__builtin_inff128: {
const llvm::fltSemantics &Sem =
- Info.Ctx.getFloatTypeSemantics(E->getType());
+ Info.Ctx.getFloatTypeSemantics(E->getType());
Result = llvm::APFloat::getInf(Sem);
return true;
}
@@ -17230,8 +17207,8 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_nansl:
case Builtin::BI__builtin_nansf16:
case Builtin::BI__builtin_nansf128:
- if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
- true, Result))
+ if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0), true,
+ Result))
return Error(E);
return true;
@@ -17242,8 +17219,8 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_nanf128:
// If this is __builtin_nan() turn this into a nan, otherwise we
// can't constant fold it.
- if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
- false, Result))
+ if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0), false,
+ Result))
return Error(E);
return true;
@@ -17267,9 +17244,9 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__arithmetic_fence:
return EvaluateFloat(E->getArg(0), Result, Info);
- // FIXME: Builtin::BI__builtin_powi
- // FIXME: Builtin::BI__builtin_powif
- // FIXME: Builtin::BI__builtin_powil
+ // FIXME: Builtin::BI__builtin_powi
+ // FIXME: Builtin::BI__builtin_powif
+ // FIXME: Builtin::BI__builtin_powil
case Builtin::BI__builtin_copysign:
case Builtin::BI__builtin_copysignf:
@@ -17392,7 +17369,8 @@ bool FloatExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
switch (E->getOpcode()) {
- default: return Error(E);
+ default:
+ return Error(E);
case UO_Plus:
return EvaluateFloat(E->getSubExpr(), Result, Info);
case UO_Minus:
@@ -17424,7 +17402,7 @@ bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) {
}
bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
- const Expr* SubExpr = E->getSubExpr();
+ const Expr *SubExpr = E->getSubExpr();
switch (E->getCastKind()) {
default:
@@ -17432,11 +17410,10 @@ bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_IntegralToFloating: {
APSInt IntResult;
- const FPOptions FPO = E->getFPFeaturesInEffect(
- Info.Ctx.getLangOpts());
+ const FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
return EvaluateInteger(SubExpr, IntResult, Info) &&
- HandleIntToFloatCast(Info, E, FPO, SubExpr->getType(),
- IntResult, E->getType(), Result);
+ HandleIntToFloatCast(Info, E, FPO, SubExpr->getType(), IntResult,
+ E->getType(), Result);
}
case CK_FixedPointToFloating: {
@@ -17476,13 +17453,12 @@ bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
//===----------------------------------------------------------------------===//
namespace {
-class ComplexExprEvaluator
- : public ExprEvaluatorBase<ComplexExprEvaluator> {
+class ComplexExprEvaluator : public ExprEvaluatorBase<ComplexExprEvaluator> {
ComplexValue &Result;
public:
ComplexExprEvaluator(EvalInfo &info, ComplexValue &Result)
- : ExprEvaluatorBaseTy(info), Result(Result) {}
+ : ExprEvaluatorBaseTy(info), Result(Result) {}
bool Success(const APValue &V, const Expr *e) {
Result.setFrom(V);
@@ -17528,7 +17504,7 @@ bool ComplexExprEvaluator::ZeroInitialization(const Expr *E) {
}
bool ComplexExprEvaluator::VisitImaginaryLiteral(const ImaginaryLiteral *E) {
- const Expr* SubExpr = E->getSubExpr();
+ const Expr *SubExpr = E->getSubExpr();
if (SubExpr->getType()->isRealFloatingType()) {
Result.makeComplexFloat();
@@ -17639,8 +17615,8 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
return false;
QualType To = E->getType()->castAs<ComplexType>()->getElementType();
- QualType From
- = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
+ QualType From =
+ E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
return HandleFloatToFloatCast(Info, E, From, To, Result.FloatReal) &&
HandleFloatToFloatCast(Info, E, From, To, Result.FloatImag);
@@ -17651,13 +17627,13 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
return false;
QualType To = E->getType()->castAs<ComplexType>()->getElementType();
- QualType From
- = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
+ QualType From =
+ E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
Result.makeComplexInt();
- return HandleFloatToIntCast(Info, E, From, Result.FloatReal,
- To, Result.IntReal) &&
- HandleFloatToIntCast(Info, E, From, Result.FloatImag,
- To, Result.IntImag);
+ return HandleFloatToIntCast(Info, E, From, Result.FloatReal, To,
+ Result.IntReal) &&
+ HandleFloatToIntCast(Info, E, From, Result.FloatImag, To,
+ Result.IntImag);
}
case CK_IntegralRealToComplex: {
@@ -17675,8 +17651,8 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
return false;
QualType To = E->getType()->castAs<ComplexType>()->getElementType();
- QualType From
- = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
+ QualType From =
+ E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
Result.IntReal = HandleIntToIntCast(Info, E, To, From, Result.IntReal);
Result.IntImag = HandleIntToIntCast(Info, E, To, From, Result.IntImag);
@@ -17687,16 +17663,15 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
if (!Visit(E->getSubExpr()))
return false;
- const FPOptions FPO = E->getFPFeaturesInEffect(
- Info.Ctx.getLangOpts());
+ const FPOptions FPO = E->getFPFeaturesInEffect(Info.Ctx.getLangOpts());
QualType To = E->getType()->castAs<ComplexType>()->getElementType();
- QualType From
- = E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
+ QualType From =
+ E->getSubExpr()->getType()->castAs<ComplexType>()->getElementType();
Result.makeComplexFloat();
- return HandleIntToFloatCast(Info, E, FPO, From, Result.IntReal,
- To, Result.FloatReal) &&
- HandleIntToFloatCast(Info, E, FPO, From, Result.IntImag,
- To, Result.FloatImag);
+ return HandleIntToFloatCast(Info, E, FPO, From, Result.IntReal, To,
+ Result.FloatReal) &&
+ HandleIntToFloatCast(Info, E, FPO, From, Result.IntImag, To,
+ Result.FloatImag);
}
}
@@ -17838,7 +17813,8 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
assert(!(LHSReal && RHSReal) &&
"Cannot have both operands of a complex operation be real.");
switch (E->getOpcode()) {
- default: return Error(E);
+ default:
+ return Error(E);
case BO_Add:
if (Result.isComplexFloat()) {
Result.getComplexFloatReal().add(RHS.getComplexFloatReal(),
@@ -17905,11 +17881,11 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
} else {
ComplexValue LHS = Result;
Result.getComplexIntReal() =
- (LHS.getComplexIntReal() * RHS.getComplexIntReal() -
- LHS.getComplexIntImag() * RHS.getComplexIntImag());
+ (LHS.getComplexIntReal() * RHS.getComplexIntReal() -
+ LHS.getComplexIntImag() * RHS.getComplexIntImag());
Result.getComplexIntImag() =
- (LHS.getComplexIntReal() * RHS.getComplexIntImag() +
- LHS.getComplexIntImag() * RHS.getComplexIntReal());
+ (LHS.getComplexIntReal() * RHS.getComplexIntImag() +
+ LHS.getComplexIntImag() * RHS.getComplexIntReal());
}
break;
case BO_Div:
@@ -17943,16 +17919,18 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
} else {
ComplexValue LHS = Result;
APSInt Den = RHS.getComplexIntReal() * RHS.getComplexIntReal() +
- RHS.getComplexIntImag() * RHS.getComplexIntImag();
+ RHS.getComplexIntImag() * RHS.getComplexIntImag();
if (Den.isZero())
return Error(E, diag::note_expr_divide_by_zero);
Result.getComplexIntReal() =
- (LHS.getComplexIntReal() * RHS.getComplexIntReal() +
- LHS.getComplexIntImag() * RHS.getComplexIntImag()) / Den;
+ (LHS.getComplexIntReal() * RHS.getComplexIntReal() +
+ LHS.getComplexIntImag() * RHS.getComplexIntImag()) /
+ Den;
Result.getComplexIntImag() =
- (LHS.getComplexIntImag() * RHS.getComplexIntReal() -
- LHS.getComplexIntReal() * RHS.getComplexIntImag()) / Den;
+ (LHS.getComplexIntImag() * RHS.getComplexIntReal() -
+ LHS.getComplexIntReal() * RHS.getComplexIntImag()) /
+ Den;
}
break;
}
@@ -17977,8 +17955,7 @@ bool ComplexExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
if (Result.isComplexFloat()) {
Result.getComplexFloatReal().changeSign();
Result.getComplexFloatImag().changeSign();
- }
- else {
+ } else {
Result.getComplexIntReal() = -Result.getComplexIntReal();
Result.getComplexIntImag() = -Result.getComplexIntImag();
}
@@ -18036,10 +18013,10 @@ bool ComplexExprEvaluator::VisitCallExpr(const CallExpr *E) {
//===----------------------------------------------------------------------===//
namespace {
-class AtomicExprEvaluator :
- public ExprEvaluatorBase<AtomicExprEvaluator> {
+class AtomicExprEvaluator : public ExprEvaluatorBase<AtomicExprEvaluator> {
const LValue *This;
APValue &Result;
+
public:
AtomicExprEvaluator(EvalInfo &Info, const LValue *This, APValue &Result)
: ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}
@@ -18086,8 +18063,7 @@ static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
//===----------------------------------------------------------------------===//
namespace {
-class VoidExprEvaluator
- : public ExprEvaluatorBase<VoidExprEvaluator> {
+class VoidExprEvaluator : public ExprEvaluatorBase<VoidExprEvaluator> {
public:
VoidExprEvaluator(EvalInfo &Info) : ExprEvaluatorBaseTy(Info) {}
@@ -18244,7 +18220,8 @@ static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
return false;
C.moveInto(Result);
} else if (T->isFixedPointType()) {
- if (!FixedPointExprEvaluator(Info, Result).Visit(E)) return false;
+ if (!FixedPointExprEvaluator(Info, Result).Visit(E))
+ return false;
} else if (T->isMemberPointerType()) {
MemberPtr P;
if (!EvaluateMemberPointer(E, P, Info))
@@ -18267,8 +18244,7 @@ static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
Result = Value;
} else if (T->isVoidType()) {
if (!Info.getLangOpts().CPlusPlus11)
- Info.CCEDiag(E, diag::note_constexpr_nonliteral)
- << E->getType();
+ Info.CCEDiag(E, diag::note_constexpr_nonliteral) << E->getType();
if (!EvaluateVoid(E, Info))
return false;
} else if (T->isAtomicType()) {
@@ -18621,7 +18597,7 @@ bool Expr::EvaluateAsConstantExpr(EvalResult &Result, const ASTContext &Ctx,
// If we're evaluating a prvalue, fake up a MaterializeTemporaryExpr to
// represent the result of the evaluation. CheckConstantExpression ensures
// this doesn't escape.
- MaterializeTemporaryExpr BaseMTE(T, const_cast<Expr*>(this), true);
+ MaterializeTemporaryExpr BaseMTE(T, const_cast<Expr *>(this), true);
APValue::LValueBase Base(&BaseMTE);
Info.setEvaluatingDecl(Base, Result.Val);
@@ -18858,13 +18834,13 @@ struct ICEDiag {
ICEDiag(ICEKind IK, SourceLocation l) : Kind(IK), Loc(l) {}
};
-}
+} // namespace
static ICEDiag NoDiag() { return ICEDiag(IK_ICE, SourceLocation()); }
static ICEDiag Worst(ICEDiag A, ICEDiag B) { return A.Kind >= B.Kind ? A : B; }
-static ICEDiag CheckEvalInICE(const Expr* E, const ASTContext &Ctx) {
+static ICEDiag CheckEvalInICE(const Expr *E, const ASTContext &Ctx) {
Expr::EvalResult EVResult;
Expr::EvalStatus Status;
EvalInfo Info(Ctx, Status, EvaluationMode::ConstantExpression);
@@ -18877,7 +18853,7 @@ static ICEDiag CheckEvalInICE(const Expr* E, const ASTContext &Ctx) {
return NoDiag();
}
-static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
+static ICEDiag CheckICE(const Expr *E, const ASTContext &Ctx) {
assert(!E->isValueDependent() && "Should not see value dependent exprs!");
if (!E->getType()->isIntegralOrEnumerationType())
return ICEDiag(IK_NotICE, E->getBeginLoc());
@@ -18995,8 +18971,8 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
return CheckICE(cast<PackIndexingExpr>(E)->getSelectedExpr(), Ctx);
case Expr::SubstNonTypeTemplateParmExprClass:
- return
- CheckICE(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), Ctx);
+ return CheckICE(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
+ Ctx);
case Expr::ConstantExprClass:
return CheckICE(cast<ConstantExpr>(E)->getSubExpr(), Ctx);
@@ -19088,7 +19064,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
}
case Expr::UnaryExprOrTypeTraitExprClass: {
const UnaryExprOrTypeTraitExpr *Exp = cast<UnaryExprOrTypeTraitExpr>(E);
- if ((Exp->getKind() == UETT_SizeOf) &&
+ if ((Exp->getKind() == UETT_SizeOf) &&
Exp->getTypeOfArgument()->isVariableArrayType())
return ICEDiag(IK_NotICE, E->getBeginLoc());
if (Exp->getKind() == UETT_CountOf) {
@@ -19151,8 +19127,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case BO_Cmp: {
ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
- if (Exp->getOpcode() == BO_Div ||
- Exp->getOpcode() == BO_Rem) {
+ if (Exp->getOpcode() == BO_Div || Exp->getOpcode() == BO_Rem) {
// EvaluateAsRValue gives an error for undefined Div/Rem, so make sure
// we don't evaluate one.
if (LHSResult.Kind == IK_ICE && RHSResult.Kind == IK_ICE) {
@@ -19207,8 +19182,8 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::ObjCBridgedCastExprClass: {
const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
if (isa<ExplicitCastExpr>(E)) {
- if (const FloatingLiteral *FL
- = dyn_cast<FloatingLiteral>(SubExpr->IgnoreParenImpCasts())) {
+ if (const FloatingLiteral *FL =
+ dyn_cast<FloatingLiteral>(SubExpr->IgnoreParenImpCasts())) {
unsigned DestWidth = Ctx.getIntWidth(E->getType());
bool DestSigned = E->getType()->isSignedIntegerOrEnumerationType();
APSInt IgnoredVal(DestWidth, !DestSigned);
@@ -19216,9 +19191,9 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
// If the value does not fit in the destination type, the behavior is
// undefined, so we are not required to treat it as a constant
// expression.
- if (FL->getValue().convertToInteger(IgnoredVal,
- llvm::APFloat::rmTowardZero,
- &Ignored) & APFloat::opInvalidOp)
+ if (FL->getValue().convertToInteger(
+ IgnoredVal, llvm::APFloat::rmTowardZero, &Ignored) &
+ APFloat::opInvalidOp)
return ICEDiag(IK_NotICE, E->getBeginLoc());
return NoDiag();
}
@@ -19238,12 +19213,16 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::BinaryConditionalOperatorClass: {
const BinaryConditionalOperator *Exp = cast<BinaryConditionalOperator>(E);
ICEDiag CommonResult = CheckICE(Exp->getCommon(), Ctx);
- if (CommonResult.Kind == IK_NotICE) return CommonResult;
+ if (CommonResult.Kind == IK_NotICE)
+ return CommonResult;
ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
- if (FalseResult.Kind == IK_NotICE) return FalseResult;
- if (CommonResult.Kind == IK_ICEIfUnevaluated) return CommonResult;
+ if (FalseResult.Kind == IK_NotICE)
+ return FalseResult;
+ if (CommonResult.Kind == IK_ICEIfUnevaluated)
+ return CommonResult;
if (FalseResult.Kind == IK_ICEIfUnevaluated &&
- Exp->getCommon()->EvaluateKnownConstInt(Ctx) != 0) return NoDiag();
+ Exp->getCommon()->EvaluateKnownConstInt(Ctx) != 0)
+ return NoDiag();
return FalseResult;
}
case Expr::ConditionalOperatorClass: {
@@ -19252,8 +19231,8 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
// then only the true side is actually considered in an integer constant
// expression, and it is fully evaluated. This is an important GNU
// extension. See GCC PR38377 for discussion.
- if (const CallExpr *CallCE
- = dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts()))
+ if (const CallExpr *CallCE =
+ dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts()))
if (CallCE->getBuiltinCallee() == Builtin::BI__builtin_constant_p)
return CheckEvalInICE(E, Ctx);
ICEDiag CondResult = CheckICE(Exp->getCond(), Ctx);
@@ -19309,7 +19288,8 @@ static bool EvaluateCPlusPlus11IntegralConstantExpr(const ASTContext &Ctx,
if (!Result.isInt())
return false;
- if (Value) *Value = Result.getInt();
+ if (Value)
+ *Value = Result.getInt();
return true;
}
@@ -19400,7 +19380,7 @@ bool Expr::isCXX11ConstantExpr(const ASTContext &Ctx, APValue *Result) const {
bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
const FunctionDecl *Callee,
- ArrayRef<const Expr*> Args,
+ ArrayRef<const Expr *> Args,
const Expr *This) const {
assert(!isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
@@ -19437,14 +19417,13 @@ bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
}
CallRef Call = Info.CurrentCall->createCall(Callee);
- for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
+ for (ArrayRef<const Expr *>::iterator I = Args.begin(), E = Args.end();
I != E; ++I) {
unsigned Idx = I - Args.begin();
if (Idx >= Callee->getNumParams())
break;
const ParmVarDecl *PVD = Callee->getParamDecl(Idx);
- if ((*I)->isValueDependent() ||
- !EvaluateCallArg(PVD, *I, Call, Info) ||
+ if ((*I)->isValueDependent() || !EvaluateCallArg(PVD, *I, Call, Info) ||
Info.EvalStatus.HasSideEffects) {
// If evaluation fails, throw away the argument entirely.
if (APValue *Slot = Info.getParamSlot(Call, PVD))
@@ -19470,9 +19449,8 @@ bool Expr::EvaluateWithSubstitution(APValue &Value, ASTContext &Ctx,
!Info.EvalStatus.HasSideEffects;
}
-bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
- SmallVectorImpl<
- PartialDiagnosticAt> &Diags) {
+bool Expr::isPotentialConstantExpr(
+ const FunctionDecl *FD, SmallVectorImpl<PartialDiagnosticAt> &Diags) {
// FIXME: It would be useful to check constexpr function templates, but at the
// moment the constant expression evaluator cannot cope with the non-rigorous
// ASTs which we build for dependent expressions.
@@ -19511,7 +19489,7 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
: Info.Ctx.IntTy);
This.set({&VIE, Info.CurrentCall->Index});
- ArrayRef<const Expr*> Args;
+ ArrayRef<const Expr *> Args;
APValue Scratch;
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
@@ -19530,10 +19508,9 @@ bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
return Diags.empty();
}
-bool Expr::isPotentialConstantExprUnevaluated(Expr *E,
- const FunctionDecl *FD,
- SmallVectorImpl<
- PartialDiagnosticAt> &Diags) {
+bool Expr::isPotentialConstantExprUnevaluated(
+ Expr *E, const FunctionDecl *FD,
+ SmallVectorImpl<PartialDiagnosticAt> &Diags) {
assert(!E->isValueDependent() &&
"Expression evaluator can't be called on a dependent expression.");
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index 97e48357f3ccc..28a7f231d12f7 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -32,291 +32,256 @@
/* Integer compare */
-#define _mm_cmp_epi8_mask(a, b, p) \
- ((__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
- (__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)-1))
-
-#define _mm_mask_cmp_epi8_mask(m, a, b, p) \
- ((__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
- (__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)(m)))
-
-#define _mm_cmp_epu8_mask(a, b, p) \
- ((__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
- (__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)-1))
-
-#define _mm_mask_cmp_epu8_mask(m, a, b, p) \
- ((__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
- (__v16qi)(__m128i)(b), (int)(p), \
- (__mmask16)(m)))
-
-#define _mm256_cmp_epi8_mask(a, b, p) \
- ((__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
- (__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)-1))
-
-#define _mm256_mask_cmp_epi8_mask(m, a, b, p) \
- ((__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
- (__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)(m)))
-
-#define _mm256_cmp_epu8_mask(a, b, p) \
- ((__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
- (__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)-1))
-
-#define _mm256_mask_cmp_epu8_mask(m, a, b, p) \
- ((__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
- (__v32qi)(__m256i)(b), (int)(p), \
- (__mmask32)(m)))
-
-#define _mm_cmp_epi16_mask(a, b, p) \
- ((__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
- (__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)-1))
-
-#define _mm_mask_cmp_epi16_mask(m, a, b, p) \
- ((__mmask8)__builtin_ia32_cmpw128_mask((__v8hi)(__m128i)(a), \
- (__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)(m)))
-
-#define _mm_cmp_epu16_mask(a, b, p) \
- ((__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
- (__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)-1))
-
-#define _mm_mask_cmp_epu16_mask(m, a, b, p) \
- ((__mmask8)__builtin_ia32_ucmpw128_mask((__v8hi)(__m128i)(a), \
- (__v8hi)(__m128i)(b), (int)(p), \
- (__mmask8)(m)))
-
-#define _mm256_cmp_epi16_mask(a, b, p) \
- ((__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
- (__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)-1))
-
-#define _mm256_mask_cmp_epi16_mask(m, a, b, p) \
- ((__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
- (__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)(m)))
-
-#define _mm256_cmp_epu16_mask(a, b, p) \
- ((__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
- (__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)-1))
-
-#define _mm256_mask_cmp_epu16_mask(m, a, b, p) \
- ((__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
- (__v16hi)(__m256i)(b), (int)(p), \
- (__mmask16)(m)))
-
-#define _mm_cmpeq_epi8_mask(A, B) \
- _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epi8_mask(k, A, B) \
- _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epi8_mask(A, B) \
- _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epi8_mask(k, A, B) \
- _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epi8_mask(A, B) \
- _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epi8_mask(k, A, B) \
- _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epi8_mask(A, B) \
- _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epi8_mask(k, A, B) \
- _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epi8_mask(A, B) \
- _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epi8_mask(k, A, B) \
- _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epi8_mask(A, B) \
- _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epi8_mask(k, A, B) \
- _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epi8_mask(A, B) \
- _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epi8_mask(k, A, B) \
- _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epi8_mask(A, B) \
- _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epi8_mask(k, A, B) \
- _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epi8_mask(A, B) \
- _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epi8_mask(k, A, B) \
- _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epi8_mask(A, B) \
- _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epi8_mask(k, A, B) \
- _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epi8_mask(A, B) \
- _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epi8_mask(k, A, B) \
- _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epi8_mask(A, B) \
- _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epi8_mask(k, A, B) \
- _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm_cmpeq_epu8_mask(A, B) \
- _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epu8_mask(k, A, B) \
- _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epu8_mask(A, B) \
- _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epu8_mask(k, A, B) \
- _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epu8_mask(A, B) \
- _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epu8_mask(k, A, B) \
- _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epu8_mask(A, B) \
- _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epu8_mask(k, A, B) \
- _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epu8_mask(A, B) \
- _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epu8_mask(k, A, B) \
- _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epu8_mask(A, B) \
- _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epu8_mask(k, A, B) \
- _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epu8_mask(A, B) \
- _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epu8_mask(k, A, B) \
- _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epu8_mask(A, B) \
- _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epu8_mask(k, A, B) \
- _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epu8_mask(A, B) \
- _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epu8_mask(k, A, B) \
- _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epu8_mask(A, B) \
- _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epu8_mask(k, A, B) \
- _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epu8_mask(A, B) \
- _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epu8_mask(k, A, B) \
- _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epu8_mask(A, B) \
- _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epu8_mask(k, A, B) \
- _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm_cmpeq_epi16_mask(A, B) \
- _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epi16_mask(k, A, B) \
- _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epi16_mask(A, B) \
- _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epi16_mask(k, A, B) \
- _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epi16_mask(A, B) \
- _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epi16_mask(k, A, B) \
- _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epi16_mask(A, B) \
- _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epi16_mask(k, A, B) \
- _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epi16_mask(A, B) \
- _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epi16_mask(k, A, B) \
- _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epi16_mask(A, B) \
- _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epi16_mask(k, A, B) \
- _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epi16_mask(A, B) \
- _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epi16_mask(k, A, B) \
- _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epi16_mask(A, B) \
- _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epi16_mask(k, A, B) \
- _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epi16_mask(A, B) \
- _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epi16_mask(k, A, B) \
- _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epi16_mask(A, B) \
- _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epi16_mask(k, A, B) \
- _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epi16_mask(A, B) \
- _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epi16_mask(k, A, B) \
- _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epi16_mask(A, B) \
- _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epi16_mask(k, A, B) \
- _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm_cmpeq_epu16_mask(A, B) \
- _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm_mask_cmpeq_epu16_mask(k, A, B) \
- _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm_cmpge_epu16_mask(A, B) \
- _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_GE)
-#define _mm_mask_cmpge_epu16_mask(k, A, B) \
- _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm_cmpgt_epu16_mask(A, B) \
- _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_GT)
-#define _mm_mask_cmpgt_epu16_mask(k, A, B) \
- _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm_cmple_epu16_mask(A, B) \
- _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_LE)
-#define _mm_mask_cmple_epu16_mask(k, A, B) \
- _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm_cmplt_epu16_mask(A, B) \
- _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_LT)
-#define _mm_mask_cmplt_epu16_mask(k, A, B) \
- _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm_cmpneq_epu16_mask(A, B) \
- _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_NE)
-#define _mm_mask_cmpneq_epu16_mask(k, A, B) \
- _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE)
-
-#define _mm256_cmpeq_epu16_mask(A, B) \
- _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ)
-#define _mm256_mask_cmpeq_epu16_mask(k, A, B) \
- _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ)
-#define _mm256_cmpge_epu16_mask(A, B) \
- _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_GE)
-#define _mm256_mask_cmpge_epu16_mask(k, A, B) \
- _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE)
-#define _mm256_cmpgt_epu16_mask(A, B) \
- _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_GT)
-#define _mm256_mask_cmpgt_epu16_mask(k, A, B) \
- _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT)
-#define _mm256_cmple_epu16_mask(A, B) \
- _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_LE)
-#define _mm256_mask_cmple_epu16_mask(k, A, B) \
- _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE)
-#define _mm256_cmplt_epu16_mask(A, B) \
- _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_LT)
-#define _mm256_mask_cmplt_epu16_mask(k, A, B) \
- _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT)
-#define _mm256_cmpneq_epu16_mask(A, B) \
- _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_NE)
-#define _mm256_mask_cmpneq_epu16_mask(k, A, B) \
- _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE)
+#define _mm_cmp_epi8_mask(a, b, p) \
+  ((__mmask16)__builtin_ia32_cmpb128_mask((__v16qi)(__m128i)(a), \
+                                          (__v16qi)(__m128i)(b), (int)(p), \
+                                          (__mmask16)-1))
+
+#define _mm_mask_cmp_epi8_mask(m, a, b, p) \
+ ((__mmask16)__builtin_ia32_cmpb128_mask( \
+ (__v16qi)(__m128i)(a), (__v16qi)(__m128i)(b), (int)(p), (__mmask16)(m)))
+
+#define _mm_cmp_epu8_mask(a, b, p) \
+  ((__mmask16)__builtin_ia32_ucmpb128_mask((__v16qi)(__m128i)(a), \
+                                           (__v16qi)(__m128i)(b), (int)(p), \
+                                           (__mmask16)-1))
+
+#define _mm_mask_cmp_epu8_mask(m, a, b, p) \
+ ((__mmask16)__builtin_ia32_ucmpb128_mask( \
+ (__v16qi)(__m128i)(a), (__v16qi)(__m128i)(b), (int)(p), (__mmask16)(m)))
+
+#define _mm256_cmp_epi8_mask(a, b, p) \
+  ((__mmask32)__builtin_ia32_cmpb256_mask((__v32qi)(__m256i)(a), \
+                                          (__v32qi)(__m256i)(b), (int)(p), \
+                                          (__mmask32)-1))
+
+#define _mm256_mask_cmp_epi8_mask(m, a, b, p) \
+ ((__mmask32)__builtin_ia32_cmpb256_mask( \
+ (__v32qi)(__m256i)(a), (__v32qi)(__m256i)(b), (int)(p), (__mmask32)(m)))
+
+#define _mm256_cmp_epu8_mask(a, b, p) \
+  ((__mmask32)__builtin_ia32_ucmpb256_mask((__v32qi)(__m256i)(a), \
+                                           (__v32qi)(__m256i)(b), (int)(p), \
+                                           (__mmask32)-1))
+
+#define _mm256_mask_cmp_epu8_mask(m, a, b, p) \
+ ((__mmask32)__builtin_ia32_ucmpb256_mask( \
+ (__v32qi)(__m256i)(a), (__v32qi)(__m256i)(b), (int)(p), (__mmask32)(m)))
+
+#define _mm_cmp_epi16_mask(a, b, p) \
+  ((__mmask8)__builtin_ia32_cmpw128_mask( \
+      (__v8hi)(__m128i)(a), (__v8hi)(__m128i)(b), (int)(p), (__mmask8)-1))
+
+#define _mm_mask_cmp_epi16_mask(m, a, b, p) \
+ ((__mmask8)__builtin_ia32_cmpw128_mask( \
+ (__v8hi)(__m128i)(a), (__v8hi)(__m128i)(b), (int)(p), (__mmask8)(m)))
+
+#define _mm_cmp_epu16_mask(a, b, p) \
+  ((__mmask8)__builtin_ia32_ucmpw128_mask( \
+      (__v8hi)(__m128i)(a), (__v8hi)(__m128i)(b), (int)(p), (__mmask8)-1))
+
+#define _mm_mask_cmp_epu16_mask(m, a, b, p) \
+ ((__mmask8)__builtin_ia32_ucmpw128_mask( \
+ (__v8hi)(__m128i)(a), (__v8hi)(__m128i)(b), (int)(p), (__mmask8)(m)))
+
+#define _mm256_cmp_epi16_mask(a, b, p) \
+  ((__mmask16)__builtin_ia32_cmpw256_mask((__v16hi)(__m256i)(a), \
+                                          (__v16hi)(__m256i)(b), (int)(p), \
+                                          (__mmask16)-1))
+
+#define _mm256_mask_cmp_epi16_mask(m, a, b, p) \
+ ((__mmask16)__builtin_ia32_cmpw256_mask( \
+ (__v16hi)(__m256i)(a), (__v16hi)(__m256i)(b), (int)(p), (__mmask16)(m)))
+
+#define _mm256_cmp_epu16_mask(a, b, p) \
+  ((__mmask16)__builtin_ia32_ucmpw256_mask((__v16hi)(__m256i)(a), \
+                                           (__v16hi)(__m256i)(b), (int)(p), \
+                                           (__mmask16)-1))
+
+#define _mm256_mask_cmp_epu16_mask(m, a, b, p) \
+ ((__mmask16)__builtin_ia32_ucmpw256_mask( \
+ (__v16hi)(__m256i)(a), (__v16hi)(__m256i)(b), (int)(p), (__mmask16)(m)))
+
+#define _mm_cmpeq_epi8_mask(A, B) _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
+#define _mm_mask_cmpeq_epi8_mask(k, A, B) \
+ _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ)
+#define _mm_cmpge_epi8_mask(A, B) _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_GE)
+#define _mm_mask_cmpge_epi8_mask(k, A, B) \
+ _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE)
+#define _mm_cmpgt_epi8_mask(A, B) _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_GT)
+#define _mm_mask_cmpgt_epi8_mask(k, A, B) \
+ _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT)
+#define _mm_cmple_epi8_mask(A, B) _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_LE)
+#define _mm_mask_cmple_epi8_mask(k, A, B) \
+ _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE)
+#define _mm_cmplt_epi8_mask(A, B) _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_LT)
+#define _mm_mask_cmplt_epi8_mask(k, A, B) \
+ _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT)
+#define _mm_cmpneq_epi8_mask(A, B) _mm_cmp_epi8_mask((A), (B), _MM_CMPINT_NE)
+#define _mm_mask_cmpneq_epi8_mask(k, A, B) \
+ _mm_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE)
+
+#define _mm256_cmpeq_epi8_mask(A, B) \
+ _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_EQ)
+#define _mm256_mask_cmpeq_epi8_mask(k, A, B) \
+ _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_EQ)
+#define _mm256_cmpge_epi8_mask(A, B) \
+ _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_GE)
+#define _mm256_mask_cmpge_epi8_mask(k, A, B) \
+ _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GE)
+#define _mm256_cmpgt_epi8_mask(A, B) \
+ _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_GT)
+#define _mm256_mask_cmpgt_epi8_mask(k, A, B) \
+ _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_GT)
+#define _mm256_cmple_epi8_mask(A, B) \
+ _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_LE)
+#define _mm256_mask_cmple_epi8_mask(k, A, B) \
+ _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LE)
+#define _mm256_cmplt_epi8_mask(A, B) \
+ _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_LT)
+#define _mm256_mask_cmplt_epi8_mask(k, A, B) \
+ _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_LT)
+#define _mm256_cmpneq_epi8_mask(A, B) \
+ _mm256_cmp_epi8_mask((A), (B), _MM_CMPINT_NE)
+#define _mm256_mask_cmpneq_epi8_mask(k, A, B) \
+ _mm256_mask_cmp_epi8_mask((k), (A), (B), _MM_CMPINT_NE)
+
+#define _mm_cmpeq_epu8_mask(A, B) _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ)
+#define _mm_mask_cmpeq_epu8_mask(k, A, B) \
+ _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ)
+#define _mm_cmpge_epu8_mask(A, B) _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_GE)
+#define _mm_mask_cmpge_epu8_mask(k, A, B) \
+ _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE)
+#define _mm_cmpgt_epu8_mask(A, B) _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_GT)
+#define _mm_mask_cmpgt_epu8_mask(k, A, B) \
+ _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT)
+#define _mm_cmple_epu8_mask(A, B) _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_LE)
+#define _mm_mask_cmple_epu8_mask(k, A, B) \
+ _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE)
+#define _mm_cmplt_epu8_mask(A, B) _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_LT)
+#define _mm_mask_cmplt_epu8_mask(k, A, B) \
+ _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT)
+#define _mm_cmpneq_epu8_mask(A, B) _mm_cmp_epu8_mask((A), (B), _MM_CMPINT_NE)
+#define _mm_mask_cmpneq_epu8_mask(k, A, B) \
+ _mm_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE)
+
+#define _mm256_cmpeq_epu8_mask(A, B) \
+ _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_EQ)
+#define _mm256_mask_cmpeq_epu8_mask(k, A, B) \
+ _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_EQ)
+#define _mm256_cmpge_epu8_mask(A, B) \
+ _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_GE)
+#define _mm256_mask_cmpge_epu8_mask(k, A, B) \
+ _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GE)
+#define _mm256_cmpgt_epu8_mask(A, B) \
+ _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_GT)
+#define _mm256_mask_cmpgt_epu8_mask(k, A, B) \
+ _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_GT)
+#define _mm256_cmple_epu8_mask(A, B) \
+ _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_LE)
+#define _mm256_mask_cmple_epu8_mask(k, A, B) \
+ _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LE)
+#define _mm256_cmplt_epu8_mask(A, B) \
+ _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_LT)
+#define _mm256_mask_cmplt_epu8_mask(k, A, B) \
+ _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_LT)
+#define _mm256_cmpneq_epu8_mask(A, B) \
+ _mm256_cmp_epu8_mask((A), (B), _MM_CMPINT_NE)
+#define _mm256_mask_cmpneq_epu8_mask(k, A, B) \
+ _mm256_mask_cmp_epu8_mask((k), (A), (B), _MM_CMPINT_NE)
+
+#define _mm_cmpeq_epi16_mask(A, B) _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ)
+#define _mm_mask_cmpeq_epi16_mask(k, A, B) \
+ _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ)
+#define _mm_cmpge_epi16_mask(A, B) _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_GE)
+#define _mm_mask_cmpge_epi16_mask(k, A, B) \
+ _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE)
+#define _mm_cmpgt_epi16_mask(A, B) _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_GT)
+#define _mm_mask_cmpgt_epi16_mask(k, A, B) \
+ _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT)
+#define _mm_cmple_epi16_mask(A, B) _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_LE)
+#define _mm_mask_cmple_epi16_mask(k, A, B) \
+ _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE)
+#define _mm_cmplt_epi16_mask(A, B) _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_LT)
+#define _mm_mask_cmplt_epi16_mask(k, A, B) \
+ _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT)
+#define _mm_cmpneq_epi16_mask(A, B) _mm_cmp_epi16_mask((A), (B), _MM_CMPINT_NE)
+#define _mm_mask_cmpneq_epi16_mask(k, A, B) \
+ _mm_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE)
+
+#define _mm256_cmpeq_epi16_mask(A, B) \
+ _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_EQ)
+#define _mm256_mask_cmpeq_epi16_mask(k, A, B) \
+ _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_EQ)
+#define _mm256_cmpge_epi16_mask(A, B) \
+ _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_GE)
+#define _mm256_mask_cmpge_epi16_mask(k, A, B) \
+ _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GE)
+#define _mm256_cmpgt_epi16_mask(A, B) \
+ _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_GT)
+#define _mm256_mask_cmpgt_epi16_mask(k, A, B) \
+ _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_GT)
+#define _mm256_cmple_epi16_mask(A, B) \
+ _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_LE)
+#define _mm256_mask_cmple_epi16_mask(k, A, B) \
+ _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LE)
+#define _mm256_cmplt_epi16_mask(A, B) \
+ _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_LT)
+#define _mm256_mask_cmplt_epi16_mask(k, A, B) \
+ _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_LT)
+#define _mm256_cmpneq_epi16_mask(A, B) \
+ _mm256_cmp_epi16_mask((A), (B), _MM_CMPINT_NE)
+#define _mm256_mask_cmpneq_epi16_mask(k, A, B) \
+ _mm256_mask_cmp_epi16_mask((k), (A), (B), _MM_CMPINT_NE)
+
+#define _mm_cmpeq_epu16_mask(A, B) _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ)
+#define _mm_mask_cmpeq_epu16_mask(k, A, B) \
+ _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ)
+#define _mm_cmpge_epu16_mask(A, B) _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_GE)
+#define _mm_mask_cmpge_epu16_mask(k, A, B) \
+ _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE)
+#define _mm_cmpgt_epu16_mask(A, B) _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_GT)
+#define _mm_mask_cmpgt_epu16_mask(k, A, B) \
+ _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT)
+#define _mm_cmple_epu16_mask(A, B) _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_LE)
+#define _mm_mask_cmple_epu16_mask(k, A, B) \
+ _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE)
+#define _mm_cmplt_epu16_mask(A, B) _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_LT)
+#define _mm_mask_cmplt_epu16_mask(k, A, B) \
+ _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT)
+#define _mm_cmpneq_epu16_mask(A, B) _mm_cmp_epu16_mask((A), (B), _MM_CMPINT_NE)
+#define _mm_mask_cmpneq_epu16_mask(k, A, B) \
+ _mm_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE)
+
+#define _mm256_cmpeq_epu16_mask(A, B) \
+ _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_EQ)
+#define _mm256_mask_cmpeq_epu16_mask(k, A, B) \
+ _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_EQ)
+#define _mm256_cmpge_epu16_mask(A, B) \
+ _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_GE)
+#define _mm256_mask_cmpge_epu16_mask(k, A, B) \
+ _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GE)
+#define _mm256_cmpgt_epu16_mask(A, B) \
+ _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_GT)
+#define _mm256_mask_cmpgt_epu16_mask(k, A, B) \
+ _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_GT)
+#define _mm256_cmple_epu16_mask(A, B) \
+ _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_LE)
+#define _mm256_mask_cmple_epu16_mask(k, A, B) \
+ _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LE)
+#define _mm256_cmplt_epu16_mask(A, B) \
+ _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_LT)
+#define _mm256_mask_cmplt_epu16_mask(k, A, B) \
+ _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_LT)
+#define _mm256_cmpneq_epu16_mask(A, B) \
+ _mm256_cmp_epu16_mask((A), (B), _MM_CMPINT_NE)
+#define _mm256_mask_cmpneq_epu16_mask(k, A, B) \
+ _mm256_mask_cmp_epu16_mask((k), (A), (B), _MM_CMPINT_NE)
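
For reference, a minimal sketch of how the newly constexpr-enabled
comparisons can be checked at compile time, in the style of clang's builtin
tests (TEST_CONSTEXPR is assumed to come from the test helper header; the
values below are illustrative only, not part of the patch):

  // Lanes 0,2,4,6 compare equal (raw mask 0x55); ANDing with the incoming
  // mask 0x0F keeps bits 0 and 2, so the expected result is 0x05.
  TEST_CONSTEXPR(_mm_mask_cmpeq_epi16_mask(
                     0x0F, (__m128i)(__v8hi){1, 2, 3, 4, 5, 6, 7, 8},
                     (__m128i)(__v8hi){1, 0, 3, 0, 5, 0, 7, 0}) == 0x05);
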
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_add_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_add_epi8(__A, __B),
- (__v32qi)__W);
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_add_epi8(__A, __B), (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
@@ -328,23 +293,21 @@ _mm256_maskz_add_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_add_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_add_epi16(__A, __B),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_add_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_add_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_add_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_add_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_sub_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_sub_epi8(__A, __B),
- (__v32qi)__W);
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_sub_epi8(__A, __B), (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
@@ -356,23 +319,21 @@ _mm256_maskz_sub_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_sub_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_sub_epi16(__A, __B),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_sub_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_sub_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_sub_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_sub_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_add_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
- (__v16qi)_mm_add_epi8(__A, __B),
- (__v16qi)__W);
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_add_epi8(__A, __B), (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -384,9 +345,8 @@ _mm_maskz_add_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_add_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_add_epi16(__A, __B),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_add_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -398,9 +358,8 @@ _mm_maskz_add_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_sub_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
- (__v16qi)_mm_sub_epi8(__A, __B),
- (__v16qi)__W);
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_sub_epi8(__A, __B), (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -412,9 +371,8 @@ _mm_maskz_sub_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_sub_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_sub_epi16(__A, __B),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_sub_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -426,23 +384,21 @@ _mm_maskz_sub_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_mullo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_mullo_epi16(__A, __B),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_mullo_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_mullo_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_mullo_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_mullo_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_mullo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_mullo_epi16(__A, __B),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_mullo_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -454,51 +410,44 @@ _mm_maskz_mullo_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_blend_epi8(__mmask16 __U, __m128i __A, __m128i __W) {
- return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
- (__v16qi) __W,
- (__v16qi) __A);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, (__v16qi)__W,
+ (__v16qi)__A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_blend_epi8(__mmask32 __U, __m256i __A, __m256i __W) {
- return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
- (__v32qi) __W,
- (__v32qi) __A);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, (__v32qi)__W,
+ (__v32qi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_blend_epi16(__mmask8 __U, __m128i __A, __m128i __W) {
- return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
- (__v8hi) __W,
- (__v8hi) __A);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, (__v8hi)__W,
+ (__v8hi)__A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_blend_epi16(__mmask16 __U, __m256i __A, __m256i __W) {
- return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
- (__v16hi) __W,
- (__v16hi) __A);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, (__v16hi)__W,
+ (__v16hi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
- (__v16qi)_mm_abs_epi8(__A),
- (__v16qi)__W);
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_abs_epi8(__A), (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_abs_epi8(__mmask16 __U, __m128i __A) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
- (__v16qi)_mm_abs_epi8(__A),
- (__v16qi)_mm_setzero_si128());
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_abs_epi8(__A), (__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A) {
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_abs_epi8(__A),
- (__v32qi)__W);
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_abs_epi8(__A), (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
@@ -510,23 +459,20 @@ _mm256_maskz_abs_epi8(__mmask32 __U, __m256i __A) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_abs_epi16(__A),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_abs_epi16(__A), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_abs_epi16(__mmask8 __U, __m128i __A) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_abs_epi16(__A),
- (__v8hi)_mm_setzero_si128());
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_abs_epi16(__A), (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_abs_epi16(__A),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_abs_epi16(__A), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
@@ -544,251 +490,204 @@ _mm_maskz_packs_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_packs_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
- (__v8hi)_mm_packs_epi32(__A, __B),
- (__v8hi)__W);
+_mm_mask_packs_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__M, (__v8hi)_mm_packs_epi32(__A, __B), (__v8hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
- (__v16hi)_mm256_packs_epi32(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+_mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__M, (__v16hi)_mm256_packs_epi32(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
- (__v16hi)_mm256_packs_epi32(__A, __B),
- (__v16hi)__W);
+_mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__M, (__v16hi)_mm256_packs_epi32(__A, __B), (__v16hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B)
-{
+_mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
(__v16qi)_mm_packs_epi16(__A, __B),
(__v16qi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
- (__v16qi)_mm_packs_epi16(__A, __B),
- (__v16qi)__W);
+_mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__M, (__v16qi)_mm_packs_epi16(__A, __B), (__v16qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
- (__v32qi)_mm256_packs_epi16(__A, __B),
- (__v32qi)_mm256_setzero_si256());
+_mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__M, (__v32qi)_mm256_packs_epi16(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
- (__v32qi)_mm256_packs_epi16(__A, __B),
- (__v32qi)__W);
+_mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__M, (__v32qi)_mm256_packs_epi16(__A, __B), (__v32qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B)
-{
+_mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
(__v8hi)_mm_packus_epi32(__A, __B),
(__v8hi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_packus_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
- (__v8hi)_mm_packus_epi32(__A, __B),
- (__v8hi)__W);
+_mm_mask_packus_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__M, (__v8hi)_mm_packus_epi32(__A, __B), (__v8hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
- (__v16hi)_mm256_packus_epi32(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+_mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__M, (__v16hi)_mm256_packus_epi32(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
- (__v16hi)_mm256_packus_epi32(__A, __B),
- (__v16hi)__W);
+_mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__M, (__v16hi)_mm256_packus_epi32(__A, __B), (__v16hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
- (__v16qi)_mm_packus_epi16(__A, __B),
- (__v16qi)_mm_setzero_si128());
+_mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__M, (__v16qi)_mm_packus_epi16(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
- (__v16qi)_mm_packus_epi16(__A, __B),
- (__v16qi)__W);
+_mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__M, (__v16qi)_mm_packus_epi16(__A, __B), (__v16qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
- (__v32qi)_mm256_packus_epi16(__A, __B),
- (__v32qi)_mm256_setzero_si256());
+_mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__M, (__v32qi)_mm256_packus_epi16(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
- (__v32qi)_mm256_packus_epi16(__A, __B),
- (__v32qi)__W);
+_mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__M, (__v32qi)_mm256_packus_epi16(__A, __B), (__v32qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
- (__v16qi)_mm_adds_epi8(__A, __B),
- (__v16qi)__W);
+_mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_adds_epi8(__A, __B), (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B)
-{
+_mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
(__v16qi)_mm_adds_epi8(__A, __B),
(__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_adds_epi8(__A, __B),
- (__v32qi)__W);
+_mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_adds_epi8(__A, __B), (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_adds_epi8(__A, __B),
- (__v32qi)_mm256_setzero_si256());
+_mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_adds_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_adds_epi16(__A, __B),
- (__v8hi)__W);
+_mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_adds_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B)
-{
+_mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_adds_epi16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_adds_epi16(__A, __B),
- (__v16hi)__W);
+_mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_adds_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_adds_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+_mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_adds_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
- (__v16qi)_mm_adds_epu8(__A, __B),
- (__v16qi)__W);
+_mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_adds_epu8(__A, __B), (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B)
-{
+_mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
(__v16qi)_mm_adds_epu8(__A, __B),
(__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_adds_epu8(__A, __B),
- (__v32qi)__W);
+_mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_adds_epu8(__A, __B), (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_adds_epu8(__A, __B),
- (__v32qi)_mm256_setzero_si256());
+_mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_adds_epu8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_adds_epu16(__A, __B),
- (__v8hi)__W);
+_mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_adds_epu16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B)
-{
+_mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_adds_epu16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_adds_epu16(__A, __B),
- (__v16hi)__W);
+_mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_adds_epu16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_adds_epu16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+_mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_adds_epu16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
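
All of these masked forms funnel through the same per-lane select builtins.
A rough scalar model of __builtin_ia32_selectw_256, illustrative only (the
builtin itself is compiler-internal):

  // Per 16-bit lane i: take op[i] when mask bit i is set, else src[i].
  static inline void selectw_256_model(unsigned short mask, const short op[16],
                                       const short src[16], short out[16]) {
    for (int i = 0; i < 16; ++i)
      out[i] = ((mask >> i) & 1) ? op[i] : src[i];
  }
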
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -852,9 +751,8 @@ _mm_maskz_max_epi8(__mmask16 __M, __m128i __A, __m128i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_max_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
- (__v16qi)_mm_max_epi8(__A, __B),
- (__v16qi)__W);
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__M, (__v16qi)_mm_max_epi8(__A, __B), (__v16qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
@@ -866,9 +764,8 @@ _mm256_maskz_max_epi8(__mmask32 __M, __m256i __A, __m256i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_max_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
- (__v32qi)_mm256_max_epi8(__A, __B),
- (__v32qi)__W);
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__M, (__v32qi)_mm256_max_epi8(__A, __B), (__v32qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -880,23 +777,21 @@ _mm_maskz_max_epi16(__mmask8 __M, __m128i __A, __m128i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_max_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
- (__v8hi)_mm_max_epi16(__A, __B),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__M, (__v8hi)_mm_max_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_max_epi16(__mmask16 __M, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
- (__v16hi)_mm256_max_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__M, (__v16hi)_mm256_max_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_max_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
- (__v16hi)_mm256_max_epi16(__A, __B),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__M, (__v16hi)_mm256_max_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -908,9 +803,8 @@ _mm_maskz_max_epu8(__mmask16 __M, __m128i __A, __m128i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_max_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
- (__v16qi)_mm_max_epu8(__A, __B),
- (__v16qi)__W);
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__M, (__v16qi)_mm_max_epu8(__A, __B), (__v16qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
@@ -922,9 +816,8 @@ _mm256_maskz_max_epu8(__mmask32 __M, __m256i __A, __m256i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_max_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
- (__v32qi)_mm256_max_epu8(__A, __B),
- (__v32qi)__W);
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__M, (__v32qi)_mm256_max_epu8(__A, __B), (__v32qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -936,23 +829,21 @@ _mm_maskz_max_epu16(__mmask8 __M, __m128i __A, __m128i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_max_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
- (__v8hi)_mm_max_epu16(__A, __B),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__M, (__v8hi)_mm_max_epu16(__A, __B), (__v8hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_max_epu16(__mmask16 __M, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
- (__v16hi)_mm256_max_epu16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__M, (__v16hi)_mm256_max_epu16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_max_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
- (__v16hi)_mm256_max_epu16(__A, __B),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__M, (__v16hi)_mm256_max_epu16(__A, __B), (__v16hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -964,9 +855,8 @@ _mm_maskz_min_epi8(__mmask16 __M, __m128i __A, __m128i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_min_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
- (__v16qi)_mm_min_epi8(__A, __B),
- (__v16qi)__W);
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__M, (__v16qi)_mm_min_epi8(__A, __B), (__v16qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
@@ -978,9 +868,8 @@ _mm256_maskz_min_epi8(__mmask32 __M, __m256i __A, __m256i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_min_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
- (__v32qi)_mm256_min_epi8(__A, __B),
- (__v32qi)__W);
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__M, (__v32qi)_mm256_min_epi8(__A, __B), (__v32qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -992,23 +881,21 @@ _mm_maskz_min_epi16(__mmask8 __M, __m128i __A, __m128i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_min_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
- (__v8hi)_mm_min_epi16(__A, __B),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__M, (__v8hi)_mm_min_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_min_epi16(__mmask16 __M, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
- (__v16hi)_mm256_min_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__M, (__v16hi)_mm256_min_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_min_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
- (__v16hi)_mm256_min_epi16(__A, __B),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__M, (__v16hi)_mm256_min_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -1020,9 +907,8 @@ _mm_maskz_min_epu8(__mmask16 __M, __m128i __A, __m128i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_min_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
- (__v16qi)_mm_min_epu8(__A, __B),
- (__v16qi)__W);
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__M, (__v16qi)_mm_min_epu8(__A, __B), (__v16qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
@@ -1034,9 +920,8 @@ _mm256_maskz_min_epu8(__mmask32 __M, __m256i __A, __m256i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_min_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
- (__v32qi)_mm256_min_epu8(__A, __B),
- (__v32qi)__W);
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__M, (__v32qi)_mm256_min_epu8(__A, __B), (__v32qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -1048,283 +933,234 @@ _mm_maskz_min_epu16(__mmask8 __M, __m128i __A, __m128i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_min_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
- (__v8hi)_mm_min_epu16(__A, __B),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__M, (__v8hi)_mm_min_epu16(__A, __B), (__v8hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_min_epu16(__mmask16 __M, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
- (__v16hi)_mm256_min_epu16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__M, (__v16hi)_mm256_min_epu16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_min_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
- (__v16hi)_mm256_min_epu16(__A, __B),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__M, (__v16hi)_mm256_min_epu16(__A, __B), (__v16hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_shuffle_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
- (__v16qi)_mm_shuffle_epi8(__A, __B),
- (__v16qi)__W);
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_shuffle_epi8(__A, __B), (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_shuffle_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
- (__v16qi)_mm_shuffle_epi8(__A, __B),
- (__v16qi)_mm_setzero_si128());
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_shuffle_epi8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_shuffle_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_shuffle_epi8(__A, __B),
- (__v32qi)__W);
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_shuffle_epi8(__A, __B), (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_shuffle_epi8(__A, __B),
- (__v32qi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_shuffle_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
- (__v16qi)_mm_subs_epi8(__A, __B),
- (__v16qi)__W);
+_mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_subs_epi8(__A, __B), (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B)
-{
+_mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
(__v16qi)_mm_subs_epi8(__A, __B),
(__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_subs_epi8(__A, __B),
- (__v32qi)__W);
+_mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_subs_epi8(__A, __B), (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_subs_epi8(__A, __B),
- (__v32qi)_mm256_setzero_si256());
+_mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_subs_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_subs_epi16(__A, __B),
- (__v8hi)__W);
+_mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_subs_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B)
-{
+_mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_subs_epi16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_subs_epi16(__A, __B),
- (__v16hi)__W);
+_mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_subs_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_subs_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+_mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_subs_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
- (__v16qi)_mm_subs_epu8(__A, __B),
- (__v16qi)__W);
+_mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_subs_epu8(__A, __B), (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B)
-{
+_mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
(__v16qi)_mm_subs_epu8(__A, __B),
(__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_subs_epu8(__A, __B),
- (__v32qi)__W);
+_mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_subs_epu8(__A, __B), (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_subs_epu8(__A, __B),
- (__v32qi)_mm256_setzero_si256());
+_mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_subs_epu8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_subs_epu16(__A, __B),
- (__v8hi)__W);
+_mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_subs_epu16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B)
-{
+_mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_subs_epu16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A,
- __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_subs_epu16(__A, __B),
- (__v16hi)__W);
+_mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_subs_epu16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_subs_epu16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+_mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_subs_epu16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_permutex2var_epi16(__m128i __A, __m128i __I, __m128i __B)
-{
+_mm_permutex2var_epi16(__m128i __A, __m128i __I, __m128i __B) {
return (__m128i)__builtin_ia32_vpermi2varhi128((__v8hi)__A, (__v8hi)__I,
- (__v8hi) __B);
+ (__v8hi)__B);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_permutex2var_epi16(__m128i __A, __mmask8 __U, __m128i __I,
- __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128(__U,
- (__v8hi)_mm_permutex2var_epi16(__A, __I, __B),
- (__v8hi)__A);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_permutex2var_epi16(
+ __m128i __A, __mmask8 __U, __m128i __I, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ __U, (__v8hi)_mm_permutex2var_epi16(__A, __I, __B), (__v8hi)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask2_permutex2var_epi16(__m128i __A, __m128i __I, __mmask8 __U,
- __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128(__U,
- (__v8hi)_mm_permutex2var_epi16(__A, __I, __B),
- (__v8hi)__I);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask2_permutex2var_epi16(
+ __m128i __A, __m128i __I, __mmask8 __U, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ __U, (__v8hi)_mm_permutex2var_epi16(__A, __I, __B), (__v8hi)__I);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_permutex2var_epi16 (__mmask8 __U, __m128i __A, __m128i __I,
- __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128(__U,
- (__v8hi)_mm_permutex2var_epi16(__A, __I, __B),
- (__v8hi)_mm_setzero_si128());
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_maskz_permutex2var_epi16(
+ __mmask8 __U, __m128i __A, __m128i __I, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ __U, (__v8hi)_mm_permutex2var_epi16(__A, __I, __B),
+ (__v8hi)_mm_setzero_si128());
}
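
For context, the two-source permute being masked here treats __A and __B as
one concatenated 16-entry table, indexed by the low bits of each lane of
__I. A rough scalar model of the 128-bit form, illustrative only:

  // Only the low 4 bits of each index matter for the 8x16-bit variant;
  // bit 3 selects between the two source tables.
  static inline void permutex2var_epi16_model(const short a[8],
                                              const short idx[8],
                                              const short b[8], short out[8]) {
    for (int i = 0; i < 8; ++i) {
      int sel = idx[i] & 15;
      out[i] = (sel < 8) ? a[sel] : b[sel - 8];
    }
  }
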
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_permutex2var_epi16(__m256i __A, __m256i __I, __m256i __B)
-{
+_mm256_permutex2var_epi16(__m256i __A, __m256i __I, __m256i __B) {
return (__m256i)__builtin_ia32_vpermi2varhi256((__v16hi)__A, (__v16hi)__I,
(__v16hi)__B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_permutex2var_epi16(__m256i __A, __mmask16 __U, __m256i __I,
- __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256(__U,
- (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B),
- (__v16hi)__A);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutex2var_epi16(
+ __m256i __A, __mmask16 __U, __m256i __I, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ __U, (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B), (__v16hi)__A);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask2_permutex2var_epi16(__m256i __A, __m256i __I, __mmask16 __U,
- __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256(__U,
- (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B),
- (__v16hi)__I);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask2_permutex2var_epi16(
+ __m256i __A, __m256i __I, __mmask16 __U, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ __U, (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B), (__v16hi)__I);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutex2var_epi16 (__mmask16 __U, __m256i __A, __m256i __I,
- __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256(__U,
- (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B),
- (__v16hi)_mm256_setzero_si256());
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_permutex2var_epi16(
+ __mmask16 __U, __m256i __A, __m256i __I, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ __U, (__v16hi)_mm256_permutex2var_epi16(__A, __I, __B),
+ (__v16hi)_mm256_setzero_si256());
}
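/* Usage sketch (assumes <immintrin.h>, compiled with -mavx512bw -mavx512vl):
 * the index vector selects 16-bit lanes from the concatenation of the two
 * sources (indices 8..15 pick from the second), and the masked form keeps
 * the original lane of the first argument wherever a mask bit is clear. */
static inline __m128i interleave_low_half(__m128i lo, __m128i hi) {
  const __m128i idx = _mm_setr_epi16(0, 9, 2, 11, 4, 13, 6, 15);
  /* Only the low four lanes are replaced; bits 4..7 of the mask are clear. */
  return _mm_mask_permutex2var_epi16(lo, (__mmask8)0x0F, idx, hi);
}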
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_maddubs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_maddubs_epi16(__X, __Y),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_maddubs_epi16(__X, __Y), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_maddubs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_maddubs_epi16(__X, __Y),
- (__v8hi)_mm_setzero_si128());
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_maddubs_epi16(__X, __Y),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_maddubs_epi16(__m256i __W, __mmask16 __U, __m256i __X,
__m256i __Y) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_maddubs_epi16(__X, __Y),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_maddubs_epi16(__X, __Y), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_maddubs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_maddubs_epi16(__X, __Y),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_maddubs_epi16(__X, __Y),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_madd_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_madd_epi16(__A, __B),
- (__v4si)__W);
+ return (__m128i)__builtin_ia32_selectd_128(
+ (__mmask8)__U, (__v4si)_mm_madd_epi16(__A, __B), (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -1336,100 +1172,87 @@ _mm_maskz_madd_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_madd_epi16(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_madd_epi16(__A, __B),
- (__v8si)__W);
+ return (__m256i)__builtin_ia32_selectd_256(
+ (__mmask8)__U, (__v8si)_mm256_madd_epi16(__A, __B), (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_madd_epi16(__mmask8 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_madd_epi16(__A, __B),
- (__v8si)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectd_256(
+ (__mmask8)__U, (__v8si)_mm256_madd_epi16(__A, __B),
+ (__v8si)_mm256_setzero_si256());
}
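/* Usage sketch (same assumptions as above): madd multiplies the 16-bit lanes
 * and sums adjacent pairs into 32-bit lanes; the maskz variant then zeroes
 * every 32-bit result whose mask bit is clear instead of merging. */
static inline __m256i pair_dot_even(__m256i a, __m256i b) {
  return _mm256_maskz_madd_epi16((__mmask8)0x55, a, b); /* keep even lanes */
}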
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtsepi16_epi8 (__m128i __A) {
- return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
- (__v16qi) _mm_setzero_si128(),
- (__mmask8) -1);
+_mm_cvtsepi16_epi8(__m128i __A) {
+ return (__m128i)__builtin_ia32_pmovswb128_mask(
+ (__v8hi)__A, (__v16qi)_mm_setzero_si128(), (__mmask8)-1);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
- return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
- (__v16qi) __O,
- __M);
+_mm_mask_cvtsepi16_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i)__builtin_ia32_pmovswb128_mask((__v8hi)__A, (__v16qi)__O,
+ __M);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtsepi16_epi8 (__mmask8 __M, __m128i __A) {
- return (__m128i) __builtin_ia32_pmovswb128_mask ((__v8hi) __A,
- (__v16qi) _mm_setzero_si128(),
- __M);
+_mm_maskz_cvtsepi16_epi8(__mmask8 __M, __m128i __A) {
+ return (__m128i)__builtin_ia32_pmovswb128_mask(
+ (__v8hi)__A, (__v16qi)_mm_setzero_si128(), __M);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtsepi16_epi8 (__m256i __A) {
- return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
- (__v16qi) _mm_setzero_si128(),
- (__mmask16) -1);
+_mm256_cvtsepi16_epi8(__m256i __A) {
+ return (__m128i)__builtin_ia32_pmovswb256_mask(
+ (__v16hi)__A, (__v16qi)_mm_setzero_si128(), (__mmask16)-1);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
- return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
- (__v16qi) __O,
- __M);
+_mm256_mask_cvtsepi16_epi8(__m128i __O, __mmask16 __M, __m256i __A) {
+ return (__m128i)__builtin_ia32_pmovswb256_mask((__v16hi)__A, (__v16qi)__O,
+ __M);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtsepi16_epi8 (__mmask16 __M, __m256i __A) {
- return (__m128i) __builtin_ia32_pmovswb256_mask ((__v16hi) __A,
- (__v16qi) _mm_setzero_si128(),
- __M);
+_mm256_maskz_cvtsepi16_epi8(__mmask16 __M, __m256i __A) {
+ return (__m128i)__builtin_ia32_pmovswb256_mask(
+ (__v16hi)__A, (__v16qi)_mm_setzero_si128(), __M);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_cvtusepi16_epi8 (__m128i __A) {
- return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
- (__v16qi) _mm_setzero_si128(),
- (__mmask8) -1);
+_mm_cvtusepi16_epi8(__m128i __A) {
+ return (__m128i)__builtin_ia32_pmovuswb128_mask(
+ (__v8hi)__A, (__v16qi)_mm_setzero_si128(), (__mmask8)-1);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
- return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
- (__v16qi) __O,
- __M);
+_mm_mask_cvtusepi16_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i)__builtin_ia32_pmovuswb128_mask((__v8hi)__A, (__v16qi)__O,
+ __M);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtusepi16_epi8 (__mmask8 __M, __m128i __A) {
- return (__m128i) __builtin_ia32_pmovuswb128_mask ((__v8hi) __A,
- (__v16qi) _mm_setzero_si128(),
- __M);
+_mm_maskz_cvtusepi16_epi8(__mmask8 __M, __m128i __A) {
+ return (__m128i)__builtin_ia32_pmovuswb128_mask(
+ (__v8hi)__A, (__v16qi)_mm_setzero_si128(), __M);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_cvtusepi16_epi8 (__m256i __A) {
- return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
- (__v16qi) _mm_setzero_si128(),
- (__mmask16) -1);
+_mm256_cvtusepi16_epi8(__m256i __A) {
+ return (__m128i)__builtin_ia32_pmovuswb256_mask(
+ (__v16hi)__A, (__v16qi)_mm_setzero_si128(), (__mmask16)-1);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi16_epi8 (__m128i __O, __mmask16 __M, __m256i __A) {
- return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
- (__v16qi) __O,
- __M);
+_mm256_mask_cvtusepi16_epi8(__m128i __O, __mmask16 __M, __m256i __A) {
+ return (__m128i)__builtin_ia32_pmovuswb256_mask((__v16hi)__A, (__v16qi)__O,
+ __M);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtusepi16_epi8 (__mmask16 __M, __m256i __A) {
- return (__m128i) __builtin_ia32_pmovuswb256_mask ((__v16hi) __A,
- (__v16qi) _mm_setzero_si128(),
- __M);
+_mm256_maskz_cvtusepi16_epi8(__mmask16 __M, __m256i __A) {
+ return (__m128i)__builtin_ia32_pmovuswb256_mask(
+ (__v16hi)__A, (__v16qi)_mm_setzero_si128(), __M);
}
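/* Usage sketch: unlike the truncating _mm_cvtepi16_epi8 below, the pmovuswb
 * forms saturate each unsigned 16-bit lane to [0, 255] before narrowing,
 * so e.g. 0x1234 becomes 0xFF rather than 0x34. */
static inline __m128i narrow_u16_clamped(__m128i v) {
  return _mm_cvtusepi16_epi8(v); /* low 8 bytes hold the results */
}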
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -1441,48 +1264,40 @@ _mm_cvtepi16_epi8(__m128i __A) {
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi16_epi8 (__m128i __O, __mmask8 __M, __m128i __A) {
- return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
- (__v16qi) __O,
- __M);
+_mm_mask_cvtepi16_epi8(__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i)__builtin_ia32_pmovwb128_mask((__v8hi)__A, (__v16qi)__O, __M);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi16_epi8 (__mmask8 __M, __m128i __A) {
- return (__m128i) __builtin_ia32_pmovwb128_mask ((__v8hi) __A,
- (__v16qi) _mm_setzero_si128(),
- __M);
+_mm_maskz_cvtepi16_epi8(__mmask8 __M, __m128i __A) {
+ return (__m128i)__builtin_ia32_pmovwb128_mask(
+ (__v8hi)__A, (__v16qi)_mm_setzero_si128(), __M);
}
static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
- __builtin_ia32_pmovwb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
+_mm_mask_cvtepi16_storeu_epi8(void *__P, __mmask8 __M, __m128i __A) {
+ __builtin_ia32_pmovwb128mem_mask((__v16qi *)__P, (__v8hi)__A, __M);
}
-
static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
- __builtin_ia32_pmovswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
+_mm_mask_cvtsepi16_storeu_epi8(void *__P, __mmask8 __M, __m128i __A) {
+ __builtin_ia32_pmovswb128mem_mask((__v16qi *)__P, (__v8hi)__A, __M);
}
static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask8 __M, __m128i __A)
-{
- __builtin_ia32_pmovuswb128mem_mask ((__v16qi *) __P, (__v8hi) __A, __M);
+_mm_mask_cvtusepi16_storeu_epi8(void *__P, __mmask8 __M, __m128i __A) {
+ __builtin_ia32_pmovuswb128mem_mask((__v16qi *)__P, (__v8hi)__A, __M);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_cvtepi16_epi8(__m256i __A) {
- return (__m128i)__builtin_convertvector((__v16hi) __A, __v16qi);
+  return (__m128i)__builtin_convertvector((__v16hi)__A, __v16qi);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_cvtepi16_epi8(__m128i __O, __mmask16 __M, __m256i __A) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
- (__v16qi)_mm256_cvtepi16_epi8(__A),
- (__v16qi)__O);
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__M, (__v16qi)_mm256_cvtepi16_epi8(__A), (__v16qi)__O);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS256_CONSTEXPR
@@ -1493,28 +1308,24 @@ _mm256_maskz_cvtepi16_epi8(__mmask16 __M, __m256i __A) {
}
static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
-{
- __builtin_ia32_pmovwb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M);
+_mm256_mask_cvtepi16_storeu_epi8(void *__P, __mmask16 __M, __m256i __A) {
+ __builtin_ia32_pmovwb256mem_mask((__v16qi *)__P, (__v16hi)__A, __M);
}
static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtsepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
-{
- __builtin_ia32_pmovswb256mem_mask ((__v16qi *) __P, (__v16hi) __A, __M);
+_mm256_mask_cvtsepi16_storeu_epi8(void *__P, __mmask16 __M, __m256i __A) {
+ __builtin_ia32_pmovswb256mem_mask((__v16qi *)__P, (__v16hi)__A, __M);
}
static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
-{
- __builtin_ia32_pmovuswb256mem_mask ((__v16qi*) __P, (__v16hi) __A, __M);
+_mm256_mask_cvtusepi16_storeu_epi8(void *__P, __mmask16 __M, __m256i __A) {
+ __builtin_ia32_pmovuswb256mem_mask((__v16qi *)__P, (__v16hi)__A, __M);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_mulhrs_epi16(__X, __Y),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_mulhrs_epi16(__X, __Y), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -1526,16 +1337,15 @@ _mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_mulhrs_epi16(__X, __Y),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_mulhrs_epi16(__X, __Y), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_mulhrs_epi16(__X, __Y),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_mulhrs_epi16(__X, __Y),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -1598,510 +1408,438 @@ _mm_mask_unpackhi_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_unpackhi_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
- (__v16qi)_mm_unpackhi_epi8(__A, __B),
- (__v16qi)_mm_setzero_si128());
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_unpackhi_epi8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpackhi_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_unpackhi_epi8(__A, __B),
- (__v32qi)__W);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_epi8(
+ __m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_unpackhi_epi8(__A, __B), (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_unpackhi_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_unpackhi_epi8(__A, __B),
- (__v32qi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_unpackhi_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_unpackhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_unpackhi_epi16(__A, __B),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_unpackhi_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_unpackhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_unpackhi_epi16(__A, __B),
- (__v8hi) _mm_setzero_si128());
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_unpackhi_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpackhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_unpackhi_epi16(__A, __B),
- (__v16hi)__W);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpackhi_epi16(
+ __m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_unpackhi_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_unpackhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_unpackhi_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_unpackhi_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_unpacklo_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
- (__v16qi)_mm_unpacklo_epi8(__A, __B),
- (__v16qi)__W);
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_unpacklo_epi8(__A, __B), (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_unpacklo_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
- (__v16qi)_mm_unpacklo_epi8(__A, __B),
- (__v16qi)_mm_setzero_si128());
+ return (__m128i)__builtin_ia32_selectb_128(
+ (__mmask16)__U, (__v16qi)_mm_unpacklo_epi8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpacklo_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_unpacklo_epi8(__A, __B),
- (__v32qi)__W);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_epi8(
+ __m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_unpacklo_epi8(__A, __B), (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_unpacklo_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
- (__v32qi)_mm256_unpacklo_epi8(__A, __B),
- (__v32qi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectb_256(
+ (__mmask32)__U, (__v32qi)_mm256_unpacklo_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_unpacklo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_unpacklo_epi16(__A, __B),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_unpacklo_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_unpacklo_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_unpacklo_epi16(__A, __B),
- (__v8hi) _mm_setzero_si128());
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_unpacklo_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_unpacklo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_unpacklo_epi16(__A, __B),
- (__v16hi)__W);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_unpacklo_epi16(
+ __m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_unpacklo_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_unpacklo_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_unpacklo_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_cvtepi8_epi16(__A),
- (__v8hi)__W);
+_mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_cvtepi8_epi16(__A), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A)
-{
+_mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_cvtepi8_epi16(__A),
(__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_cvtepi8_epi16(__A),
- (__v16hi)__W);
+_mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_cvtepi8_epi16(__A), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A)
-{
+_mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_cvtepi8_epi16(__A),
(__v16hi)_mm256_setzero_si256());
}
-
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_cvtepu8_epi16(__A),
- (__v8hi)__W);
+_mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_cvtepu8_epi16(__A), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A)
-{
+_mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_cvtepu8_epi16(__A),
(__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_cvtepu8_epi16(__A),
- (__v16hi)__W);
+_mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_cvtepu8_epi16(__A), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A)
-{
+_mm256_maskz_cvtepu8_epi16(__mmask16 __U, __m128i __A) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_cvtepu8_epi16(__A),
(__v16hi)_mm256_setzero_si256());
}
+#define _mm_mask_shufflehi_epi16(W, U, A, imm) \
+ ((__m128i)__builtin_ia32_selectw_128( \
+ (__mmask8)(U), (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
+ (__v8hi)(__m128i)(W)))
-#define _mm_mask_shufflehi_epi16(W, U, A, imm) \
- ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
- (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
- (__v8hi)(__m128i)(W)))
+#define _mm_maskz_shufflehi_epi16(U, A, imm) \
+ ((__m128i)__builtin_ia32_selectw_128( \
+ (__mmask8)(U), (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
+ (__v8hi)_mm_setzero_si128()))
-#define _mm_maskz_shufflehi_epi16(U, A, imm) \
- ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
- (__v8hi)_mm_shufflehi_epi16((A), (imm)), \
- (__v8hi)_mm_setzero_si128()))
+#define _mm256_mask_shufflehi_epi16(W, U, A, imm) \
+ ((__m256i)__builtin_ia32_selectw_256( \
+ (__mmask16)(U), (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
+ (__v16hi)(__m256i)(W)))
-#define _mm256_mask_shufflehi_epi16(W, U, A, imm) \
- ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
- (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
- (__v16hi)(__m256i)(W)))
+#define _mm256_maskz_shufflehi_epi16(U, A, imm) \
+ ((__m256i)__builtin_ia32_selectw_256( \
+ (__mmask16)(U), (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
+ (__v16hi)_mm256_setzero_si256()))
-#define _mm256_maskz_shufflehi_epi16(U, A, imm) \
- ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
- (__v16hi)_mm256_shufflehi_epi16((A), (imm)), \
- (__v16hi)_mm256_setzero_si256()))
+#define _mm_mask_shufflelo_epi16(W, U, A, imm) \
+ ((__m128i)__builtin_ia32_selectw_128( \
+ (__mmask8)(U), (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
+ (__v8hi)(__m128i)(W)))
-#define _mm_mask_shufflelo_epi16(W, U, A, imm) \
- ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
- (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
- (__v8hi)(__m128i)(W)))
+#define _mm_maskz_shufflelo_epi16(U, A, imm) \
+ ((__m128i)__builtin_ia32_selectw_128( \
+ (__mmask8)(U), (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
+ (__v8hi)_mm_setzero_si128()))
-#define _mm_maskz_shufflelo_epi16(U, A, imm) \
- ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
- (__v8hi)_mm_shufflelo_epi16((A), (imm)), \
- (__v8hi)_mm_setzero_si128()))
+#define _mm256_mask_shufflelo_epi16(W, U, A, imm) \
+ ((__m256i)__builtin_ia32_selectw_256( \
+ (__mmask16)(U), (__v16hi)_mm256_shufflelo_epi16((A), (imm)), \
+ (__v16hi)(__m256i)(W)))
-#define _mm256_mask_shufflelo_epi16(W, U, A, imm) \
- ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
- (__v16hi)_mm256_shufflelo_epi16((A), \
- (imm)), \
- (__v16hi)(__m256i)(W)))
-
-#define _mm256_maskz_shufflelo_epi16(U, A, imm) \
- ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
- (__v16hi)_mm256_shufflelo_epi16((A), \
- (imm)), \
- (__v16hi)_mm256_setzero_si256()))
+#define _mm256_maskz_shufflelo_epi16(U, A, imm) \
+ ((__m256i)__builtin_ia32_selectw_256( \
+ (__mmask16)(U), (__v16hi)_mm256_shufflelo_epi16((A), (imm)), \
+ (__v16hi)_mm256_setzero_si256()))
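/* Usage sketch: the imm operand must be an integer constant expression,
 * which is why these stay macros; _MM_SHUFFLE packs four 2-bit selectors. */
static inline __m128i reverse_high_words(__m128i v, __mmask8 keep) {
  return _mm_mask_shufflehi_epi16(v, keep, v, _MM_SHUFFLE(0, 1, 2, 3));
}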
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_sllv_epi16(__m256i __A, __m256i __B)
-{
+_mm256_sllv_epi16(__m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_psllv16hi((__v16hi)__A, (__v16hi)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask_sllv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_sllv_epi16(__A, __B),
- (__v16hi)__W);
+_mm256_mask_sllv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_sllv_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_maskz_sllv_epi16(__mmask16 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_sllv_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+_mm256_maskz_sllv_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_sllv_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_sllv_epi16(__m128i __A, __m128i __B)
-{
+_mm_sllv_epi16(__m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_psllv8hi((__v8hi)__A, (__v8hi)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask_sllv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_sllv_epi16(__A, __B),
- (__v8hi)__W);
+_mm_mask_sllv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_sllv_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_maskz_sllv_epi16(__mmask8 __U, __m128i __A, __m128i __B)
-{
+_mm_maskz_sllv_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_sllv_epi16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
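/* Usage sketch: sllv shifts each 16-bit lane by the count held in the
 * corresponding lane of the second operand; counts of 16 or more zero the
 * lane. These variable shifts are also marked constexpr in this header. */
static inline __m128i scale_by_lane(__m128i v) {
  const __m128i counts = _mm_setr_epi16(0, 1, 2, 3, 0, 1, 2, 3);
  return _mm_sllv_epi16(v, counts); /* lane i <<= counts[i] */
}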
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sll_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_sll_epi16(__A, __B),
- (__v8hi)__W);
+_mm_mask_sll_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_sll_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sll_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
-{
+_mm_maskz_sll_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_sll_epi16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sll_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_sll_epi16(__A, __B),
- (__v16hi)__W);
+_mm256_mask_sll_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_sll_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_sll_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+_mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_sll_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_slli_epi16(__A, (int)__B),
- (__v8hi)__W);
+_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_slli_epi16(__A, (int)__B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, unsigned int __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_slli_epi16(__A, (int)__B),
- (__v8hi)_mm_setzero_si128());
+_mm_maskz_slli_epi16(__mmask8 __U, __m128i __A, unsigned int __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_slli_epi16(__A, (int)__B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A,
unsigned int __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_slli_epi16(__A, (int)__B),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_slli_epi16(__A, (int)__B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_slli_epi16(__A, (int)__B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_slli_epi16(__A, (int)__B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_srlv_epi16(__m256i __A, __m256i __B)
-{
+_mm256_srlv_epi16(__m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_psrlv16hi((__v16hi)__A, (__v16hi)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask_srlv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_srlv_epi16(__A, __B),
- (__v16hi)__W);
+_mm256_mask_srlv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_srlv_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_maskz_srlv_epi16(__mmask16 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_srlv_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+_mm256_maskz_srlv_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_srlv_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_srlv_epi16(__m128i __A, __m128i __B)
-{
+_mm_srlv_epi16(__m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_psrlv8hi((__v8hi)__A, (__v8hi)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask_srlv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_srlv_epi16(__A, __B),
- (__v8hi)__W);
+_mm_mask_srlv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_srlv_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_maskz_srlv_epi16(__mmask8 __U, __m128i __A, __m128i __B)
-{
+_mm_maskz_srlv_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_srlv_epi16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_srav_epi16(__m256i __A, __m256i __B)
-{
+_mm256_srav_epi16(__m256i __A, __m256i __B) {
return (__m256i)__builtin_ia32_psrav16hi((__v16hi)__A, (__v16hi)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mask_srav_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_srav_epi16(__A, __B),
- (__v16hi)__W);
+_mm256_mask_srav_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_srav_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_maskz_srav_epi16(__mmask16 __U, __m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_srav_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+_mm256_maskz_srav_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_srav_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_srav_epi16(__m128i __A, __m128i __B)
-{
+_mm_srav_epi16(__m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_psrav8hi((__v8hi)__A, (__v8hi)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_mask_srav_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_srav_epi16(__A, __B),
- (__v8hi)__W);
+_mm_mask_srav_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_srav_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
-_mm_maskz_srav_epi16(__mmask8 __U, __m128i __A, __m128i __B)
-{
+_mm_maskz_srav_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_srav_epi16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_sra_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_sra_epi16(__A, __B),
- (__v8hi)__W);
+_mm_mask_sra_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_sra_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_sra_epi16(__mmask8 __U, __m128i __A, __m128i __B)
-{
+_mm_maskz_sra_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_sra_epi16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_sra_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_sra_epi16(__A, __B),
- (__v16hi)__W);
+_mm256_mask_sra_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_sra_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_sra_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+_mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_sra_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_srai_epi16(__A, (int)__B),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_srai_epi16(__A, (int)__B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_srai_epi16(__A, (int)__B),
- (__v8hi)_mm_setzero_si128());
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_srai_epi16(__A, (int)__B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A,
unsigned int __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_srai_epi16(__A, (int)__B),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_srai_epi16(__A, (int)__B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_srai_epi16(__A, (int)__B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_srai_epi16(__A, (int)__B),
+ (__v16hi)_mm256_setzero_si256());
}
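/* Usage sketch: srai sign-extends while srli zero-fills, so they differ on
 * negative lanes: srai(-2, 1) yields -1 (0xFFFF) per lane, whereas
 * srli(-2, 1) yields 0x7FFF. */
static inline __m128i halve_signed(__m128i v) {
  return _mm_srai_epi16(v, 1); /* arithmetic shift keeps the sign */
}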
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_srl_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_srl_epi16(__A, __B),
- (__v8hi)__W);
+_mm_mask_srl_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_srl_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_srl_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
-{
+_mm_maskz_srl_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_srl_epi16(__A, __B),
(__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_srl_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_srl_epi16(__A, __B),
- (__v16hi)__W);
+_mm256_mask_srl_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_srl_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_srl_epi16(__mmask16 __U, __m256i __A, __m128i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_srl_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+_mm256_maskz_srl_epi16(__mmask16 __U, __m256i __A, __m128i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_srl_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_srli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B) {
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_srli_epi16(__A, __B),
- (__v8hi)__W);
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__U, (__v8hi)_mm_srli_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
@@ -2113,619 +1851,511 @@ _mm_maskz_srli_epi16(__mmask8 __U, __m128i __A, int __B) {
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_srli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_srli_epi16(__A, __B),
- (__v16hi)__W);
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_srli_epi16(__A, __B), (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_srli_epi16(__mmask16 __U, __m256i __A, int __B) {
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_srli_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__U, (__v16hi)_mm256_srli_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_mov_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
- return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
- (__v8hi) __A,
- (__v8hi) __W);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, (__v8hi)__A,
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_mov_epi16(__mmask8 __U, __m128i __A) {
- return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
- (__v8hi) __A,
- (__v8hi) _mm_setzero_si128 ());
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U, (__v8hi)__A,
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_mov_epi16(__m256i __W, __mmask16 __U, __m256i __A) {
- return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
- (__v16hi) __A,
- (__v16hi) __W);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, (__v16hi)__A,
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_mov_epi16(__mmask16 __U, __m256i __A) {
- return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
- (__v16hi) __A,
- (__v16hi) _mm256_setzero_si256 ());
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U, (__v16hi)__A,
+ (__v16hi)_mm256_setzero_si256());
}
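/* Usage sketch: mask_mov is the bare element-wise blend that the _mask_
 * wrappers above are built on: lane i comes from the source when mask bit i
 * is set and from the passthrough otherwise. */
static inline __m128i blend_words(__m128i passthru, __mmask8 m, __m128i src) {
  return _mm_mask_mov_epi16(passthru, m, src);
}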
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_mov_epi8(__m128i __W, __mmask16 __U, __m128i __A) {
- return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
- (__v16qi) __A,
- (__v16qi) __W);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, (__v16qi)__A,
+ (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_mov_epi8(__mmask16 __U, __m128i __A) {
- return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
- (__v16qi) __A,
- (__v16qi) _mm_setzero_si128 ());
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U, (__v16qi)__A,
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_mov_epi8(__m256i __W, __mmask32 __U, __m256i __A) {
- return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
- (__v32qi) __A,
- (__v32qi) __W);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, (__v32qi)__A,
+ (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_mov_epi8(__mmask32 __U, __m256i __A) {
- return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
- (__v32qi) __A,
- (__v32qi) _mm256_setzero_si256 ());
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U, (__v32qi)__A,
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_set1_epi8 (__m128i __O, __mmask16 __M, char __A)
-{
- return (__m128i) __builtin_ia32_selectb_128(__M,
- (__v16qi) _mm_set1_epi8(__A),
- (__v16qi) __O);
+_mm_mask_set1_epi8(__m128i __O, __mmask16 __M, char __A) {
+ return (__m128i)__builtin_ia32_selectb_128(__M, (__v16qi)_mm_set1_epi8(__A),
+ (__v16qi)__O);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_set1_epi8 (__mmask16 __M, char __A)
-{
- return (__m128i) __builtin_ia32_selectb_128(__M,
- (__v16qi) _mm_set1_epi8(__A),
- (__v16qi) _mm_setzero_si128());
+_mm_maskz_set1_epi8(__mmask16 __M, char __A) {
+ return (__m128i)__builtin_ia32_selectb_128(__M, (__v16qi)_mm_set1_epi8(__A),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_set1_epi8 (__m256i __O, __mmask32 __M, char __A)
-{
- return (__m256i) __builtin_ia32_selectb_256(__M,
- (__v32qi) _mm256_set1_epi8(__A),
- (__v32qi) __O);
+_mm256_mask_set1_epi8(__m256i __O, __mmask32 __M, char __A) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ __M, (__v32qi)_mm256_set1_epi8(__A), (__v32qi)__O);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_set1_epi8 (__mmask32 __M, char __A)
-{
- return (__m256i) __builtin_ia32_selectb_256(__M,
- (__v32qi) _mm256_set1_epi8(__A),
- (__v32qi) _mm256_setzero_si256());
+_mm256_maskz_set1_epi8(__mmask32 __M, char __A) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ __M, (__v32qi)_mm256_set1_epi8(__A), (__v32qi)_mm256_setzero_si256());
}
-static __inline __m128i __DEFAULT_FN_ATTRS128
-_mm_loadu_epi16 (void const *__P)
-{
+static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_loadu_epi16(void const *__P) {
struct __loadu_epi16 {
__m128i_u __v;
} __attribute__((__packed__, __may_alias__));
- return ((const struct __loadu_epi16*)__P)->__v;
+ return ((const struct __loadu_epi16 *)__P)->__v;
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_loadu_epi16 (__m128i __W, __mmask8 __U, void const *__P)
-{
- return (__m128i) __builtin_ia32_loaddquhi128_mask ((const __v8hi *) __P,
- (__v8hi) __W,
- (__mmask8) __U);
+_mm_mask_loadu_epi16(__m128i __W, __mmask8 __U, void const *__P) {
+ return (__m128i)__builtin_ia32_loaddquhi128_mask((const __v8hi *)__P,
+ (__v8hi)__W, (__mmask8)__U);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_loadu_epi16 (__mmask8 __U, void const *__P)
-{
- return (__m128i) __builtin_ia32_loaddquhi128_mask ((const __v8hi *) __P,
- (__v8hi)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+_mm_maskz_loadu_epi16(__mmask8 __U, void const *__P) {
+ return (__m128i)__builtin_ia32_loaddquhi128_mask(
+ (const __v8hi *)__P, (__v8hi)_mm_setzero_si128(), (__mmask8)__U);
}
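/* Usage sketch: AVX-512 masked loads suppress faults on masked-out elements,
 * which makes the maskz form convenient for loop tails (n < 8 assumed). */
static inline __m128i load_first_n_words(const short *p, unsigned n) {
  return _mm_maskz_loadu_epi16((__mmask8)((1u << n) - 1u), p);
}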
static __inline __m256i __DEFAULT_FN_ATTRS256
-_mm256_loadu_epi16 (void const *__P)
-{
+_mm256_loadu_epi16(void const *__P) {
struct __loadu_epi16 {
__m256i_u __v;
} __attribute__((__packed__, __may_alias__));
- return ((const struct __loadu_epi16*)__P)->__v;
+ return ((const struct __loadu_epi16 *)__P)->__v;
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_loadu_epi16 (__m256i __W, __mmask16 __U, void const *__P)
-{
- return (__m256i) __builtin_ia32_loaddquhi256_mask ((const __v16hi *) __P,
- (__v16hi) __W,
- (__mmask16) __U);
+_mm256_mask_loadu_epi16(__m256i __W, __mmask16 __U, void const *__P) {
+ return (__m256i)__builtin_ia32_loaddquhi256_mask(
+ (const __v16hi *)__P, (__v16hi)__W, (__mmask16)__U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_loadu_epi16 (__mmask16 __U, void const *__P)
-{
- return (__m256i) __builtin_ia32_loaddquhi256_mask ((const __v16hi *) __P,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U);
+_mm256_maskz_loadu_epi16(__mmask16 __U, void const *__P) {
+ return (__m256i)__builtin_ia32_loaddquhi256_mask(
+ (const __v16hi *)__P, (__v16hi)_mm256_setzero_si256(), (__mmask16)__U);
}
-static __inline __m128i __DEFAULT_FN_ATTRS128
-_mm_loadu_epi8 (void const *__P)
-{
+static __inline __m128i __DEFAULT_FN_ATTRS128 _mm_loadu_epi8(void const *__P) {
struct __loadu_epi8 {
__m128i_u __v;
} __attribute__((__packed__, __may_alias__));
- return ((const struct __loadu_epi8*)__P)->__v;
+ return ((const struct __loadu_epi8 *)__P)->__v;
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_loadu_epi8 (__m128i __W, __mmask16 __U, void const *__P)
-{
- return (__m128i) __builtin_ia32_loaddquqi128_mask ((const __v16qi *) __P,
- (__v16qi) __W,
- (__mmask16) __U);
+_mm_mask_loadu_epi8(__m128i __W, __mmask16 __U, void const *__P) {
+ return (__m128i)__builtin_ia32_loaddquqi128_mask(
+ (const __v16qi *)__P, (__v16qi)__W, (__mmask16)__U);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_loadu_epi8 (__mmask16 __U, void const *__P)
-{
- return (__m128i) __builtin_ia32_loaddquqi128_mask ((const __v16qi *) __P,
- (__v16qi)
- _mm_setzero_si128 (),
- (__mmask16) __U);
+_mm_maskz_loadu_epi8(__mmask16 __U, void const *__P) {
+ return (__m128i)__builtin_ia32_loaddquqi128_mask(
+ (const __v16qi *)__P, (__v16qi)_mm_setzero_si128(), (__mmask16)__U);
}
static __inline __m256i __DEFAULT_FN_ATTRS256
-_mm256_loadu_epi8 (void const *__P)
-{
+_mm256_loadu_epi8(void const *__P) {
struct __loadu_epi8 {
__m256i_u __v;
} __attribute__((__packed__, __may_alias__));
- return ((const struct __loadu_epi8*)__P)->__v;
+ return ((const struct __loadu_epi8 *)__P)->__v;
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_loadu_epi8 (__m256i __W, __mmask32 __U, void const *__P)
-{
- return (__m256i) __builtin_ia32_loaddquqi256_mask ((const __v32qi *) __P,
- (__v32qi) __W,
- (__mmask32) __U);
+_mm256_mask_loadu_epi8(__m256i __W, __mmask32 __U, void const *__P) {
+ return (__m256i)__builtin_ia32_loaddquqi256_mask(
+ (const __v32qi *)__P, (__v32qi)__W, (__mmask32)__U);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_loadu_epi8 (__mmask32 __U, void const *__P)
-{
- return (__m256i) __builtin_ia32_loaddquqi256_mask ((const __v32qi *) __P,
- (__v32qi)
- _mm256_setzero_si256 (),
- (__mmask32) __U);
+_mm256_maskz_loadu_epi8(__mmask32 __U, void const *__P) {
+ return (__m256i)__builtin_ia32_loaddquqi256_mask(
+ (const __v32qi *)__P, (__v32qi)_mm256_setzero_si256(), (__mmask32)__U);
}
-static __inline void __DEFAULT_FN_ATTRS128
-_mm_storeu_epi16 (void *__P, __m128i __A)
-{
+static __inline void __DEFAULT_FN_ATTRS128 _mm_storeu_epi16(void *__P,
+ __m128i __A) {
struct __storeu_epi16 {
__m128i_u __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_epi16*)__P)->__v = __A;
+ ((struct __storeu_epi16 *)__P)->__v = __A;
}
static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_storeu_epi16 (void *__P, __mmask8 __U, __m128i __A)
-{
- __builtin_ia32_storedquhi128_mask ((__v8hi *) __P,
- (__v8hi) __A,
- (__mmask8) __U);
+_mm_mask_storeu_epi16(void *__P, __mmask8 __U, __m128i __A) {
+ __builtin_ia32_storedquhi128_mask((__v8hi *)__P, (__v8hi)__A, (__mmask8)__U);
}
-static __inline void __DEFAULT_FN_ATTRS256
-_mm256_storeu_epi16 (void *__P, __m256i __A)
-{
+static __inline void __DEFAULT_FN_ATTRS256 _mm256_storeu_epi16(void *__P,
+ __m256i __A) {
struct __storeu_epi16 {
__m256i_u __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_epi16*)__P)->__v = __A;
+ ((struct __storeu_epi16 *)__P)->__v = __A;
}
static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_storeu_epi16 (void *__P, __mmask16 __U, __m256i __A)
-{
- __builtin_ia32_storedquhi256_mask ((__v16hi *) __P,
- (__v16hi) __A,
- (__mmask16) __U);
+_mm256_mask_storeu_epi16(void *__P, __mmask16 __U, __m256i __A) {
+ __builtin_ia32_storedquhi256_mask((__v16hi *)__P, (__v16hi)__A,
+ (__mmask16)__U);
}
-static __inline void __DEFAULT_FN_ATTRS128
-_mm_storeu_epi8 (void *__P, __m128i __A)
-{
+static __inline void __DEFAULT_FN_ATTRS128 _mm_storeu_epi8(void *__P,
+ __m128i __A) {
struct __storeu_epi8 {
__m128i_u __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_epi8*)__P)->__v = __A;
+ ((struct __storeu_epi8 *)__P)->__v = __A;
}
-static __inline__ void __DEFAULT_FN_ATTRS128
-_mm_mask_storeu_epi8 (void *__P, __mmask16 __U, __m128i __A)
-{
- __builtin_ia32_storedquqi128_mask ((__v16qi *) __P,
- (__v16qi) __A,
- (__mmask16) __U);
+static __inline__ void __DEFAULT_FN_ATTRS128 _mm_mask_storeu_epi8(void *__P,
+ __mmask16 __U,
+ __m128i __A) {
+ __builtin_ia32_storedquqi128_mask((__v16qi *)__P, (__v16qi)__A,
+ (__mmask16)__U);
}
-static __inline void __DEFAULT_FN_ATTRS256
-_mm256_storeu_epi8 (void *__P, __m256i __A)
-{
+static __inline void __DEFAULT_FN_ATTRS256 _mm256_storeu_epi8(void *__P,
+ __m256i __A) {
struct __storeu_epi8 {
__m256i_u __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_epi8*)__P)->__v = __A;
+ ((struct __storeu_epi8 *)__P)->__v = __A;
}
static __inline__ void __DEFAULT_FN_ATTRS256
-_mm256_mask_storeu_epi8 (void *__P, __mmask32 __U, __m256i __A)
-{
- __builtin_ia32_storedquqi256_mask ((__v32qi *) __P,
- (__v32qi) __A,
- (__mmask32) __U);
+_mm256_mask_storeu_epi8(void *__P, __mmask32 __U, __m256i __A) {
+ __builtin_ia32_storedquqi256_mask((__v32qi *)__P, (__v32qi)__A,
+ (__mmask32)__U);
}
static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_test_epi8_mask(__m128i __A, __m128i __B) {
- return _mm_cmpneq_epi8_mask (_mm_and_si128(__A, __B), _mm_setzero_si128());
+ return _mm_cmpneq_epi8_mask(_mm_and_si128(__A, __B), _mm_setzero_si128());
}
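#if defined(__cplusplus)
/* Compile-time check sketch: relies on the cmp builtins gaining Constexpr in
 * this patch and on _mm_set1_epi8 already being constexpr in recent clang. */
static_assert(_mm_test_epi8_mask(_mm_set1_epi8(1), _mm_set1_epi8(3)) == 0xFFFF,
              "every byte of 1 & 3 is nonzero");
#endif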
static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_test_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
- return _mm_mask_cmpneq_epi8_mask (__U, _mm_and_si128 (__A, __B),
- _mm_setzero_si128());
+ return _mm_mask_cmpneq_epi8_mask(__U, _mm_and_si128(__A, __B),
+ _mm_setzero_si128());
}
static __inline__ __mmask32 __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_test_epi8_mask(__m256i __A, __m256i __B) {
- return _mm256_cmpneq_epi8_mask (_mm256_and_si256(__A, __B),
- _mm256_setzero_si256());
+ return _mm256_cmpneq_epi8_mask(_mm256_and_si256(__A, __B),
+ _mm256_setzero_si256());
}
static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
-_mm256_mask_test_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B)
-{
- return _mm256_mask_cmpneq_epi8_mask (__U, _mm256_and_si256(__A, __B),
- _mm256_setzero_si256());
+_mm256_mask_test_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B) {
+ return _mm256_mask_cmpneq_epi8_mask(__U, _mm256_and_si256(__A, __B),
+ _mm256_setzero_si256());
}
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_test_epi16_mask (__m128i __A, __m128i __B)
-{
- return _mm_cmpneq_epi16_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
+_mm_test_epi16_mask(__m128i __A, __m128i __B) {
+ return _mm_cmpneq_epi16_mask(_mm_and_si128(__A, __B), _mm_setzero_si128());
}
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_mask_test_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B)
-{
- return _mm_mask_cmpneq_epi16_mask (__U, _mm_and_si128 (__A, __B),
- _mm_setzero_si128());
+_mm_mask_test_epi16_mask(__mmask8 __U, __m128i __A, __m128i __B) {
+ return _mm_mask_cmpneq_epi16_mask(__U, _mm_and_si128(__A, __B),
+ _mm_setzero_si128());
}
static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
-_mm256_test_epi16_mask (__m256i __A, __m256i __B)
-{
- return _mm256_cmpneq_epi16_mask (_mm256_and_si256 (__A, __B),
- _mm256_setzero_si256 ());
+_mm256_test_epi16_mask(__m256i __A, __m256i __B) {
+ return _mm256_cmpneq_epi16_mask(_mm256_and_si256(__A, __B),
+ _mm256_setzero_si256());
}
static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
-_mm256_mask_test_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
-{
- return _mm256_mask_cmpneq_epi16_mask (__U, _mm256_and_si256(__A, __B),
- _mm256_setzero_si256());
+_mm256_mask_test_epi16_mask(__mmask16 __U, __m256i __A, __m256i __B) {
+ return _mm256_mask_cmpneq_epi16_mask(__U, _mm256_and_si256(__A, __B),
+ _mm256_setzero_si256());
}
static __inline__ __mmask16 __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_testn_epi8_mask(__m128i __A, __m128i __B) {
- return _mm_cmpeq_epi8_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
+ return _mm_cmpeq_epi8_mask(_mm_and_si128(__A, __B), _mm_setzero_si128());
}
static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_mask_testn_epi8_mask (__mmask16 __U, __m128i __A, __m128i __B)
-{
- return _mm_mask_cmpeq_epi8_mask (__U, _mm_and_si128 (__A, __B),
+_mm_mask_testn_epi8_mask(__mmask16 __U, __m128i __A, __m128i __B) {
+ return _mm_mask_cmpeq_epi8_mask(__U, _mm_and_si128(__A, __B),
_mm_setzero_si128());
}
static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
-_mm256_testn_epi8_mask (__m256i __A, __m256i __B)
-{
- return _mm256_cmpeq_epi8_mask (_mm256_and_si256 (__A, __B),
- _mm256_setzero_si256());
+_mm256_testn_epi8_mask(__m256i __A, __m256i __B) {
+ return _mm256_cmpeq_epi8_mask(_mm256_and_si256(__A, __B),
+ _mm256_setzero_si256());
}
static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
-_mm256_mask_testn_epi8_mask (__mmask32 __U, __m256i __A, __m256i __B)
-{
- return _mm256_mask_cmpeq_epi8_mask (__U, _mm256_and_si256 (__A, __B),
- _mm256_setzero_si256());
+_mm256_mask_testn_epi8_mask(__mmask32 __U, __m256i __A, __m256i __B) {
+ return _mm256_mask_cmpeq_epi8_mask(__U, _mm256_and_si256(__A, __B),
+ _mm256_setzero_si256());
}
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_testn_epi16_mask (__m128i __A, __m128i __B)
-{
- return _mm_cmpeq_epi16_mask (_mm_and_si128 (__A, __B), _mm_setzero_si128());
+_mm_testn_epi16_mask(__m128i __A, __m128i __B) {
+ return _mm_cmpeq_epi16_mask(_mm_and_si128(__A, __B), _mm_setzero_si128());
}
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_mask_testn_epi16_mask (__mmask8 __U, __m128i __A, __m128i __B)
-{
- return _mm_mask_cmpeq_epi16_mask (__U, _mm_and_si128(__A, __B), _mm_setzero_si128());
+_mm_mask_testn_epi16_mask(__mmask8 __U, __m128i __A, __m128i __B) {
+ return _mm_mask_cmpeq_epi16_mask(__U, _mm_and_si128(__A, __B),
+ _mm_setzero_si128());
}
static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
-_mm256_testn_epi16_mask (__m256i __A, __m256i __B)
-{
- return _mm256_cmpeq_epi16_mask (_mm256_and_si256(__A, __B),
- _mm256_setzero_si256());
+_mm256_testn_epi16_mask(__m256i __A, __m256i __B) {
+ return _mm256_cmpeq_epi16_mask(_mm256_and_si256(__A, __B),
+ _mm256_setzero_si256());
}
static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
-_mm256_mask_testn_epi16_mask (__mmask16 __U, __m256i __A, __m256i __B)
-{
- return _mm256_mask_cmpeq_epi16_mask (__U, _mm256_and_si256 (__A, __B),
- _mm256_setzero_si256());
+_mm256_mask_testn_epi16_mask(__mmask16 __U, __m256i __A, __m256i __B) {
+ return _mm256_mask_cmpeq_epi16_mask(__U, _mm256_and_si256(__A, __B),
+ _mm256_setzero_si256());
}
static __inline__ __mmask16 __DEFAULT_FN_ATTRS128
-_mm_movepi8_mask (__m128i __A)
-{
- return (__mmask16) __builtin_ia32_cvtb2mask128 ((__v16qi) __A);
+_mm_movepi8_mask(__m128i __A) {
+ return (__mmask16)__builtin_ia32_cvtb2mask128((__v16qi)__A);
}
static __inline__ __mmask32 __DEFAULT_FN_ATTRS256
-_mm256_movepi8_mask (__m256i __A)
-{
- return (__mmask32) __builtin_ia32_cvtb2mask256 ((__v32qi) __A);
+_mm256_movepi8_mask(__m256i __A) {
+ return (__mmask32)__builtin_ia32_cvtb2mask256((__v32qi)__A);
}
static __inline__ __mmask8 __DEFAULT_FN_ATTRS128
-_mm_movepi16_mask (__m128i __A)
-{
- return (__mmask8) __builtin_ia32_cvtw2mask128 ((__v8hi) __A);
+_mm_movepi16_mask(__m128i __A) {
+ return (__mmask8)__builtin_ia32_cvtw2mask128((__v8hi)__A);
}
static __inline__ __mmask16 __DEFAULT_FN_ATTRS256
-_mm256_movepi16_mask (__m256i __A)
-{
- return (__mmask16) __builtin_ia32_cvtw2mask256 ((__v16hi) __A);
+_mm256_movepi16_mask(__m256i __A) {
+ return (__mmask16)__builtin_ia32_cvtw2mask256((__v16hi)__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_movm_epi8 (__mmask16 __A)
-{
- return (__m128i) __builtin_ia32_cvtmask2b128 (__A);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_movm_epi8(__mmask16 __A) {
+ return (__m128i)__builtin_ia32_cvtmask2b128(__A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_movm_epi8 (__mmask32 __A)
-{
- return (__m256i) __builtin_ia32_cvtmask2b256 (__A);
+_mm256_movm_epi8(__mmask32 __A) {
+ return (__m256i)__builtin_ia32_cvtmask2b256(__A);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_movm_epi16 (__mmask8 __A)
-{
- return (__m128i) __builtin_ia32_cvtmask2w128 (__A);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_movm_epi16(__mmask8 __A) {
+ return (__m128i)__builtin_ia32_cvtmask2w128(__A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_movm_epi16 (__mmask16 __A)
-{
- return (__m256i) __builtin_ia32_cvtmask2w256 (__A);
+_mm256_movm_epi16(__mmask16 __A) {
+ return (__m256i)__builtin_ia32_cvtmask2w256(__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_broadcastb_epi8 (__m128i __O, __mmask16 __M, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectb_128(__M,
- (__v16qi) _mm_broadcastb_epi8(__A),
- (__v16qi) __O);
+_mm_mask_broadcastb_epi8(__m128i __O, __mmask16 __M, __m128i __A) {
+ return (__m128i)__builtin_ia32_selectb_128(
+ __M, (__v16qi)_mm_broadcastb_epi8(__A), (__v16qi)__O);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_broadcastb_epi8 (__mmask16 __M, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectb_128(__M,
- (__v16qi) _mm_broadcastb_epi8(__A),
- (__v16qi) _mm_setzero_si128());
+_mm_maskz_broadcastb_epi8(__mmask16 __M, __m128i __A) {
+ return (__m128i)__builtin_ia32_selectb_128(
+ __M, (__v16qi)_mm_broadcastb_epi8(__A), (__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcastb_epi8 (__m256i __O, __mmask32 __M, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectb_256(__M,
- (__v32qi) _mm256_broadcastb_epi8(__A),
- (__v32qi) __O);
+_mm256_mask_broadcastb_epi8(__m256i __O, __mmask32 __M, __m128i __A) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ __M, (__v32qi)_mm256_broadcastb_epi8(__A), (__v32qi)__O);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcastb_epi8 (__mmask32 __M, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectb_256(__M,
- (__v32qi) _mm256_broadcastb_epi8(__A),
- (__v32qi) _mm256_setzero_si256());
+_mm256_maskz_broadcastb_epi8(__mmask32 __M, __m128i __A) {
+ return (__m256i)__builtin_ia32_selectb_256(
+ __M, (__v32qi)_mm256_broadcastb_epi8(__A),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_broadcastw_epi16 (__m128i __O, __mmask8 __M, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectw_128(__M,
- (__v8hi) _mm_broadcastw_epi16(__A),
- (__v8hi) __O);
+_mm_mask_broadcastw_epi16(__m128i __O, __mmask8 __M, __m128i __A) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ __M, (__v8hi)_mm_broadcastw_epi16(__A), (__v8hi)__O);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_broadcastw_epi16 (__mmask8 __M, __m128i __A)
-{
- return (__m128i)__builtin_ia32_selectw_128(__M,
- (__v8hi) _mm_broadcastw_epi16(__A),
- (__v8hi) _mm_setzero_si128());
+_mm_maskz_broadcastw_epi16(__mmask8 __M, __m128i __A) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ __M, (__v8hi)_mm_broadcastw_epi16(__A), (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_broadcastw_epi16 (__m256i __O, __mmask16 __M, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectw_256(__M,
- (__v16hi) _mm256_broadcastw_epi16(__A),
- (__v16hi) __O);
+_mm256_mask_broadcastw_epi16(__m256i __O, __mmask16 __M, __m128i __A) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ __M, (__v16hi)_mm256_broadcastw_epi16(__A), (__v16hi)__O);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_broadcastw_epi16 (__mmask16 __M, __m128i __A)
-{
- return (__m256i)__builtin_ia32_selectw_256(__M,
- (__v16hi) _mm256_broadcastw_epi16(__A),
- (__v16hi) _mm256_setzero_si256());
+_mm256_maskz_broadcastw_epi16(__mmask16 __M, __m128i __A) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ __M, (__v16hi)_mm256_broadcastw_epi16(__A),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_set1_epi16 (__m256i __O, __mmask16 __M, short __A)
-{
- return (__m256i) __builtin_ia32_selectw_256 (__M,
- (__v16hi) _mm256_set1_epi16(__A),
- (__v16hi) __O);
+_mm256_mask_set1_epi16(__m256i __O, __mmask16 __M, short __A) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ __M, (__v16hi)_mm256_set1_epi16(__A), (__v16hi)__O);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_set1_epi16 (__mmask16 __M, short __A)
-{
- return (__m256i) __builtin_ia32_selectw_256(__M,
- (__v16hi)_mm256_set1_epi16(__A),
- (__v16hi) _mm256_setzero_si256());
+_mm256_maskz_set1_epi16(__mmask16 __M, short __A) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ __M, (__v16hi)_mm256_set1_epi16(__A), (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_set1_epi16 (__m128i __O, __mmask8 __M, short __A)
-{
- return (__m128i) __builtin_ia32_selectw_128(__M,
- (__v8hi) _mm_set1_epi16(__A),
- (__v8hi) __O);
+_mm_mask_set1_epi16(__m128i __O, __mmask8 __M, short __A) {
+ return (__m128i)__builtin_ia32_selectw_128(__M, (__v8hi)_mm_set1_epi16(__A),
+ (__v8hi)__O);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_set1_epi16 (__mmask8 __M, short __A)
-{
- return (__m128i) __builtin_ia32_selectw_128(__M,
- (__v8hi) _mm_set1_epi16(__A),
- (__v8hi) _mm_setzero_si128());
+_mm_maskz_set1_epi16(__mmask8 __M, short __A) {
+ return (__m128i)__builtin_ia32_selectw_128(__M, (__v8hi)_mm_set1_epi16(__A),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_permutexvar_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_permvarhi128((__v8hi) __B, (__v8hi) __A);
+_mm_permutexvar_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_permvarhi128((__v8hi)__B, (__v8hi)__A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_permutexvar_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
- (__v8hi)_mm_permutexvar_epi16(__A, __B),
- (__v8hi) _mm_setzero_si128());
+_mm_maskz_permutexvar_epi16(__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__M, (__v8hi)_mm_permutexvar_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_permutexvar_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
- __m128i __B)
-{
- return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
- (__v8hi)_mm_permutexvar_epi16(__A, __B),
- (__v8hi)__W);
+static __inline__ __m128i __DEFAULT_FN_ATTRS128 _mm_mask_permutexvar_epi16(
+ __m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128(
+ (__mmask8)__M, (__v8hi)_mm_permutexvar_epi16(__A, __B), (__v8hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_permutexvar_epi16 (__m256i __A, __m256i __B)
-{
- return (__m256i)__builtin_ia32_permvarhi256((__v16hi) __B, (__v16hi) __A);
+_mm256_permutexvar_epi16(__m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_permvarhi256((__v16hi)__B, (__v16hi)__A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_permutexvar_epi16 (__mmask16 __M, __m256i __A,
- __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
- (__v16hi)_mm256_permutexvar_epi16(__A, __B),
- (__v16hi)_mm256_setzero_si256());
+_mm256_maskz_permutexvar_epi16(__mmask16 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__M, (__v16hi)_mm256_permutexvar_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
- __m256i __B)
-{
- return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
- (__v16hi)_mm256_permutexvar_epi16(__A, __B),
- (__v16hi)__W);
-}
-
-#define _mm_mask_alignr_epi8(W, U, A, B, N) \
- ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
- (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
- (__v16qi)(__m128i)(W)))
-
-#define _mm_maskz_alignr_epi8(U, A, B, N) \
- ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
- (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
- (__v16qi)_mm_setzero_si128()))
-
-#define _mm256_mask_alignr_epi8(W, U, A, B, N) \
- ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
- (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
- (__v32qi)(__m256i)(W)))
-
-#define _mm256_maskz_alignr_epi8(U, A, B, N) \
- ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
- (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
- (__v32qi)_mm256_setzero_si256()))
-
-#define _mm_dbsad_epu8(A, B, imm) \
- ((__m128i)__builtin_ia32_dbpsadbw128((__v16qi)(__m128i)(A), \
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_permutexvar_epi16(
+ __m256i __W, __mmask16 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256(
+ (__mmask16)__M, (__v16hi)_mm256_permutexvar_epi16(__A, __B),
+ (__v16hi)__W);
+}
+
+#define _mm_mask_alignr_epi8(W, U, A, B, N) \
+ ((__m128i)__builtin_ia32_selectb_128( \
+ (__mmask16)(U), (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
+ (__v16qi)(__m128i)(W)))
+
+#define _mm_maskz_alignr_epi8(U, A, B, N) \
+ ((__m128i)__builtin_ia32_selectb_128( \
+ (__mmask16)(U), (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
+ (__v16qi)_mm_setzero_si128()))
+
+#define _mm256_mask_alignr_epi8(W, U, A, B, N) \
+ ((__m256i)__builtin_ia32_selectb_256( \
+ (__mmask32)(U), (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
+ (__v32qi)(__m256i)(W)))
+
+#define _mm256_maskz_alignr_epi8(U, A, B, N) \
+ ((__m256i)__builtin_ia32_selectb_256( \
+ (__mmask32)(U), (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
+ (__v32qi)_mm256_setzero_si256()))
+
+#define _mm_dbsad_epu8(A, B, imm) \
+ ((__m128i)__builtin_ia32_dbpsadbw128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(imm)))
-#define _mm_mask_dbsad_epu8(W, U, A, B, imm) \
- ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
- (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \
- (__v8hi)(__m128i)(W)))
+#define _mm_mask_dbsad_epu8(W, U, A, B, imm) \
+ ((__m128i)__builtin_ia32_selectw_128( \
+ (__mmask8)(U), (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \
+ (__v8hi)(__m128i)(W)))
-#define _mm_maskz_dbsad_epu8(U, A, B, imm) \
- ((__m128i)__builtin_ia32_selectw_128((__mmask8)(U), \
- (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \
- (__v8hi)_mm_setzero_si128()))
+#define _mm_maskz_dbsad_epu8(U, A, B, imm) \
+ ((__m128i)__builtin_ia32_selectw_128( \
+ (__mmask8)(U), (__v8hi)_mm_dbsad_epu8((A), (B), (imm)), \
+ (__v8hi)_mm_setzero_si128()))
-#define _mm256_dbsad_epu8(A, B, imm) \
- ((__m256i)__builtin_ia32_dbpsadbw256((__v32qi)(__m256i)(A), \
+#define _mm256_dbsad_epu8(A, B, imm) \
+ ((__m256i)__builtin_ia32_dbpsadbw256((__v32qi)(__m256i)(A), \
(__v32qi)(__m256i)(B), (int)(imm)))
-#define _mm256_mask_dbsad_epu8(W, U, A, B, imm) \
- ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
- (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
- (__v16hi)(__m256i)(W)))
+#define _mm256_mask_dbsad_epu8(W, U, A, B, imm) \
+ ((__m256i)__builtin_ia32_selectw_256( \
+ (__mmask16)(U), (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
+ (__v16hi)(__m256i)(W)))
-#define _mm256_maskz_dbsad_epu8(U, A, B, imm) \
- ((__m256i)__builtin_ia32_selectw_256((__mmask16)(U), \
- (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
- (__v16hi)_mm256_setzero_si256()))
+#define _mm256_maskz_dbsad_epu8(U, A, B, imm) \
+ ((__m256i)__builtin_ia32_selectw_256( \
+ (__mmask16)(U), (__v16hi)_mm256_dbsad_epu8((A), (B), (imm)), \
+ (__v16hi)_mm256_setzero_si256()))
static __inline__ short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_reduce_add_epi16(__m128i __W) {
@@ -2793,7 +2423,7 @@ _mm_reduce_min_epu16(__m128i __V) {
static __inline__ short __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_max_epi16(__mmask16 __M, __m128i __V) {
- __V = _mm_mask_mov_epi16(_mm_set1_epi16(-32767-1), __M, __V);
+ __V = _mm_mask_mov_epi16(_mm_set1_epi16(-32767 - 1), __M, __V);
return __builtin_reduce_max((__v8hi)__V);
}
@@ -2881,7 +2511,7 @@ _mm256_reduce_min_epu16(__m256i __V) {
static __inline__ short __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_max_epi16(__mmask16 __M, __m256i __V) {
- __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(-32767-1), __M, __V);
+ __V = _mm256_mask_mov_epi16(_mm256_set1_epi16(-32767 - 1), __M, __V);
return __builtin_reduce_max((__v16hi)__V);
}
@@ -2969,7 +2599,7 @@ _mm_reduce_min_epu8(__m128i __V) {
static __inline__ signed char __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_reduce_max_epi8(__mmask16 __M, __m128i __V) {
- __V = _mm_mask_mov_epi8(_mm_set1_epi8(-127-1), __M, __V);
+ __V = _mm_mask_mov_epi8(_mm_set1_epi8(-127 - 1), __M, __V);
return __builtin_reduce_max((__v16qs)__V);
}
@@ -3057,7 +2687,7 @@ _mm256_reduce_min_epu8(__m256i __V) {
static __inline__ signed char __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_reduce_max_epi8(__mmask32 __M, __m256i __V) {
- __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(-127-1), __M, __V);
+ __V = _mm256_mask_mov_epi8(_mm256_set1_epi8(-127 - 1), __M, __V);
return __builtin_reduce_max((__v32qs)__V);
}
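
For readers following along, a minimal usage sketch (illustrative only, not part of the patch) of what the Constexpr attribute permits here. It assumes a tree where _mm_set1_epi8 is already constexpr-evaluable and a target with avx512vl+avx512bw enabled (e.g. -march=icelake-server), and uses _mm_testn_epi8_mask, which the hunk above marks __DEFAULT_FN_ATTRS128_CONSTEXPR:

#include <immintrin.h>

// Every byte of set1(0) & set1(-1) is zero, so testn sets all 16 mask bits.
// With the underlying cmpeq builtin evaluable at compile time, the call
// folds in the constant evaluator and the static_assert needs no runtime
// support.
constexpr __mmask16 k = _mm_testn_epi8_mask(_mm_set1_epi8(0),
                                            _mm_set1_epi8(-1));
static_assert(k == 0xFFFF, "all lanes zero after AND");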