[clang] [Headers][X86] Allow SLLDQ/SRLDQ byte shift intrinsics to be used in constexpr (PR #164166)
Ye Tian via cfe-commits
cfe-commits at lists.llvm.org
Wed Oct 22 07:57:46 PDT 2025
https://github.com/TianYe717 updated https://github.com/llvm/llvm-project/pull/164166
>From 73e38ec44c5d59e970919d1b4245cfd231375269 Mon Sep 17 00:00:00 2001
From: Ye Tian <939808194 at qq.com>
Date: Sun, 19 Oct 2025 23:39:47 +0800
Subject: [PATCH 1/8] [Headers][X86] Support constexpr usage for _mm_srli_si128
and _mm256_srli_si256 intrinsics
---
clang/include/clang/Basic/BuiltinsX86.td | 9 ++++---
clang/lib/AST/ByteCode/InterpBuiltin.cpp | 34 ++++++++++++++++++++++++
clang/lib/AST/ExprConstant.cpp | 27 +++++++++++++++++++
clang/test/CodeGen/X86/avx2-builtins.c | 2 ++
clang/test/CodeGen/X86/sse2-builtins.c | 2 ++
5 files changed, 70 insertions(+), 4 deletions(-)
diff --git a/clang/include/clang/Basic/BuiltinsX86.td b/clang/include/clang/Basic/BuiltinsX86.td
index d03c778740ad3..7f19f025d6af3 100644
--- a/clang/include/clang/Basic/BuiltinsX86.td
+++ b/clang/include/clang/Basic/BuiltinsX86.td
@@ -282,8 +282,6 @@ let Features = "sse2", Attributes = [NoThrow, Const, RequiredVectorWidth<128>] i
def psllw128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, _Vector<8, short>)">;
def pslld128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, _Vector<4, int>)">;
def psllq128 : X86Builtin<"_Vector<2, long long int>(_Vector<2, long long int>, _Vector<2, long long int>)">;
- def pslldqi128_byteshift : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Constant int)">;
- def psrldqi128_byteshift : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Constant int)">;
}
let Features = "sse2",
@@ -302,6 +300,9 @@ let Features = "sse2",
def psrawi128 : X86Builtin<"_Vector<8, short>(_Vector<8, short>, int)">;
def psradi128 : X86Builtin<"_Vector<4, int>(_Vector<4, int>, int)">;
+
+ def pslldqi128_byteshift : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Constant int)">;
+ def psrldqi128_byteshift : X86Builtin<"_Vector<16, char>(_Vector<16, char>, _Constant int)">;
}
let Features = "sse3", Attributes = [NoThrow] in {
@@ -613,12 +614,10 @@ let Features = "avx2", Attributes = [NoThrow, Const, RequiredVectorWidth<256>] i
def psignw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<16, short>)">;
def psignd256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<8, int>)">;
def psllw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<8, short>)">;
- def pslldqi256_byteshift : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Constant int)">;
def pslld256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<4, int>)">;
def psllq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<2, long long int>)">;
def psraw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<8, short>)">;
def psrad256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<4, int>)">;
- def psrldqi256_byteshift : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Constant int)">;
def psrlw256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, _Vector<8, short>)">;
def psrld256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, _Vector<4, int>)">;
def psrlq256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, _Vector<2, long long int>)">;
@@ -652,10 +651,12 @@ let Features = "avx2", Attributes = [NoThrow, Const, Constexpr, RequiredVectorWi
def psllwi256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, int)">;
def pslldi256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, int)">;
def psllqi256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, int)">;
+ def pslldqi256_byteshift : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Constant int)">;
def psrlwi256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, int)">;
def psrldi256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, int)">;
def psrlqi256 : X86Builtin<"_Vector<4, long long int>(_Vector<4, long long int>, int)">;
+ def psrldqi256_byteshift : X86Builtin<"_Vector<32, char>(_Vector<32, char>, _Constant int)">;
def psrawi256 : X86Builtin<"_Vector<16, short>(_Vector<16, short>, int)">;
def psradi256 : X86Builtin<"_Vector<8, int>(_Vector<8, int>, int)">;
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 0cb491063057c..faa7af4487dce 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3128,6 +3128,36 @@ static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_x86_psrldq_byteshift(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call,
+ unsigned ID) {
+ assert(Call->getNumArgs() == 2);
+
+ APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
+ uint64_t Shift = ImmAPS.getZExtValue();
+
+ const Pointer &Concat = S.Stk.pop<Pointer>();
+ if (!Concat.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ unsigned NumElems = Concat.getNumElems();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ PrimType ElemT = Concat.getFieldDesc()->getPrimType();
+
+ TYPE_SWITCH(ElemT, {
+ for (unsigned I = 0; I != NumElems; ++I) {
+ if (I + Shift < NumElems)
+ Dst.elem<T>(I) = Concat.elem<T>(I + Shift);
+ else
+ Dst.elem<T>(I) = T();
+ }
+ });
+
+ Dst.initializeAllElements();
+
+ return true;
+}
+
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
uint32_t BuiltinID) {
if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
@@ -4149,6 +4179,10 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_vec_set_v4di:
return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ return interp__builtin_x86_psrldq_byteshift(S, OpPC, Call, BuiltinID);
+
default:
S.FFDiag(S.Current->getLocation(OpPC),
diag::note_invalid_subexpr_in_const_expr)
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index e308c171ed551..4f8a980eb9fa6 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -12790,6 +12790,33 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(Elems.data(), NumElems), E);
}
+
+ case X86::BI__builtin_ia32_psrldqi128_byteshift:
+ case X86::BI__builtin_ia32_psrldqi256_byteshift: {
+ assert(E->getNumArgs() == 2);
+
+ APValue Concat;
+ APSInt Imm;
+ if (!EvaluateAsRValue(Info, E->getArg(0), Concat) ||
+ !EvaluateInteger(E->getArg(1), Imm, Info))
+ return false;
+
+ unsigned VecLen = Concat.getVectorLength();
+ unsigned Shift = Imm.getZExtValue();
+
+ SmallVector<APValue> ResultElements;
+ for (unsigned I = 0; I < VecLen; ++I) {
+ if (I + Shift < VecLen) {
+ ResultElements.push_back(Concat.getVectorElt(I + Shift));
+ } else {
+ APSInt Zero(8, /*isUnsigned=*/true);
+ Zero = 0;
+ ResultElements.push_back(APValue(Zero));
+ }
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
}
}
diff --git a/clang/test/CodeGen/X86/avx2-builtins.c b/clang/test/CodeGen/X86/avx2-builtins.c
index a505d70a98203..fa3d11fd0041c 100644
--- a/clang/test/CodeGen/X86/avx2-builtins.c
+++ b/clang/test/CodeGen/X86/avx2-builtins.c
@@ -1368,6 +1368,8 @@ __m256i test_mm256_srli_si256(__m256i a) {
// CHECK: shufflevector <32 x i8> %{{.*}}, <32 x i8> zeroinitializer, <32 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50>
return _mm256_srli_si256(a, 3);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_srli_si256(((__m256i)(__v32qi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}), 3), 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0, 0, 0));
+TEST_CONSTEXPR(match_v32qi(_mm256_srli_si256(((__m256i)(__v32qi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}), 32), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
__m128i test_mm_srlv_epi32(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_srlv_epi32
diff --git a/clang/test/CodeGen/X86/sse2-builtins.c b/clang/test/CodeGen/X86/sse2-builtins.c
index ade7ef39a008a..de55afb15adaa 100644
--- a/clang/test/CodeGen/X86/sse2-builtins.c
+++ b/clang/test/CodeGen/X86/sse2-builtins.c
@@ -1565,6 +1565,8 @@ __m128i test_mm_srli_si128(__m128i A) {
// CHECK: shufflevector <16 x i8> %{{.*}}, <16 x i8> zeroinitializer, <16 x i32> <i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20>
return _mm_srli_si128(A, 5);
}
+TEST_CONSTEXPR(match_v16qi(_mm_srli_si128(((__m128i)(__v16qi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}), 5), 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v16qi(_mm_srli_si128(((__m128i)(__v16qi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}), 16), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
__m128i test_mm_srli_si128_2(__m128i A) {
// CHECK-LABEL: test_mm_srli_si128_2
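
For context, what the new constant folding enables at the source level: the byte shift can now be evaluated entirely at compile time. The snippet below is an illustrative sketch, not part of the patch; it assumes a C++ translation unit built with -msse2 against the updated headers, and mirrors the TEST_CONSTEXPR vectors above.

#include <emmintrin.h>

// Shift the whole 128-bit value right by 5 bytes at compile time;
// the vacated high bytes are zero-filled.
constexpr __m128i Shifted = _mm_srli_si128(
    (__m128i)(__v16qi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
    5);
static_assert(((__v16qi)Shifted)[0] == 5, "low byte now holds element 5");
static_assert(((__v16qi)Shifted)[11] == 0, "top 5 bytes are zero-filled");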
>From 2e6cecc92352be459c009031d51649cb20bec69f Mon Sep 17 00:00:00 2001
From: Ye Tian <939808194 at qq.com>
Date: Mon, 20 Oct 2025 09:49:10 +0800
Subject: [PATCH 2/8] [Headers][X86] Support constexpr usage for _mm_slli_si128
and _mm256_slli_si256 intrinsics
---
clang/lib/AST/ByteCode/InterpBuiltin.cpp | 46 +++++++++++++++++++++---
clang/lib/AST/ExprConstant.cpp | 35 +++++++++++++++---
clang/test/CodeGen/X86/avx2-builtins.c | 2 ++
clang/test/CodeGen/X86/sse2-builtins.c | 2 ++
4 files changed, 76 insertions(+), 9 deletions(-)
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index faa7af4487dce..42227aacbdca0 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3128,6 +3128,38 @@ static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_x86_pslldq_byteshift(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call,
+ unsigned ID) {
+ assert(Call->getNumArgs() == 2);
+
+ APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
+ uint64_t Shift = ImmAPS.getZExtValue();
+
+ const Pointer &Src = S.Stk.pop<Pointer>();
+ if (!Src.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ unsigned NumElems = Src.getNumElems();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ PrimType ElemT = Src.getFieldDesc()->getPrimType();
+
+ TYPE_SWITCH(ElemT, {
+ for (unsigned I = 0; I != NumElems; ++I) {
+ if (I < Shift) {
+ Dst.elem<T>(I) = T();
+ }
+ else {
+ Dst.elem<T>(I) = Src.elem<T>(I - Shift);
+ }
+ }
+ });
+
+ Dst.initializeAllElements();
+
+ return true;
+}
+
static bool interp__builtin_x86_psrldq_byteshift(InterpState &S, CodePtr OpPC,
const CallExpr *Call,
unsigned ID) {
@@ -3136,18 +3168,18 @@ static bool interp__builtin_x86_psrldq_byteshift(InterpState &S, CodePtr OpPC,
APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
uint64_t Shift = ImmAPS.getZExtValue();
- const Pointer &Concat = S.Stk.pop<Pointer>();
- if (!Concat.getFieldDesc()->isPrimitiveArray())
+ const Pointer &Src = S.Stk.pop<Pointer>();
+ if (!Src.getFieldDesc()->isPrimitiveArray())
return false;
- unsigned NumElems = Concat.getNumElems();
+ unsigned NumElems = Src.getNumElems();
const Pointer &Dst = S.Stk.peek<Pointer>();
- PrimType ElemT = Concat.getFieldDesc()->getPrimType();
+ PrimType ElemT = Src.getFieldDesc()->getPrimType();
TYPE_SWITCH(ElemT, {
for (unsigned I = 0; I != NumElems; ++I) {
if (I + Shift < NumElems)
- Dst.elem<T>(I) = Concat.elem<T>(I + Shift);
+ Dst.elem<T>(I) = Src.elem<T>(I + Shift);
else
Dst.elem<T>(I) = T();
}
@@ -4179,6 +4211,10 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_vec_set_v4di:
return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ return interp__builtin_x86_pslldq_byteshift(S, OpPC, Call, BuiltinID);
+
case X86::BI__builtin_ia32_psrldqi128_byteshift:
case X86::BI__builtin_ia32_psrldqi256_byteshift:
return interp__builtin_x86_psrldq_byteshift(S, OpPC, Call, BuiltinID);
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 4f8a980eb9fa6..db9ab7f0b9e33 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -12791,23 +12791,50 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(Elems.data(), NumElems), E);
}
+ case X86::BI__builtin_ia32_pslldqi128_byteshift:
+ case X86::BI__builtin_ia32_pslldqi256_byteshift: {
+ assert(E->getNumArgs() == 2);
+
+ APValue Src;
+ APSInt Imm;
+ if (!EvaluateAsRValue(Info, E->getArg(0), Src) ||
+ !EvaluateInteger(E->getArg(1), Imm, Info))
+ return false;
+
+ unsigned VecLen = Src.getVectorLength();
+ unsigned Shift = Imm.getZExtValue();
+
+ SmallVector<APValue> ResultElements;
+ for (unsigned I = 0; I != VecLen; ++I) {
+ if (I < Shift) {
+ APSInt Zero(8, /*isUnsigned=*/true);
+ Zero = 0;
+ ResultElements.push_back(APValue(Zero));
+ } else {
+ ResultElements.push_back(Src.getVectorElt(I - Shift));
+ }
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+
case X86::BI__builtin_ia32_psrldqi128_byteshift:
case X86::BI__builtin_ia32_psrldqi256_byteshift: {
assert(E->getNumArgs() == 2);
- APValue Concat;
+ APValue Src;
APSInt Imm;
- if (!EvaluateAsRValue(Info, E->getArg(0), Concat) ||
+ if (!EvaluateAsRValue(Info, E->getArg(0), Src) ||
!EvaluateInteger(E->getArg(1), Imm, Info))
return false;
- unsigned VecLen = Concat.getVectorLength();
+ unsigned VecLen = Src.getVectorLength();
unsigned Shift = Imm.getZExtValue();
SmallVector<APValue> ResultElements;
for (unsigned I = 0; I < VecLen; ++I) {
if (I + Shift < VecLen) {
- ResultElements.push_back(Concat.getVectorElt(I + Shift));
+ ResultElements.push_back(Src.getVectorElt(I + Shift));
} else {
APSInt Zero(8, /*isUnsigned=*/true);
Zero = 0;
diff --git a/clang/test/CodeGen/X86/avx2-builtins.c b/clang/test/CodeGen/X86/avx2-builtins.c
index fa3d11fd0041c..79ba172ad0938 100644
--- a/clang/test/CodeGen/X86/avx2-builtins.c
+++ b/clang/test/CodeGen/X86/avx2-builtins.c
@@ -1225,6 +1225,8 @@ __m256i test_mm256_slli_si256(__m256i a) {
// CHECK: shufflevector <32 x i8> zeroinitializer, <32 x i8> %{{.*}}, <32 x i32> <i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60>
return _mm256_slli_si256(a, 3);
}
+TEST_CONSTEXPR(match_v32qi(_mm256_slli_si256(((__m256i)(__v32qi){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}), 3), 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29));
+TEST_CONSTEXPR(match_v32qi(_mm256_slli_si256(((__m256i)(__v32qi){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}), 32), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
__m128i test_mm_sllv_epi32(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_sllv_epi32
diff --git a/clang/test/CodeGen/X86/sse2-builtins.c b/clang/test/CodeGen/X86/sse2-builtins.c
index de55afb15adaa..4faae3f1f35d1 100644
--- a/clang/test/CodeGen/X86/sse2-builtins.c
+++ b/clang/test/CodeGen/X86/sse2-builtins.c
@@ -1413,6 +1413,8 @@ __m128i test_mm_slli_si128(__m128i A) {
// CHECK: shufflevector <16 x i8> zeroinitializer, <16 x i8> %{{.*}}, <16 x i32> <i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26>
return _mm_slli_si128(A, 5);
}
+TEST_CONSTEXPR(match_v16qi(_mm_slli_si128(((__m128i)(__v16qi){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), 5), 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11));
+TEST_CONSTEXPR(match_v16qi(_mm_slli_si128(((__m128i)(__v16qi){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), 16), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
__m128i test_mm_slli_si128_2(__m128i A) {
// CHECK-LABEL: test_mm_slli_si128_2
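
The left-shift counterpart reads the same way. Another illustrative sketch, not part of the patch, under the same assumptions as the _mm_srli_si128 example above:

#include <emmintrin.h>

// Shift left by 5 bytes at compile time: the low 5 bytes become zero
// and every other byte moves up by 5 positions.
constexpr __m128i Shifted = _mm_slli_si128(
    (__m128i)(__v16qi){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
    5);
static_assert(((__v16qi)Shifted)[4] == 0, "low 5 bytes are zero-filled");
static_assert(((__v16qi)Shifted)[5] == 1, "element 0 moved to position 5");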
>From 736b2bb62ffceb890baf5a58272e01f833667a60 Mon Sep 17 00:00:00 2001
From: Ye Tian <939808194 at qq.com>
Date: Mon, 20 Oct 2025 09:50:06 +0800
Subject: [PATCH 3/8] [NFC][Headers][X86] Reformat the SLLDQ/SRLDQ byte shift
 interpreter code
---
clang/lib/AST/ByteCode/InterpBuiltin.cpp | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 42227aacbdca0..85e18f2436885 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3148,8 +3148,7 @@ static bool interp__builtin_x86_pslldq_byteshift(InterpState &S, CodePtr OpPC,
for (unsigned I = 0; I != NumElems; ++I) {
if (I < Shift) {
Dst.elem<T>(I) = T();
- }
- else {
+ } else {
Dst.elem<T>(I) = Src.elem<T>(I - Shift);
}
}
@@ -4214,7 +4213,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_pslldqi128_byteshift:
case X86::BI__builtin_ia32_pslldqi256_byteshift:
return interp__builtin_x86_pslldq_byteshift(S, OpPC, Call, BuiltinID);
-
+
case X86::BI__builtin_ia32_psrldqi128_byteshift:
case X86::BI__builtin_ia32_psrldqi256_byteshift:
return interp__builtin_x86_psrldq_byteshift(S, OpPC, Call, BuiltinID);
>From 288fb51233239421419a46f946c6d7b7cc516e6f Mon Sep 17 00:00:00 2001
From: Ye Tian <939808194 at qq.com>
Date: Wed, 22 Oct 2025 16:03:21 +0800
Subject: [PATCH 4/8] [Headers][X86] Address review comments for SLLDQ/SRLDQ
constexpr intrinsics
---
clang/lib/AST/ByteCode/InterpBuiltin.cpp | 73 ++++++++++--------------
clang/lib/AST/ExprConstant.cpp | 32 ++++++-----
clang/test/CodeGen/X86/avx2-builtins.c | 8 +--
3 files changed, 53 insertions(+), 60 deletions(-)
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 85e18f2436885..38e7365c8267b 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3128,9 +3128,10 @@ static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
return true;
}
-static bool interp__builtin_x86_pslldq_byteshift(InterpState &S, CodePtr OpPC,
+static bool interp__builtin_x86_byteshift(InterpState &S, CodePtr OpPC,
const CallExpr *Call,
- unsigned ID) {
+ unsigned ID,
+ llvm::function_ref<APInt(const Pointer &, PrimType ElemT, unsigned Lane, unsigned I, unsigned Shift)> Fn) {
assert(Call->getNumArgs() == 2);
APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
@@ -3144,45 +3145,13 @@ static bool interp__builtin_x86_pslldq_byteshift(InterpState &S, CodePtr OpPC,
const Pointer &Dst = S.Stk.peek<Pointer>();
PrimType ElemT = Src.getFieldDesc()->getPrimType();
- TYPE_SWITCH(ElemT, {
- for (unsigned I = 0; I != NumElems; ++I) {
- if (I < Shift) {
- Dst.elem<T>(I) = T();
- } else {
- Dst.elem<T>(I) = Src.elem<T>(I - Shift);
- }
+ for (unsigned Lane = 0; Lane != NumElems; Lane += 16) {
+ for (unsigned I = 0; I != 16; ++I) {
+ unsigned Base = Lane + I;
+ APSInt Result = APSInt(Fn(Src, ElemT, Lane, I, Shift));
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {Dst.elem<T>(Base) = static_cast<T>(Result);});
}
- });
-
- Dst.initializeAllElements();
-
- return true;
-}
-
-static bool interp__builtin_x86_psrldq_byteshift(InterpState &S, CodePtr OpPC,
- const CallExpr *Call,
- unsigned ID) {
- assert(Call->getNumArgs() == 2);
-
- APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
- uint64_t Shift = ImmAPS.getZExtValue();
-
- const Pointer &Src = S.Stk.pop<Pointer>();
- if (!Src.getFieldDesc()->isPrimitiveArray())
- return false;
-
- unsigned NumElems = Src.getNumElems();
- const Pointer &Dst = S.Stk.peek<Pointer>();
- PrimType ElemT = Src.getFieldDesc()->getPrimType();
-
- TYPE_SWITCH(ElemT, {
- for (unsigned I = 0; I != NumElems; ++I) {
- if (I + Shift < NumElems)
- Dst.elem<T>(I) = Src.elem<T>(I + Shift);
- else
- Dst.elem<T>(I) = T();
- }
- });
+ }
Dst.initializeAllElements();
@@ -4212,11 +4181,31 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_pslldqi128_byteshift:
case X86::BI__builtin_ia32_pslldqi256_byteshift:
- return interp__builtin_x86_pslldq_byteshift(S, OpPC, Call, BuiltinID);
+ return interp__builtin_x86_byteshift(S, OpPC, Call, BuiltinID, [](const Pointer &Src, PrimType ElemT, unsigned Lane, unsigned I, unsigned Shift) {
+ APInt v;
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ if(I < Shift) {
+ v = APInt(sizeof(T) * 8, 0);
+ } else {
+ v = APInt(sizeof(T) * 8, static_cast<uint64_t>(Src.elem<T>(Lane + I - Shift)));
+ }
+ });
+ return v;
+ });
case X86::BI__builtin_ia32_psrldqi128_byteshift:
case X86::BI__builtin_ia32_psrldqi256_byteshift:
- return interp__builtin_x86_psrldq_byteshift(S, OpPC, Call, BuiltinID);
+ return interp__builtin_x86_byteshift(S, OpPC, Call, BuiltinID, [](const Pointer &Src, PrimType ElemT, unsigned Lane, unsigned I, unsigned Shift) {
+ APInt v;
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ if(I + Shift < 16) {
+ v = APInt(sizeof(T) * 8, static_cast<uint64_t>(Src.elem<T>(Lane + I + Shift)));
+ } else {
+ v = APInt(sizeof(T) * 8, 0);
+ }
+ });
+ return v;
+ });
default:
S.FFDiag(S.Current->getLocation(OpPC),
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index db9ab7f0b9e33..36983a382eb4d 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -12805,13 +12805,15 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
unsigned Shift = Imm.getZExtValue();
SmallVector<APValue> ResultElements;
- for (unsigned I = 0; I != VecLen; ++I) {
- if (I < Shift) {
- APSInt Zero(8, /*isUnsigned=*/true);
- Zero = 0;
- ResultElements.push_back(APValue(Zero));
- } else {
- ResultElements.push_back(Src.getVectorElt(I - Shift));
+ for (unsigned Lane = 0; Lane != VecLen; Lane += 16) {
+ for (unsigned I = 0; I != 16; ++I) {
+ if (I < Shift) {
+ APSInt Zero(8, /*isUnsigned=*/true);
+ Zero = 0;
+ ResultElements.push_back(APValue(Zero));
+ } else {
+ ResultElements.push_back(Src.getVectorElt(Lane + I - Shift));
+ }
}
}
@@ -12832,13 +12834,15 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
unsigned Shift = Imm.getZExtValue();
SmallVector<APValue> ResultElements;
- for (unsigned I = 0; I < VecLen; ++I) {
- if (I + Shift < VecLen) {
- ResultElements.push_back(Src.getVectorElt(I + Shift));
- } else {
- APSInt Zero(8, /*isUnsigned=*/true);
- Zero = 0;
- ResultElements.push_back(APValue(Zero));
+ for (unsigned Lane = 0; Lane != VecLen; Lane += 16) {
+ for (unsigned I = 0; I != 16; ++I) {
+ if (I + Shift < 16) {
+ ResultElements.push_back(Src.getVectorElt(Lane + I + Shift));
+ } else {
+ APSInt Zero(8, /*isUnsigned=*/true);
+ Zero = 0;
+ ResultElements.push_back(APValue(Zero));
+ }
}
}
diff --git a/clang/test/CodeGen/X86/avx2-builtins.c b/clang/test/CodeGen/X86/avx2-builtins.c
index 79ba172ad0938..827f4c0d76188 100644
--- a/clang/test/CodeGen/X86/avx2-builtins.c
+++ b/clang/test/CodeGen/X86/avx2-builtins.c
@@ -1225,8 +1225,8 @@ __m256i test_mm256_slli_si256(__m256i a) {
// CHECK: shufflevector <32 x i8> zeroinitializer, <32 x i8> %{{.*}}, <32 x i32> <i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60>
return _mm256_slli_si256(a, 3);
}
-TEST_CONSTEXPR(match_v32qi(_mm256_slli_si256(((__m256i)(__v32qi){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}), 3), 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29));
-TEST_CONSTEXPR(match_v32qi(_mm256_slli_si256(((__m256i)(__v32qi){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}), 32), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v32qi(_mm256_slli_si256(((__m256i)(__v32qi){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}), 3), 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 0, 0, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29));
+TEST_CONSTEXPR(match_v32qi(_mm256_slli_si256(((__m256i)(__v32qi){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}), 16), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
__m128i test_mm_sllv_epi32(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_sllv_epi32
@@ -1370,8 +1370,8 @@ __m256i test_mm256_srli_si256(__m256i a) {
// CHECK: shufflevector <32 x i8> %{{.*}}, <32 x i8> zeroinitializer, <32 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 32, i32 33, i32 34, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 48, i32 49, i32 50>
return _mm256_srli_si256(a, 3);
}
-TEST_CONSTEXPR(match_v32qi(_mm256_srli_si256(((__m256i)(__v32qi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}), 3), 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0, 0, 0));
-TEST_CONSTEXPR(match_v32qi(_mm256_srli_si256(((__m256i)(__v32qi){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}), 32), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
+TEST_CONSTEXPR(match_v32qi(_mm256_srli_si256(((__m256i)(__v32qi){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}), 3), 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 0, 0, 0));
+TEST_CONSTEXPR(match_v32qi(_mm256_srli_si256(((__m256i)(__v32qi){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}), 16), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0));
__m128i test_mm_srlv_epi32(__m128i a, __m128i b) {
// CHECK-LABEL: test_mm_srlv_epi32
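
The corrected expectations above encode the per-lane behaviour of the 256-bit forms: each 128-bit half is shifted independently, so bytes never cross the lane boundary. A compile-time check of that property (sketch only, not part of the patch; assumes C++ with -mavx2 and the updated headers):

#include <immintrin.h>

// Left byte-shift by 3: within each 16-byte lane the low 3 bytes are
// zero-filled, so byte 16 of the result is 0, not byte 13 of the input.
constexpr __m256i Shifted = _mm256_slli_si256(
    (__m256i)(__v32qi){1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
                       17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
                       31, 32},
    3);
static_assert(((__v32qi)Shifted)[16] == 0, "high lane starts zero-filled");
static_assert(((__v32qi)Shifted)[19] == 17, "input byte 16 lands at byte 19");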
>From 35bc02bbfa17c0ff6334c3aedb24bd8074b53a1f Mon Sep 17 00:00:00 2001
From: Ye Tian <939808194 at qq.com>
Date: Wed, 22 Oct 2025 16:29:52 +0800
Subject: [PATCH 5/8] [NFC] Code format
---
clang/lib/AST/ByteCode/InterpBuiltin.cpp | 64 ++++++++++++++----------
1 file changed, 37 insertions(+), 27 deletions(-)
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 38e7365c8267b..7d87c2081c209 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3128,10 +3128,11 @@ static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
return true;
}
-static bool interp__builtin_x86_byteshift(InterpState &S, CodePtr OpPC,
- const CallExpr *Call,
- unsigned ID,
- llvm::function_ref<APInt(const Pointer &, PrimType ElemT, unsigned Lane, unsigned I, unsigned Shift)> Fn) {
+static bool interp__builtin_x86_byteshift(
+ InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID,
+ llvm::function_ref<APInt(const Pointer &, PrimType ElemT, unsigned Lane,
+ unsigned I, unsigned Shift)>
+ Fn) {
assert(Call->getNumArgs() == 2);
APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
@@ -3149,7 +3150,8 @@ static bool interp__builtin_x86_byteshift(InterpState &S, CodePtr OpPC,
for (unsigned I = 0; I != 16; ++I) {
unsigned Base = Lane + I;
APSInt Result = APSInt(Fn(Src, ElemT, Lane, I, Shift));
- INT_TYPE_SWITCH_NO_BOOL(ElemT, {Dst.elem<T>(Base) = static_cast<T>(Result);});
+ INT_TYPE_SWITCH_NO_BOOL(ElemT,
+ { Dst.elem<T>(Base) = static_cast<T>(Result); });
}
}
@@ -4181,31 +4183,39 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_pslldqi128_byteshift:
case X86::BI__builtin_ia32_pslldqi256_byteshift:
- return interp__builtin_x86_byteshift(S, OpPC, Call, BuiltinID, [](const Pointer &Src, PrimType ElemT, unsigned Lane, unsigned I, unsigned Shift) {
- APInt v;
- INT_TYPE_SWITCH_NO_BOOL(ElemT, {
- if(I < Shift) {
- v = APInt(sizeof(T) * 8, 0);
- } else {
- v = APInt(sizeof(T) * 8, static_cast<uint64_t>(Src.elem<T>(Lane + I - Shift)));
- }
- });
- return v;
- });
+ return interp__builtin_x86_byteshift(
+ S, OpPC, Call, BuiltinID,
+ [](const Pointer &Src, PrimType ElemT, unsigned Lane, unsigned I,
+ unsigned Shift) {
+ APInt v;
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ if (I < Shift) {
+ v = APInt(sizeof(T) * 8, 0);
+ } else {
+ v = APInt(sizeof(T) * 8,
+ static_cast<uint64_t>(Src.elem<T>(Lane + I - Shift)));
+ }
+ });
+ return v;
+ });
case X86::BI__builtin_ia32_psrldqi128_byteshift:
case X86::BI__builtin_ia32_psrldqi256_byteshift:
- return interp__builtin_x86_byteshift(S, OpPC, Call, BuiltinID, [](const Pointer &Src, PrimType ElemT, unsigned Lane, unsigned I, unsigned Shift) {
- APInt v;
- INT_TYPE_SWITCH_NO_BOOL(ElemT, {
- if(I + Shift < 16) {
- v = APInt(sizeof(T) * 8, static_cast<uint64_t>(Src.elem<T>(Lane + I + Shift)));
- } else {
- v = APInt(sizeof(T) * 8, 0);
- }
- });
- return v;
- });
+ return interp__builtin_x86_byteshift(
+ S, OpPC, Call, BuiltinID,
+ [](const Pointer &Src, PrimType ElemT, unsigned Lane, unsigned I,
+ unsigned Shift) {
+ APInt v;
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ if (I + Shift < 16) {
+ v = APInt(sizeof(T) * 8,
+ static_cast<uint64_t>(Src.elem<T>(Lane + I + Shift)));
+ } else {
+ v = APInt(sizeof(T) * 8, 0);
+ }
+ });
+ return v;
+ });
default:
S.FFDiag(S.Current->getLocation(OpPC),
>From d8cf9dbf9ea6c3174f35c3c1ed707d0f29667b92 Mon Sep 17 00:00:00 2001
From: Ye Tian <939808194 at qq.com>
Date: Wed, 22 Oct 2025 22:30:30 +0800
Subject: [PATCH 6/8] [Headers][X86] Simplify SLLDQ/SRLDQ lambda: hardcode bit
width for byte shift intrinsic
- Hardcode the APInt width to 8 bits in the SLLDQ/SRLDQ byte shift lambdas, matching the intrinsics' byte-granular semantics.
- Add clarifying comments explaining the relationship between the lane width and the element width.
---
clang/lib/AST/ByteCode/InterpBuiltin.cpp | 53 +++++++++++-------------
clang/lib/AST/ExprConstant.cpp | 2 +-
2 files changed, 26 insertions(+), 29 deletions(-)
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 7d87c2081c209..ae04032c9f0ec 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -3130,13 +3130,13 @@ static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC,
static bool interp__builtin_x86_byteshift(
InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID,
- llvm::function_ref<APInt(const Pointer &, PrimType ElemT, unsigned Lane,
- unsigned I, unsigned Shift)>
+ llvm::function_ref<APInt(const Pointer &, unsigned Lane, unsigned I,
+ unsigned Shift)>
Fn) {
assert(Call->getNumArgs() == 2);
APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
- uint64_t Shift = ImmAPS.getZExtValue();
+ uint64_t Shift = ImmAPS.getZExtValue() & 0xff;
const Pointer &Src = S.Stk.pop<Pointer>();
if (!Src.getFieldDesc()->isPrimitiveArray())
@@ -3149,7 +3149,7 @@ static bool interp__builtin_x86_byteshift(
for (unsigned Lane = 0; Lane != NumElems; Lane += 16) {
for (unsigned I = 0; I != 16; ++I) {
unsigned Base = Lane + I;
- APSInt Result = APSInt(Fn(Src, ElemT, Lane, I, Shift));
+ APSInt Result = APSInt(Fn(Src, Lane, I, Shift));
INT_TYPE_SWITCH_NO_BOOL(ElemT,
{ Dst.elem<T>(Base) = static_cast<T>(Result); });
}
@@ -4183,38 +4183,35 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case X86::BI__builtin_ia32_pslldqi128_byteshift:
case X86::BI__builtin_ia32_pslldqi256_byteshift:
+ // These SLLDQ intrinsics always operate on byte elements (8 bits).
+ // The lane width is hardcoded to 16 to match the SIMD register size,
+ // but the algorithm processes one byte per iteration,
+ // so APInt(8, ...) is correct and intentional.
return interp__builtin_x86_byteshift(
S, OpPC, Call, BuiltinID,
- [](const Pointer &Src, PrimType ElemT, unsigned Lane, unsigned I,
- unsigned Shift) {
- APInt v;
- INT_TYPE_SWITCH_NO_BOOL(ElemT, {
- if (I < Shift) {
- v = APInt(sizeof(T) * 8, 0);
- } else {
- v = APInt(sizeof(T) * 8,
- static_cast<uint64_t>(Src.elem<T>(Lane + I - Shift)));
- }
- });
- return v;
+ [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
+ if (I < Shift) {
+ return APInt(8, 0);
+ }
+ return APInt(
+ 8, static_cast<uint8_t>(Src.elem<uint8_t>(Lane + I - Shift)));
});
case X86::BI__builtin_ia32_psrldqi128_byteshift:
case X86::BI__builtin_ia32_psrldqi256_byteshift:
+ // These SRLDQ intrinsics always operate on byte elements (8 bits).
+ // The lane width is hardcoded to 16 to match the SIMD register size,
+ // but the algorithm processes one byte per iteration,
+ // so APInt(8, ...) is correct and intentional.
return interp__builtin_x86_byteshift(
S, OpPC, Call, BuiltinID,
- [](const Pointer &Src, PrimType ElemT, unsigned Lane, unsigned I,
- unsigned Shift) {
- APInt v;
- INT_TYPE_SWITCH_NO_BOOL(ElemT, {
- if (I + Shift < 16) {
- v = APInt(sizeof(T) * 8,
- static_cast<uint64_t>(Src.elem<T>(Lane + I + Shift)));
- } else {
- v = APInt(sizeof(T) * 8, 0);
- }
- });
- return v;
+ [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
+ if (I + Shift < 16) {
+ return APInt(
+ 8, static_cast<uint8_t>(Src.elem<uint8_t>(Lane + I + Shift)));
+ }
+
+ return APInt(8, 0);
});
default:
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 36983a382eb4d..cd4561fdc943e 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -12802,7 +12802,7 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return false;
unsigned VecLen = Src.getVectorLength();
- unsigned Shift = Imm.getZExtValue();
+ unsigned Shift = Imm.getZExtValue() & 0xff;
SmallVector<APValue> ResultElements;
for (unsigned Lane = 0; Lane != VecLen; Lane += 16) {
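
To summarise the semantics the interpreter now models: the immediate is masked to 8 bits, the shift is applied per 16-byte lane, and every element is treated as a byte regardless of how the caller spells the vector type. A plain-C model of that behaviour (illustrative only, not the interpreter code):

#include <stdint.h>

// Model of PSRLDQ on an nbytes-wide vector: each 16-byte lane is shifted
// right independently and the vacated high bytes of the lane are zeroed.
// PSLLDQ is the mirror image: dst[lane + i] = i < shift ? 0 : src[lane + i - shift].
static void psrldq_model(uint8_t *dst, const uint8_t *src,
                         unsigned nbytes, unsigned shift) {
  shift &= 0xff; /* the immediate is an 8-bit quantity */
  for (unsigned lane = 0; lane != nbytes; lane += 16)
    for (unsigned i = 0; i != 16; ++i)
      dst[lane + i] = (i + shift < 16) ? src[lane + i + shift] : 0;
}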
>From 51a835bafe5c5e58dd895b53dd3cdb23205eddd3 Mon Sep 17 00:00:00 2001
From: Ye Tian <939808194 at qq.com>
Date: Wed, 22 Oct 2025 22:41:23 +0800
Subject: [PATCH 7/8] [Headers][X86] Mask shift immediate to 8 bits for byte
shift intrinsic
---
clang/lib/AST/ExprConstant.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index cd4561fdc943e..94833fdbf8d50 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -12831,7 +12831,7 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return false;
unsigned VecLen = Src.getVectorLength();
- unsigned Shift = Imm.getZExtValue();
+ unsigned Shift = Imm.getZExtValue() & 0xff;
SmallVector<APValue> ResultElements;
for (unsigned Lane = 0; Lane != VecLen; Lane += 16) {
>From bb5269174d6e51e81c5709cb67a58e4ac4121c3a Mon Sep 17 00:00:00 2001
From: Ye Tian <939808194 at qq.com>
Date: Wed, 22 Oct 2025 22:56:43 +0800
Subject: [PATCH 8/8] [Headers][X86] Address review comments for SLLDQ/SRLDQ
constexpr intrinsics
---
clang/lib/AST/ByteCode/InterpBuiltin.cpp | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index ae04032c9f0ec..40e97ea1e11e7 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -4193,8 +4193,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
if (I < Shift) {
return APInt(8, 0);
}
- return APInt(
- 8, static_cast<uint8_t>(Src.elem<uint8_t>(Lane + I - Shift)));
+ return APInt(8, Src.elem<uint8_t>(Lane + I - Shift));
});
case X86::BI__builtin_ia32_psrldqi128_byteshift:
@@ -4207,8 +4206,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
S, OpPC, Call, BuiltinID,
[](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) {
if (I + Shift < 16) {
- return APInt(
- 8, static_cast<uint8_t>(Src.elem<uint8_t>(Lane + I + Shift)));
+ return APInt(8, Src.elem<uint8_t>(Lane + I + Shift));
}
return APInt(8, 0);