[llvm] [ConstantFolding] Add folding for [de]interleave2, insert and extract (PR #141301)
Nikolay Panchenko via llvm-commits
llvm-commits at lists.llvm.org
Tue May 27 08:03:43 PDT 2025
https://github.com/npanchen updated https://github.com/llvm/llvm-project/pull/141301
From 42566b4fb31f7c6833687d33de1e5ff3a347d20b Mon Sep 17 00:00:00 2001
From: Kolya Panchenko <npanchen at modular.com>
Date: Fri, 23 May 2025 15:41:39 -0700
Subject: [PATCH 1/2] [ConstantFolding] Add folding for [de]interleave2, insert
and extract
The change adds constant folding for four vector intrinsics: `interleave2`,
`deinterleave2`, `vector_extract` and `vector_insert`.
For the last two intrinsics the change does not reuse the `ShuffleVector`
folding mechanism, since it is much simpler to construct the result vector
explicitly.
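
For example (mirroring the new tests below; the `%r` name is only
illustrative), a constant-operand call such as

  %r = call <8 x i32> @llvm.vector.interleave2.v8i32(<4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> <i32 5, i32 6, i32 7, i32 8>)

now folds to the interleaved constant

  <8 x i32> <i32 1, i32 5, i32 2, i32 6, i32 3, i32 7, i32 4, i32 8>

and `deinterleave2` performs the inverse fold, splitting such a constant back
into its two half-width vectors.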
---
llvm/lib/Analysis/ConstantFolding.cpp | 78 +++++++++++++++++++
.../InstSimplify/ConstProp/vector-calls.ll | 50 ++++++++++++
2 files changed, 128 insertions(+)
create mode 100644 llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 412a0e8979193..d30f2fef69a54 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -1619,6 +1619,10 @@ bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
case Intrinsic::vector_reduce_smax:
case Intrinsic::vector_reduce_umin:
case Intrinsic::vector_reduce_umax:
+ case Intrinsic::vector_extract:
+ case Intrinsic::vector_insert:
+ case Intrinsic::vector_interleave2:
+ case Intrinsic::vector_deinterleave2:
// Target intrinsics
case Intrinsic::amdgcn_perm:
case Intrinsic::amdgcn_wave_reduce_umin:
@@ -3734,6 +3738,65 @@ static Constant *ConstantFoldFixedVectorCall(
}
return nullptr;
}
+ case Intrinsic::vector_extract: {
+ auto *Vec = dyn_cast<Constant>(Operands[0]);
+ auto *Idx = dyn_cast<ConstantInt>(Operands[1]);
+ if (!Vec || !Idx)
+ return nullptr;
+
+ unsigned NumElements = FVTy->getNumElements();
+ unsigned VecNumElements =
+ cast<FixedVectorType>(Vec->getType())->getNumElements();
+ // Extracting entire vector is nop
+ if (NumElements == VecNumElements)
+ return Vec;
+
+ unsigned StartingIndex = Idx->getZExtValue();
+ assert(StartingIndex + NumElements <= VecNumElements &&
+ "Cannot extract more elements than exist in the vector");
+ for (unsigned I = 0; I != NumElements; ++I)
+ Result[I] = Vec->getAggregateElement(StartingIndex + I);
+ return ConstantVector::get(Result);
+ }
+ case Intrinsic::vector_insert: {
+ auto *Vec = dyn_cast<Constant>(Operands[0]);
+ auto *SubVec = dyn_cast<Constant>(Operands[1]);
+ auto *Idx = dyn_cast<ConstantInt>(Operands[2]);
+ if (!Vec || !SubVec || !Idx)
+ return nullptr;
+
+ unsigned SubVecNumElements =
+ cast<FixedVectorType>(SubVec->getType())->getNumElements();
+ unsigned VecNumElements =
+ cast<FixedVectorType>(Vec->getType())->getNumElements();
+ unsigned IdxN = Idx->getZExtValue();
+ // Replacing entire vector with a subvec is nop
+ if (SubVecNumElements == VecNumElements)
+ return SubVec;
+
+ unsigned I = 0;
+ for (; I < IdxN; ++I)
+ Result[I] = Vec->getAggregateElement(I);
+ for (; I < IdxN + SubVecNumElements; ++I)
+ Result[I] = SubVec->getAggregateElement(I - IdxN);
+ for (; I < VecNumElements; ++I)
+ Result[I] = Vec->getAggregateElement(I);
+ return ConstantVector::get(Result);
+ }
+ case Intrinsic::vector_interleave2: {
+ auto *Vec0 = dyn_cast<Constant>(Operands[0]);
+ auto *Vec1 = dyn_cast<Constant>(Operands[1]);
+ if (!Vec0 || !Vec1)
+ return nullptr;
+
+ unsigned NumElements =
+ cast<FixedVectorType>(Vec0->getType())->getNumElements();
+ for (unsigned I = 0; I < NumElements; ++I) {
+ Result[2 * I] = Vec0->getAggregateElement(I);
+ Result[2 * I + 1] = Vec1->getAggregateElement(I);
+ }
+ return ConstantVector::get(Result);
+ }
default:
break;
}
@@ -3872,6 +3935,21 @@ ConstantFoldStructCall(StringRef Name, Intrinsic::ID IntrinsicID,
return nullptr;
return ConstantStruct::get(StTy, SinResult, CosResult);
}
+ case Intrinsic::vector_deinterleave2: {
+ auto *Vec = dyn_cast<Constant>(Operands[0]);
+ if (!Vec)
+ return nullptr;
+
+ unsigned NumElements =
+ cast<FixedVectorType>(Vec->getType())->getNumElements() / 2;
+ SmallVector<Constant *, 4> Res0(NumElements), Res1(NumElements);
+ for (unsigned I = 0; I < NumElements; ++I) {
+ Res0[I] = Vec->getAggregateElement(2 * I);
+ Res1[I] = Vec->getAggregateElement(2 * I + 1);
+ }
+ return ConstantStruct::get(StTy, ConstantVector::get(Res0),
+ ConstantVector::get(Res1));
+ }
default:
// TODO: Constant folding of vector intrinsics that fall through here does
// not work (e.g. overflow intrinsics)
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
new file mode 100644
index 0000000000000..f0bf610fa52aa
--- /dev/null
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=instsimplify,verify -S | FileCheck %s
+
+define <3 x i32> @fold_vector_extract() {
+; CHECK-LABEL: define <3 x i32> @fold_vector_extract() {
+; CHECK-NEXT: ret <3 x i32> <i32 3, i32 4, i32 5>
+;
+ %1 = call <3 x i32> @llvm.vector.extract.v3i32.v8i32(<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, i64 3)
+ ret <3 x i32> %1
+}
+
+define <8 x i32> @fold_vector_extract_nop() {
+; CHECK-LABEL: define <8 x i32> @fold_vector_extract_nop() {
+; CHECK-NEXT: ret <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+;
+ %1 = call <8 x i32> @llvm.vector.extract.v3i32.v8i32(<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, i64 0)
+ ret <8 x i32> %1
+}
+
+define <8 x i32> @fold_vector_insert() {
+; CHECK-LABEL: define <8 x i32> @fold_vector_insert() {
+; CHECK-NEXT: ret <8 x i32> <i32 9, i32 10, i32 11, i32 12, i32 5, i32 6, i32 7, i32 8>
+;
+ %1 = call <8 x i32> @llvm.vector.insert.v8i32(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>, <4 x i32> <i32 9, i32 10, i32 11, i32 12>, i64 0)
+ ret <8 x i32> %1
+}
+
+define <8 x i32> @fold_vector_insert_nop() {
+; CHECK-LABEL: define <8 x i32> @fold_vector_insert_nop() {
+; CHECK-NEXT: ret <8 x i32> <i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
+;
+ %1 = call <8 x i32> @llvm.vector.insert.v8i32(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>, <8 x i32> <i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>, i64 0)
+ ret <8 x i32> %1
+}
+
+define <8 x i32> @fold_vector_interleave2() {
+; CHECK-LABEL: define <8 x i32> @fold_vector_interleave2() {
+; CHECK-NEXT: ret <8 x i32> <i32 1, i32 5, i32 2, i32 6, i32 3, i32 7, i32 4, i32 8>
+;
+ %1 = call <8 x i32> @llvm.vector.interleave2.v8i32(<4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> <i32 5, i32 6, i32 7, i32 8>)
+ ret <8 x i32> %1
+}
+
+define {<4 x i32>, <4 x i32>} @fold_vector_deinterleave2() {
+; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @fold_vector_deinterleave2() {
+; CHECK-NEXT: ret { <4 x i32>, <4 x i32> } { <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> <i32 5, i32 6, i32 7, i32 8> }
+;
+ %1 = call {<4 x i32>, <4 x i32>} @llvm.vector.deinterleave2.v4i32.v8i32(<8 x i32> <i32 1, i32 5, i32 2, i32 6, i32 3, i32 7, i32 4, i32 8>)
+ ret {<4 x i32>, <4 x i32>} %1
+}
From f0bc11de4fa871ce34bd15843cddc6fa6d01c058 Mon Sep 17 00:00:00 2001
From: Kolya Panchenko <npanchen at modular.com>
Date: Tue, 27 May 2025 11:03:04 -0400
Subject: [PATCH 2/2] Addressed comments
---
llvm/lib/Analysis/ConstantFolding.cpp | 29 +++++++++----
.../InstSimplify/ConstProp/vector-calls.ll | 42 ++++++++++++++++++-
2 files changed, 62 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index d30f2fef69a54..8ea5a304bd9c7 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -3747,15 +3747,21 @@ static Constant *ConstantFoldFixedVectorCall(
unsigned NumElements = FVTy->getNumElements();
unsigned VecNumElements =
cast<FixedVectorType>(Vec->getType())->getNumElements();
+ unsigned StartingIndex = Idx->getZExtValue();
// Extracting entire vector is nop
- if (NumElements == VecNumElements)
+ if (NumElements == VecNumElements && StartingIndex == 0)
return Vec;
- unsigned StartingIndex = Idx->getZExtValue();
- assert(StartingIndex + NumElements <= VecNumElements &&
- "Cannot extract more elements than exist in the vector");
- for (unsigned I = 0; I != NumElements; ++I)
- Result[I] = Vec->getAggregateElement(StartingIndex + I);
+ const unsigned NonPoisonNumElements =
+ std::min(StartingIndex + NumElements, VecNumElements);
+ for (unsigned I = StartingIndex; I < NonPoisonNumElements; ++I)
+ Result[I - StartingIndex] = Vec->getAggregateElement(I);
+
+ // Remaining elements are poison since they are out of bounds.
+ for (unsigned I = NonPoisonNumElements, E = StartingIndex + NumElements;
+ I < E; ++I)
+ Result[I - StartingIndex] = PoisonValue::get(FVTy->getElementType());
+
return ConstantVector::get(Result);
}
case Intrinsic::vector_insert: {
@@ -3771,9 +3777,15 @@ static Constant *ConstantFoldFixedVectorCall(
cast<FixedVectorType>(Vec->getType())->getNumElements();
unsigned IdxN = Idx->getZExtValue();
// Replacing entire vector with a subvec is nop
- if (SubVecNumElements == VecNumElements)
+ if (SubVecNumElements == VecNumElements && IdxN == 0)
return SubVec;
+ // Make sure indices are in the range [0, VecNumElements), otherwise the
+ // result is a poison value.
+ if (IdxN >= VecNumElements || IdxN + SubVecNumElements > VecNumElements ||
+ (IdxN % SubVecNumElements) != 0)
+ return PoisonValue::get(FVTy);
+
unsigned I = 0;
for (; I < IdxN; ++I)
Result[I] = Vec->getAggregateElement(I);
@@ -3941,7 +3953,8 @@ ConstantFoldStructCall(StringRef Name, Intrinsic::ID IntrinsicID,
return nullptr;
unsigned NumElements =
- cast<FixedVectorType>(Vec->getType())->getNumElements() / 2;
+ cast<VectorType>(Vec->getType())->getElementCount().getKnownMinValue() /
+ 2;
SmallVector<Constant *, 4> Res0(NumElements), Res1(NumElements);
for (unsigned I = 0; I < NumElements; ++I) {
Res0[I] = Vec->getAggregateElement(2 * I);
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
index f0bf610fa52aa..38c35f28cd11b 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt < %s -passes=instsimplify,verify -S | FileCheck %s
+; RUN: opt < %s -passes=instsimplify,verify -disable-verify -S | FileCheck %s
define <3 x i32> @fold_vector_extract() {
; CHECK-LABEL: define <3 x i32> @fold_vector_extract() {
@@ -9,6 +9,22 @@ define <3 x i32> @fold_vector_extract() {
ret <3 x i32> %1
}
+define <3 x i32> @fold_vector_extract_last_poison() {
+; CHECK-LABEL: define <3 x i32> @fold_vector_extract_last_poison() {
+; CHECK-NEXT: ret <3 x i32> <i32 6, i32 7, i32 poison>
+;
+ %1 = call <3 x i32> @llvm.vector.extract.v3i32.v8i32(<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, i64 6)
+ ret <3 x i32> %1
+}
+
+define <3 x i32> @fold_vector_extract_poison() {
+; CHECK-LABEL: define <3 x i32> @fold_vector_extract_poison() {
+; CHECK-NEXT: ret <3 x i32> poison
+;
+ %1 = call <3 x i32> @llvm.vector.extract.v3i32.v8i32(<8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, i64 8)
+ ret <3 x i32> %1
+}
+
define <8 x i32> @fold_vector_extract_nop() {
; CHECK-LABEL: define <8 x i32> @fold_vector_extract_nop() {
; CHECK-NEXT: ret <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -33,6 +49,22 @@ define <8 x i32> @fold_vector_insert_nop() {
ret <8 x i32> %1
}
+define <8 x i32> @fold_vector_insert_poison_idx_range() {
+; CHECK-LABEL: define <8 x i32> @fold_vector_insert_poison_idx_range() {
+; CHECK-NEXT: ret <8 x i32> poison
+;
+ %1 = call <8 x i32> @llvm.vector.insert.v8i32(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>, <6 x i32> <i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>, i64 6)
+ ret <8 x i32> %1
+}
+
+define <8 x i32> @fold_vector_insert_poison_large_idx() {
+; CHECK-LABEL: define <8 x i32> @fold_vector_insert_poison_large_idx() {
+; CHECK-NEXT: ret <8 x i32> poison
+;
+ %1 = call <8 x i32> @llvm.vector.insert.v8i32(<8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>, <6 x i32> <i32 9, i32 10, i32 11, i32 12, i32 13, i32 14>, i64 -2)
+ ret <8 x i32> %1
+}
+
define <8 x i32> @fold_vector_interleave2() {
; CHECK-LABEL: define <8 x i32> @fold_vector_interleave2() {
; CHECK-NEXT: ret <8 x i32> <i32 1, i32 5, i32 2, i32 6, i32 3, i32 7, i32 4, i32 8>
@@ -48,3 +80,11 @@ define {<4 x i32>, <4 x i32>} @fold_vector_deinterleave2() {
%1 = call {<4 x i32>, <4 x i32>} @llvm.vector.deinterleave2.v4i32.v8i32(<8 x i32> <i32 1, i32 5, i32 2, i32 6, i32 3, i32 7, i32 4, i32 8>)
ret {<4 x i32>, <4 x i32>} %1
}
+
+define {<vscale x 4 x i32>, <vscale x 4 x i32>} @fold_scalable_vector_deinterleave2() {
+; CHECK-LABEL: define { <vscale x 4 x i32>, <vscale x 4 x i32> } @fold_scalable_vector_deinterleave2() {
+; CHECK-NEXT: ret { <vscale x 4 x i32>, <vscale x 4 x i32> } zeroinitializer
+;
+ %1 = call {<vscale x 4 x i32>, <vscale x 4 x i32>} @llvm.vector.deinterleave2.v4i32.v8i32(<vscale x 8 x i32> zeroinitializer)
+ ret {<vscale x 4 x i32>, <vscale x 4 x i32>} %1
+}