[llvm] [ConstantFolding] Add constant folding for scalable vector interleave intrinsics. (PR #168668)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 18 21:57:03 PST 2025
https://github.com/topperc created https://github.com/llvm/llvm-project/pull/168668
We can constant fold interleave of identical splat vectors to a larger splat vector.
>From 741a54c6a4634d89f51537ed6b66e00ad744025e Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 18 Nov 2025 21:42:23 -0800
Subject: [PATCH 1/2] Pre-commit tests. NFC
---
.../InstSimplify/ConstProp/vector-calls.ll | 126 ++++++++++++++++++
1 file changed, 126 insertions(+)
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
index 848f0d17ff373..af418bedd651b 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
@@ -51,6 +51,24 @@ define <8 x i32> @fold_vector_interleave2() {
ret <8 x i32> %1
}
+define <vscale x 8 x i32> @fold_scalable_vector_interleave2() {
+; CHECK-LABEL: define <vscale x 8 x i32> @fold_scalable_vector_interleave2() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 8 x i32> %1
+}
+
+define <vscale x 8 x i32> @fold_scalable_vector_interleave2_splat() {
+; CHECK-LABEL: define <vscale x 8 x i32> @fold_scalable_vector_interleave2_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+ ret <vscale x 8 x i32> %1
+}
+
define <12 x i32> @fold_vector_interleave3() {
; CHECK-LABEL: define <12 x i32> @fold_vector_interleave3() {
; CHECK-NEXT: ret <12 x i32> <i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11, i32 4, i32 8, i32 12>
@@ -59,6 +77,24 @@ define <12 x i32> @fold_vector_interleave3() {
ret <12 x i32> %1
}
+define <vscale x 12 x i32> @fold_scalable_vector_interleave3() {
+; CHECK-LABEL: define <vscale x 12 x i32> @fold_scalable_vector_interleave3() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 12 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 12 x i32> %1
+}
+
+define <vscale x 12 x i32> @fold_scalable_vector_interleave3_splat() {
+; CHECK-LABEL: define <vscale x 12 x i32> @fold_scalable_vector_interleave3_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret <vscale x 12 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+ ret <vscale x 12 x i32> %1
+}
+
define <16 x i32> @fold_vector_interleave4() {
; CHECK-LABEL: define <16 x i32> @fold_vector_interleave4() {
; CHECK-NEXT: ret <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15, i32 4, i32 8, i32 12, i32 16>
@@ -67,6 +103,24 @@ define <16 x i32> @fold_vector_interleave4() {
ret <16 x i32> %1
}
+define <vscale x 16 x i32> @fold_scalable_vector_interleave4() {
+; CHECK-LABEL: define <vscale x 16 x i32> @fold_scalable_vector_interleave4() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 16 x i32> %1
+}
+
+define <vscale x 16 x i32> @fold_scalable_vector_interleave4_splat() {
+; CHECK-LABEL: define <vscale x 16 x i32> @fold_scalable_vector_interleave4_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+ ret <vscale x 16 x i32> %1
+}
+
define <20 x i32> @fold_vector_interleave5() {
; CHECK-LABEL: define <20 x i32> @fold_vector_interleave5() {
; CHECK-NEXT: ret <20 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 2, i32 6, i32 10, i32 14, i32 18, i32 3, i32 7, i32 11, i32 15, i32 19, i32 4, i32 8, i32 12, i32 16, i32 20>
@@ -75,6 +129,24 @@ define <20 x i32> @fold_vector_interleave5() {
ret <20 x i32> %1
}
+define <vscale x 20 x i32> @fold_scalable_vector_interleave5() {
+; CHECK-LABEL: define <vscale x 20 x i32> @fold_scalable_vector_interleave5() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 20 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 20 x i32> %1
+}
+
+define <vscale x 20 x i32> @fold_scalable_vector_interleave5_splat() {
+; CHECK-LABEL: define <vscale x 20 x i32> @fold_scalable_vector_interleave5_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret <vscale x 20 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+ ret <vscale x 20 x i32> %1
+}
+
define <24 x i32> @fold_vector_interleave6() {
; CHECK-LABEL: define <24 x i32> @fold_vector_interleave6() {
; CHECK-NEXT: ret <24 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24>
@@ -83,6 +155,24 @@ define <24 x i32> @fold_vector_interleave6() {
ret <24 x i32> %1
}
+define <vscale x 24 x i32> @fold_scalable_vector_interleave6() {
+; CHECK-LABEL: define <vscale x 24 x i32> @fold_scalable_vector_interleave6() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 24 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 24 x i32> %1
+}
+
+define <vscale x 24 x i32> @fold_scalable_vector_interleave6_splat() {
+; CHECK-LABEL: define <vscale x 24 x i32> @fold_scalable_vector_interleave6_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret <vscale x 24 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+ ret <vscale x 24 x i32> %1
+}
+
define <28 x i32> @fold_vector_interleave7() {
; CHECK-LABEL: define <28 x i32> @fold_vector_interleave7() {
; CHECK-NEXT: ret <28 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
@@ -91,6 +181,24 @@ define <28 x i32> @fold_vector_interleave7() {
ret <28 x i32> %1
}
+define <vscale x 28 x i32> @fold_scalable_vector_interleave7() {
+; CHECK-LABEL: define <vscale x 28 x i32> @fold_scalable_vector_interleave7() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 28 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 28 x i32> %1
+}
+
+define <vscale x 28 x i32> @fold_scalable_vector_interleave7_splat() {
+; CHECK-LABEL: define <vscale x 28 x i32> @fold_scalable_vector_interleave7_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret <vscale x 28 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+ ret <vscale x 28 x i32> %1
+}
+
define <32 x i32> @fold_vector_interleave8() {
; CHECK-LABEL: define <32 x i32> @fold_vector_interleave8() {
; CHECK-NEXT: ret <32 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32>
@@ -99,6 +207,24 @@ define <32 x i32> @fold_vector_interleave8() {
ret <32 x i32> %1
}
+define <vscale x 32 x i32> @fold_scalable_vector_interleave8() {
+; CHECK-LABEL: define <vscale x 32 x i32> @fold_scalable_vector_interleave8() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 32 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 32 x i32> %1
+}
+
+define <vscale x 32 x i32> @fold_scalable_vector_interleave8_splat() {
+; CHECK-LABEL: define <vscale x 32 x i32> @fold_scalable_vector_interleave8_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret <vscale x 32 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+ ret <vscale x 32 x i32> %1
+}
+
define {<4 x i32>, <4 x i32>} @fold_vector_deinterleave2() {
; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @fold_vector_deinterleave2() {
; CHECK-NEXT: ret { <4 x i32>, <4 x i32> } { <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> <i32 5, i32 6, i32 7, i32 8> }
>From d49c0db036c64c00ab3912e52469c138f72a4270 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 18 Nov 2025 21:52:22 -0800
Subject: [PATCH 2/2] [ConstantFolding] Add constant folding for scalable
vector interleave intrinsics.
---
llvm/lib/Analysis/ConstantFolding.cpp | 16 +++++++
.../InstSimplify/ConstProp/vector-calls.ll | 42 +++++++------------
2 files changed, 30 insertions(+), 28 deletions(-)
mode change 100755 => 100644 llvm/lib/Analysis/ConstantFolding.cpp
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
old mode 100755
new mode 100644
index a13df6c5bf552..e1bbcec306edc
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -4307,6 +4307,22 @@ static Constant *ConstantFoldScalableVectorCall(
return ConstantVector::getNullValue(SVTy);
break;
}
+ case Intrinsic::vector_interleave2:
+ case Intrinsic::vector_interleave3:
+ case Intrinsic::vector_interleave4:
+ case Intrinsic::vector_interleave5:
+ case Intrinsic::vector_interleave6:
+ case Intrinsic::vector_interleave7:
+ case Intrinsic::vector_interleave8: {
+ Constant *SplatVal = Operands[0]->getSplatValue();
+ if (!SplatVal)
+ return nullptr;
+
+ if (!llvm::all_equal(Operands))
+ return nullptr;
+
+ return ConstantVector::getSplat(SVTy->getElementCount(), SplatVal);
+ }
default:
break;
}
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
index af418bedd651b..2d8a444141a2b 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
@@ -53,8 +53,7 @@ define <8 x i32> @fold_vector_interleave2() {
define <vscale x 8 x i32> @fold_scalable_vector_interleave2() {
; CHECK-LABEL: define <vscale x 8 x i32> @fold_scalable_vector_interleave2() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 8 x i32> zeroinitializer
;
%1 = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 8 x i32> %1
@@ -62,8 +61,7 @@ define <vscale x 8 x i32> @fold_scalable_vector_interleave2() {
define <vscale x 8 x i32> @fold_scalable_vector_interleave2_splat() {
; CHECK-LABEL: define <vscale x 8 x i32> @fold_scalable_vector_interleave2_splat() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
-; CHECK-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 8 x i32> splat (i32 1)
;
%1 = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
ret <vscale x 8 x i32> %1
@@ -79,8 +77,7 @@ define <12 x i32> @fold_vector_interleave3() {
define <vscale x 12 x i32> @fold_scalable_vector_interleave3() {
; CHECK-LABEL: define <vscale x 12 x i32> @fold_scalable_vector_interleave3() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: ret <vscale x 12 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 12 x i32> zeroinitializer
;
 %1 = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 12 x i32> %1
@@ -88,8 +85,7 @@ define <vscale x 12 x i32> @fold_scalable_vector_interleave3() {
define <vscale x 12 x i32> @fold_scalable_vector_interleave3_splat() {
; CHECK-LABEL: define <vscale x 12 x i32> @fold_scalable_vector_interleave3_splat() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
-; CHECK-NEXT: ret <vscale x 12 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 12 x i32> splat (i32 1)
;
 %1 = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
ret <vscale x 12 x i32> %1
@@ -105,8 +101,7 @@ define <16 x i32> @fold_vector_interleave4() {
define <vscale x 16 x i32> @fold_scalable_vector_interleave4() {
; CHECK-LABEL: define <vscale x 16 x i32> @fold_scalable_vector_interleave4() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 16 x i32> zeroinitializer
;
%1 = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 16 x i32> %1
@@ -114,8 +109,7 @@ define <vscale x 16 x i32> @fold_scalable_vector_interleave4() {
define <vscale x 16 x i32> @fold_scalable_vector_interleave4_splat() {
; CHECK-LABEL: define <vscale x 16 x i32> @fold_scalable_vector_interleave4_splat() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
-; CHECK-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 16 x i32> splat (i32 1)
;
%1 = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
ret <vscale x 16 x i32> %1
@@ -131,8 +125,7 @@ define <20 x i32> @fold_vector_interleave5() {
define <vscale x 20 x i32> @fold_scalable_vector_interleave5() {
; CHECK-LABEL: define <vscale x 20 x i32> @fold_scalable_vector_interleave5() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: ret <vscale x 20 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 20 x i32> zeroinitializer
;
%1 = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 20 x i32> %1
@@ -140,8 +133,7 @@ define <vscale x 20 x i32> @fold_scalable_vector_interleave5() {
define <vscale x 20 x i32> @fold_scalable_vector_interleave5_splat() {
; CHECK-LABEL: define <vscale x 20 x i32> @fold_scalable_vector_interleave5_splat() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
-; CHECK-NEXT: ret <vscale x 20 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 20 x i32> splat (i32 1)
;
%1 = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
ret <vscale x 20 x i32> %1
@@ -157,8 +149,7 @@ define <24 x i32> @fold_vector_interleave6() {
define <vscale x 24 x i32> @fold_scalable_vector_interleave6() {
; CHECK-LABEL: define <vscale x 24 x i32> @fold_scalable_vector_interleave6() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: ret <vscale x 24 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 24 x i32> zeroinitializer
;
%1 = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 24 x i32> %1
@@ -166,8 +157,7 @@ define <vscale x 24 x i32> @fold_scalable_vector_interleave6() {
define <vscale x 24 x i32> @fold_scalable_vector_interleave6_splat() {
; CHECK-LABEL: define <vscale x 24 x i32> @fold_scalable_vector_interleave6_splat() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
-; CHECK-NEXT: ret <vscale x 24 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 24 x i32> splat (i32 1)
;
%1 = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
ret <vscale x 24 x i32> %1
@@ -183,8 +173,7 @@ define <28 x i32> @fold_vector_interleave7() {
define <vscale x 28 x i32> @fold_scalable_vector_interleave7() {
; CHECK-LABEL: define <vscale x 28 x i32> @fold_scalable_vector_interleave7() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: ret <vscale x 28 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 28 x i32> zeroinitializer
;
%1 = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 28 x i32> %1
@@ -192,8 +181,7 @@ define <vscale x 28 x i32> @fold_scalable_vector_interleave7() {
define <vscale x 28 x i32> @fold_scalable_vector_interleave7_splat() {
; CHECK-LABEL: define <vscale x 28 x i32> @fold_scalable_vector_interleave7_splat() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
-; CHECK-NEXT: ret <vscale x 28 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 28 x i32> splat (i32 1)
;
%1 = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
ret <vscale x 28 x i32> %1
@@ -209,8 +197,7 @@ define <32 x i32> @fold_vector_interleave8() {
define <vscale x 32 x i32> @fold_scalable_vector_interleave8() {
; CHECK-LABEL: define <vscale x 32 x i32> @fold_scalable_vector_interleave8() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: ret <vscale x 32 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 32 x i32> zeroinitializer
;
%1 = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 32 x i32> %1
@@ -218,8 +205,7 @@ define <vscale x 32 x i32> @fold_scalable_vector_interleave8() {
define <vscale x 32 x i32> @fold_scalable_vector_interleave8_splat() {
; CHECK-LABEL: define <vscale x 32 x i32> @fold_scalable_vector_interleave8_splat() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
-; CHECK-NEXT: ret <vscale x 32 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 32 x i32> splat (i32 1)
;
%1 = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
ret <vscale x 32 x i32> %1
More information about the llvm-commits
mailing list