[llvm] [ConstantFolding] Add constant folding for scalable vector interleave intrinsics. (PR #168668)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Wed Nov 19 11:26:14 PST 2025
https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/168668
>From 741a54c6a4634d89f51537ed6b66e00ad744025e Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 18 Nov 2025 21:42:23 -0800
Subject: [PATCH 1/3] Pre-commit tests. NFC
---
.../InstSimplify/ConstProp/vector-calls.ll | 126 ++++++++++++++++++
1 file changed, 126 insertions(+)
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
index 848f0d17ff373..af418bedd651b 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
@@ -51,6 +51,24 @@ define <8 x i32> @fold_vector_interleave2() {
ret <8 x i32> %1
}
+define <vscale x 8 x i32> @fold_scalable_vector_interleave2() {
+; CHECK-LABEL: define <vscale x 8 x i32> @fold_scalable_vector_interleave2() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 8 x i32> %1
+}
+
+define <vscale x 8 x i32> @fold_scalable_vector_interleave2_splat() {
+; CHECK-LABEL: define <vscale x 8 x i32> @fold_scalable_vector_interleave2_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+ ret <vscale x 8 x i32> %1
+}
+
define <12 x i32> @fold_vector_interleave3() {
; CHECK-LABEL: define <12 x i32> @fold_vector_interleave3() {
; CHECK-NEXT: ret <12 x i32> <i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11, i32 4, i32 8, i32 12>
@@ -59,6 +77,24 @@ define <12 x i32> @fold_vector_interleave3() {
ret <12 x i32> %1
}
+define <vscale x 12 x i32> @fold_scalable_vector_interleave3() {
+; CHECK-LABEL: define <vscale x 12 x i32> @fold_scalable_vector_interleave3() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 12 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv8i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 12 x i32> %1
+}
+
+define <vscale x 12 x i32> @fold_scalable_vector_interleave3_splat() {
+; CHECK-LABEL: define <vscale x 12 x i32> @fold_scalable_vector_interleave3_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret <vscale x 12 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv8i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+ ret <vscale x 12 x i32> %1
+}
+
define <16 x i32> @fold_vector_interleave4() {
; CHECK-LABEL: define <16 x i32> @fold_vector_interleave4() {
; CHECK-NEXT: ret <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 2, i32 6, i32 10, i32 14, i32 3, i32 7, i32 11, i32 15, i32 4, i32 8, i32 12, i32 16>
@@ -67,6 +103,24 @@ define <16 x i32> @fold_vector_interleave4() {
ret <16 x i32> %1
}
+define <vscale x 16 x i32> @fold_scalable_vector_interleave4() {
+; CHECK-LABEL: define <vscale x 16 x i32> @fold_scalable_vector_interleave4() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 16 x i32> %1
+}
+
+define <vscale x 16 x i32> @fold_scalable_vector_interleave4_splat() {
+; CHECK-LABEL: define <vscale x 16 x i32> @fold_scalable_vector_interleave4_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+ ret <vscale x 16 x i32> %1
+}
+
define <20 x i32> @fold_vector_interleave5() {
; CHECK-LABEL: define <20 x i32> @fold_vector_interleave5() {
; CHECK-NEXT: ret <20 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 2, i32 6, i32 10, i32 14, i32 18, i32 3, i32 7, i32 11, i32 15, i32 19, i32 4, i32 8, i32 12, i32 16, i32 20>
@@ -75,6 +129,24 @@ define <20 x i32> @fold_vector_interleave5() {
ret <20 x i32> %1
}
+define <vscale x 20 x i32> @fold_scalable_vector_interleave5() {
+; CHECK-LABEL: define <vscale x 20 x i32> @fold_scalable_vector_interleave5() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 20 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 20 x i32> %1
+}
+
+define <vscale x 20 x i32> @fold_scalable_vector_interleave5_splat() {
+; CHECK-LABEL: define <vscale x 20 x i32> @fold_scalable_vector_interleave5_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret <vscale x 20 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+ ret <vscale x 20 x i32> %1
+}
+
define <24 x i32> @fold_vector_interleave6() {
; CHECK-LABEL: define <24 x i32> @fold_vector_interleave6() {
; CHECK-NEXT: ret <24 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24>
@@ -83,6 +155,24 @@ define <24 x i32> @fold_vector_interleave6() {
ret <24 x i32> %1
}
+define <vscale x 24 x i32> @fold_scalable_vector_interleave6() {
+; CHECK-LABEL: define <vscale x 24 x i32> @fold_scalable_vector_interleave6() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 24 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 24 x i32> %1
+}
+
+define <vscale x 24 x i32> @fold_scalable_vector_interleave6_splat() {
+; CHECK-LABEL: define <vscale x 24 x i32> @fold_scalable_vector_interleave6_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret <vscale x 24 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+ ret <vscale x 24 x i32> %1
+}
+
define <28 x i32> @fold_vector_interleave7() {
; CHECK-LABEL: define <28 x i32> @fold_vector_interleave7() {
; CHECK-NEXT: ret <28 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
@@ -91,6 +181,24 @@ define <28 x i32> @fold_vector_interleave7() {
ret <28 x i32> %1
}
+define <vscale x 28 x i32> @fold_scalable_vector_interleave7() {
+; CHECK-LABEL: define <vscale x 28 x i32> @fold_scalable_vector_interleave7() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 28 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 28 x i32> %1
+}
+
+define <vscale x 28 x i32> @fold_scalable_vector_interleave7_splat() {
+; CHECK-LABEL: define <vscale x 28 x i32> @fold_scalable_vector_interleave7_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret <vscale x 28 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+ ret <vscale x 28 x i32> %1
+}
+
define <32 x i32> @fold_vector_interleave8() {
; CHECK-LABEL: define <32 x i32> @fold_vector_interleave8() {
; CHECK-NEXT: ret <32 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32>
@@ -99,6 +207,24 @@ define <32 x i32> @fold_vector_interleave8() {
ret <32 x i32> %1
}
+define <vscale x 32 x i32> @fold_scalable_vector_interleave8() {
+; CHECK-LABEL: define <vscale x 32 x i32> @fold_scalable_vector_interleave8() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: ret <vscale x 32 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
+ ret <vscale x 32 x i32> %1
+}
+
+define <vscale x 32 x i32> @fold_scalable_vector_interleave8_splat() {
+; CHECK-LABEL: define <vscale x 32 x i32> @fold_scalable_vector_interleave8_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+; CHECK-NEXT: ret <vscale x 32 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
+ ret <vscale x 32 x i32> %1
+}
+
define {<4 x i32>, <4 x i32>} @fold_vector_deinterleave2() {
; CHECK-LABEL: define { <4 x i32>, <4 x i32> } @fold_vector_deinterleave2() {
; CHECK-NEXT: ret { <4 x i32>, <4 x i32> } { <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> <i32 5, i32 6, i32 7, i32 8> }
>From 3a8ad4bc74f385685b48a88988417c0ff2fce9aa Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 19 Nov 2025 09:58:43 -0800
Subject: [PATCH 2/3] fixup! negative test
---
.../Transforms/InstSimplify/ConstProp/vector-calls.ll | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
index af418bedd651b..b2af737b26937 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
@@ -69,6 +69,16 @@ define <vscale x 8 x i32> @fold_scalable_vector_interleave2_splat() {
ret <vscale x 8 x i32> %1
}
+; Negative test. The two operands splat different values (1 and 2), so the
+; interleave cannot be folded to a single splat constant.
+define <vscale x 8 x i32> @fold_scalable_vector_interleave2_mismatch_splat() {
+; CHECK-LABEL: define <vscale x 8 x i32> @fold_scalable_vector_interleave2_mismatch_splat() {
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 2))
+; CHECK-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+;
+ %1 = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 2))
+ ret <vscale x 8 x i32> %1
+}
+
define <12 x i32> @fold_vector_interleave3() {
; CHECK-LABEL: define <12 x i32> @fold_vector_interleave3() {
; CHECK-NEXT: ret <12 x i32> <i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11, i32 4, i32 8, i32 12>
>From d7c17ce6c604303be0334859b72b148699d4a705 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 18 Nov 2025 21:52:22 -0800
Subject: [PATCH 3/3] [ConstantFolding] Add constant folding for scalable
vector interleave intrinsics.
---
llvm/lib/Analysis/ConstantFolding.cpp | 16 +++++++
.../InstSimplify/ConstProp/vector-calls.ll | 42 +++++++------------
2 files changed, 30 insertions(+), 28 deletions(-)
mode change 100755 => 100644 llvm/lib/Analysis/ConstantFolding.cpp
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
old mode 100755
new mode 100644
index a13df6c5bf552..e1bbcec306edc
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -4307,6 +4307,22 @@ static Constant *ConstantFoldScalableVectorCall(
return ConstantVector::getNullValue(SVTy);
break;
}
+ case Intrinsic::vector_interleave2:
+ case Intrinsic::vector_interleave3:
+ case Intrinsic::vector_interleave4:
+ case Intrinsic::vector_interleave5:
+ case Intrinsic::vector_interleave6:
+ case Intrinsic::vector_interleave7:
+ case Intrinsic::vector_interleave8: {
+ Constant *SplatVal = Operands[0]->getSplatValue();
+ if (!SplatVal)
+ return nullptr;
+
+ if (!llvm::all_equal(Operands))
+ return nullptr;
+
+ return ConstantVector::getSplat(SVTy->getElementCount(), SplatVal);
+ }
default:
break;
}
diff --git a/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll b/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
index b2af737b26937..f40bb467cce2a 100644
--- a/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
+++ b/llvm/test/Transforms/InstSimplify/ConstProp/vector-calls.ll
@@ -53,8 +53,7 @@ define <8 x i32> @fold_vector_interleave2() {
define <vscale x 8 x i32> @fold_scalable_vector_interleave2() {
; CHECK-LABEL: define <vscale x 8 x i32> @fold_scalable_vector_interleave2() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 8 x i32> zeroinitializer
;
%1 = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 8 x i32> %1
@@ -62,8 +61,7 @@ define <vscale x 8 x i32> @fold_scalable_vector_interleave2() {
define <vscale x 8 x i32> @fold_scalable_vector_interleave2_splat() {
; CHECK-LABEL: define <vscale x 8 x i32> @fold_scalable_vector_interleave2_splat() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
-; CHECK-NEXT: ret <vscale x 8 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 8 x i32> splat (i32 1)
;
%1 = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
ret <vscale x 8 x i32> %1
@@ -89,8 +87,7 @@ define <12 x i32> @fold_vector_interleave3() {
define <vscale x 12 x i32> @fold_scalable_vector_interleave3() {
; CHECK-LABEL: define <vscale x 12 x i32> @fold_scalable_vector_interleave3() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: ret <vscale x 12 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 12 x i32> zeroinitializer
;
%1 = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv8i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 12 x i32> %1
@@ -98,8 +95,7 @@ define <vscale x 12 x i32> @fold_scalable_vector_interleave3() {
define <vscale x 12 x i32> @fold_scalable_vector_interleave3_splat() {
; CHECK-LABEL: define <vscale x 12 x i32> @fold_scalable_vector_interleave3_splat() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv12i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
-; CHECK-NEXT: ret <vscale x 12 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 12 x i32> splat (i32 1)
;
%1 = call <vscale x 12 x i32> @llvm.vector.interleave3.nxv8i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
ret <vscale x 12 x i32> %1
@@ -115,8 +111,7 @@ define <16 x i32> @fold_vector_interleave4() {
define <vscale x 16 x i32> @fold_scalable_vector_interleave4() {
; CHECK-LABEL: define <vscale x 16 x i32> @fold_scalable_vector_interleave4() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 16 x i32> zeroinitializer
;
%1 = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 16 x i32> %1
@@ -124,8 +119,7 @@ define <vscale x 16 x i32> @fold_scalable_vector_interleave4() {
define <vscale x 16 x i32> @fold_scalable_vector_interleave4_splat() {
; CHECK-LABEL: define <vscale x 16 x i32> @fold_scalable_vector_interleave4_splat() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
-; CHECK-NEXT: ret <vscale x 16 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 16 x i32> splat (i32 1)
;
%1 = call <vscale x 16 x i32> @llvm.vector.interleave4.nxv16i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
ret <vscale x 16 x i32> %1
@@ -141,8 +135,7 @@ define <20 x i32> @fold_vector_interleave5() {
define <vscale x 20 x i32> @fold_scalable_vector_interleave5() {
; CHECK-LABEL: define <vscale x 20 x i32> @fold_scalable_vector_interleave5() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: ret <vscale x 20 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 20 x i32> zeroinitializer
;
%1 = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 20 x i32> %1
@@ -150,8 +143,7 @@ define <vscale x 20 x i32> @fold_scalable_vector_interleave5() {
define <vscale x 20 x i32> @fold_scalable_vector_interleave5_splat() {
; CHECK-LABEL: define <vscale x 20 x i32> @fold_scalable_vector_interleave5_splat() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
-; CHECK-NEXT: ret <vscale x 20 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 20 x i32> splat (i32 1)
;
%1 = call <vscale x 20 x i32> @llvm.vector.interleave5.nxv20i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
ret <vscale x 20 x i32> %1
@@ -167,8 +159,7 @@ define <24 x i32> @fold_vector_interleave6() {
define <vscale x 24 x i32> @fold_scalable_vector_interleave6() {
; CHECK-LABEL: define <vscale x 24 x i32> @fold_scalable_vector_interleave6() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: ret <vscale x 24 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 24 x i32> zeroinitializer
;
%1 = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 24 x i32> %1
@@ -176,8 +167,7 @@ define <vscale x 24 x i32> @fold_scalable_vector_interleave6() {
define <vscale x 24 x i32> @fold_scalable_vector_interleave6_splat() {
; CHECK-LABEL: define <vscale x 24 x i32> @fold_scalable_vector_interleave6_splat() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
-; CHECK-NEXT: ret <vscale x 24 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 24 x i32> splat (i32 1)
;
%1 = call <vscale x 24 x i32> @llvm.vector.interleave6.nxv24i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
ret <vscale x 24 x i32> %1
@@ -193,8 +183,7 @@ define <28 x i32> @fold_vector_interleave7() {
define <vscale x 28 x i32> @fold_scalable_vector_interleave7() {
; CHECK-LABEL: define <vscale x 28 x i32> @fold_scalable_vector_interleave7() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: ret <vscale x 28 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 28 x i32> zeroinitializer
;
%1 = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 28 x i32> %1
@@ -202,8 +191,7 @@ define <vscale x 28 x i32> @fold_scalable_vector_interleave7() {
define <vscale x 28 x i32> @fold_scalable_vector_interleave7_splat() {
; CHECK-LABEL: define <vscale x 28 x i32> @fold_scalable_vector_interleave7_splat() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
-; CHECK-NEXT: ret <vscale x 28 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 28 x i32> splat (i32 1)
;
%1 = call <vscale x 28 x i32> @llvm.vector.interleave7.nxv28i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
ret <vscale x 28 x i32> %1
@@ -219,8 +207,7 @@ define <32 x i32> @fold_vector_interleave8() {
define <vscale x 32 x i32> @fold_scalable_vector_interleave8() {
; CHECK-LABEL: define <vscale x 32 x i32> @fold_scalable_vector_interleave8() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
-; CHECK-NEXT: ret <vscale x 32 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 32 x i32> zeroinitializer
;
%1 = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> zeroinitializer)
ret <vscale x 32 x i32> %1
@@ -228,8 +215,7 @@ define <vscale x 32 x i32> @fold_scalable_vector_interleave8() {
define <vscale x 32 x i32> @fold_scalable_vector_interleave8_splat() {
; CHECK-LABEL: define <vscale x 32 x i32> @fold_scalable_vector_interleave8_splat() {
-; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
-; CHECK-NEXT: ret <vscale x 32 x i32> [[TMP1]]
+; CHECK-NEXT: ret <vscale x 32 x i32> splat (i32 1)
;
%1 = call <vscale x 32 x i32> @llvm.vector.interleave8.nxv32i32(<vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1), <vscale x 4 x i32> splat (i32 1))
ret <vscale x 32 x i32> %1
More information about the llvm-commits
mailing list