[llvm] [InstCombine] Bubble right spliced reverses of binop operands to their result (PR #179432)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 3 03:19:46 PST 2026
https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/179432
>From 2271280cd7d9f6c3c111d98c281fe719d3f01c61 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 3 Feb 2026 18:16:58 +0800
Subject: [PATCH 1/3] Precommit tests
---
.../Transforms/InstCombine/vector-reverse.ll | 89 +++++++++++++++++++
1 file changed, 89 insertions(+)
diff --git a/llvm/test/Transforms/InstCombine/vector-reverse.ll b/llvm/test/Transforms/InstCombine/vector-reverse.ll
index ee60d31d1ddbe..33802be40d5d9 100644
--- a/llvm/test/Transforms/InstCombine/vector-reverse.ll
+++ b/llvm/test/Transforms/InstCombine/vector-reverse.ll
@@ -843,6 +843,95 @@ define <vscale x 4 x float> @reverse_unop_intrinsic_reverse_scalar_arg(<vscale x
ret <vscale x 4 x float> %powi.rev
}
+define <vscale x 4 x i32> @binop_reverse_splice(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 %offset) {
+; CHECK-LABEL: @binop_reverse_splice(
+; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A1:%.*]])
+; CHECK-NEXT: [[A:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[A_REV]], <vscale x 4 x i32> poison, i32 [[OFFSET:%.*]])
+; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B1:%.*]])
+; CHECK-NEXT: [[B:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[B_REV]], <vscale x 4 x i32> poison, i32 [[OFFSET]])
+; CHECK-NEXT: [[ADD1:%.*]] = add <vscale x 4 x i32> [[A]], [[B]]
+; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD1]]
+;
+ %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse(<vscale x 4 x i32> %a)
+ %a.splice = tail call <vscale x 4 x i32> @llvm.vector.splice.right(<vscale x 4 x i32> %a.rev, <vscale x 4 x i32> poison, i32 %offset)
+ %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse(<vscale x 4 x i32> %b)
+ %b.splice = tail call <vscale x 4 x i32> @llvm.vector.splice.right(<vscale x 4 x i32> %b.rev, <vscale x 4 x i32> poison, i32 %offset)
+ %add = add <vscale x 4 x i32> %a.splice, %b.splice
+ ret <vscale x 4 x i32> %add
+}
+
+; Negative test - splices have different offsets
+define <vscale x 4 x i32> @binop_reverse_splice_mismatched_offset(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 %offset1, i32 %offset2) {
+; CHECK-LABEL: @binop_reverse_splice_mismatched_offset(
+; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
+; CHECK-NEXT: [[A_SPLICE:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[A_REV]], <vscale x 4 x i32> poison, i32 [[OFFSET1:%.*]])
+; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
+; CHECK-NEXT: [[B_SPLICE:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[B_REV]], <vscale x 4 x i32> poison, i32 [[OFFSET2:%.*]])
+; CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A_SPLICE]], [[B_SPLICE]]
+; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
+;
+ %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse(<vscale x 4 x i32> %a)
+ %a.splice = tail call <vscale x 4 x i32> @llvm.vector.splice.right(<vscale x 4 x i32> %a.rev, <vscale x 4 x i32> poison, i32 %offset1)
+ %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse(<vscale x 4 x i32> %b)
+ %b.splice = tail call <vscale x 4 x i32> @llvm.vector.splice.right(<vscale x 4 x i32> %b.rev, <vscale x 4 x i32> poison, i32 %offset2)
+ %add = add <vscale x 4 x i32> %a.splice, %b.splice
+ ret <vscale x 4 x i32> %add
+}
+
+; Negative test - %a.rev has multiple uses
+define <vscale x 4 x i32> @binop_reverse_splice_multiuse(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 %offset) {
+; CHECK-LABEL: @binop_reverse_splice_multiuse(
+; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A:%.*]])
+; CHECK-NEXT: [[A_SPLICE:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[A_REV]], <vscale x 4 x i32> poison, i32 [[OFFSET:%.*]])
+; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B:%.*]])
+; CHECK-NEXT: [[B_SPLICE:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[B_REV]], <vscale x 4 x i32> poison, i32 [[OFFSET]])
+; CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A_SPLICE]], [[B_SPLICE]]
+; CHECK-NEXT: [[ADD2:%.*]] = add <vscale x 4 x i32> [[ADD]], [[A_REV]]
+; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD2]]
+;
+ %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse(<vscale x 4 x i32> %a)
+ %a.splice = tail call <vscale x 4 x i32> @llvm.vector.splice.right(<vscale x 4 x i32> %a.rev, <vscale x 4 x i32> poison, i32 %offset)
+ %b.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse(<vscale x 4 x i32> %b)
+ %b.splice = tail call <vscale x 4 x i32> @llvm.vector.splice.right(<vscale x 4 x i32> %b.rev, <vscale x 4 x i32> poison, i32 %offset)
+ %add = add <vscale x 4 x i32> %a.splice, %b.splice
+ %add2 = add <vscale x 4 x i32> %add, %a.rev
+ ret <vscale x 4 x i32> %add2
+}
+
+define <vscale x 4 x i32> @binop_reverse_splice_rhs_splat(<vscale x 4 x i32> %a, i32 %b, i32 %offset) {
+; CHECK-LABEL: @binop_reverse_splice_rhs_splat(
+; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A1:%.*]])
+; CHECK-NEXT: [[A:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[A_REV]], <vscale x 4 x i32> poison, i32 [[OFFSET:%.*]])
+; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
+; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[B_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[ADD1:%.*]] = add <vscale x 4 x i32> [[A]], [[B_SPLAT]]
+; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD1]]
+;
+ %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse(<vscale x 4 x i32> %a)
+ %a.splice = tail call <vscale x 4 x i32> @llvm.vector.splice.right(<vscale x 4 x i32> %a.rev, <vscale x 4 x i32> poison, i32 %offset)
+ %b.insert = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
+ %b.splat = shufflevector <vscale x 4 x i32> %b.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %add = add <vscale x 4 x i32> %a.splice, %b.splat
+ ret <vscale x 4 x i32> %add
+}
+
+define <vscale x 4 x i32> @binop_reverse_splice_lhs_splat(<vscale x 4 x i32> %a, i32 %b, i32 %offset) {
+; CHECK-LABEL: @binop_reverse_splice_lhs_splat(
+; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A1:%.*]])
+; CHECK-NEXT: [[A:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[A_REV]], <vscale x 4 x i32> poison, i32 [[OFFSET:%.*]])
+; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
+; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[B_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT: [[ADD1:%.*]] = add <vscale x 4 x i32> [[B_SPLAT]], [[A]]
+; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD1]]
+;
+ %a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse(<vscale x 4 x i32> %a)
+ %a.splice = tail call <vscale x 4 x i32> @llvm.vector.splice.right(<vscale x 4 x i32> %a.rev, <vscale x 4 x i32> poison, i32 %offset)
+ %b.insert = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
+ %b.splat = shufflevector <vscale x 4 x i32> %b.insert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %add = add <vscale x 4 x i32> %b.splat, %a.splice
+ ret <vscale x 4 x i32> %add
+}
+
declare void @use_nxv4i1(<vscale x 4 x i1>)
declare void @use_nxv4i32(<vscale x 4 x i32>)
declare void @use_nxv4f32(<vscale x 4 x float>)
>From bfd6f9630d307ebdcc49575f6ed2098dd8e1a0f0 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 3 Feb 2026 18:18:20 +0800
Subject: [PATCH 2/3] [InstCombine] Bubble left spliced reverses of binop
operands to their result
In #172961 we are trying to remove llvm.experimental.vp.reverse now that llvm.vector.splice.right supports variable offsets.
A VP reverse reverses the first EVL elements of the vector, e.g. 01234567 -> 210xxxxx when EVL=3, where x=poison.
This can now be represented by splice.right(reverse(V), poison, EVL):
01234567
-> 76543210 (reverse)
-> 210xxxxx (splice.right)
This PR implements the vp.reverse combines that pull through binops, but on vector.splice.right. We can then remove the vp.reverse intrinsic and its related combines soon after, once we migrate the loop vectorizer over.
---
.../InstCombine/InstructionCombining.cpp | 34 +++++++++++++++++++
.../Transforms/InstCombine/vector-reverse.ll | 26 +++++++-------
2 files changed, 46 insertions(+), 14 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index a9904f7867e94..39ca23796cfe6 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2487,6 +2487,40 @@ Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
return createBinOpReverse(LHS, V2);
+ auto createBinOpSpliceReverse = [&](Value *X, Value *Y, Value *Offset) {
+ Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
+ if (auto *BO = dyn_cast<BinaryOperator>(V))
+ BO->copyIRFlags(&Inst);
+ Module *M = Inst.getModule();
+ Function *F = Intrinsic::getOrInsertDeclaration(
+ M, Intrinsic::vector_splice_right, V->getType());
+ return CallInst::Create(F, {Builder.CreateVectorReverse(V),
+ PoisonValue::get(X->getType()), Offset});
+ };
+ auto m_SpliceReverse = [](auto V, auto Offset) {
+ return m_Intrinsic<Intrinsic::vector_splice_right>(m_OneUse(m_VecReverse(V)),
+ m_Poison(), Offset);
+ };
+ Value *Offset;
+ if (match(LHS, m_SpliceReverse(m_Value(V1), m_Value(Offset)))) {
+ // Op(splice.right(rev(V1),poison,offset),splice.right(rev(V2),poison,offset))
+ // -> splice.right(rev(Op(V1, V2)), poison, offset)
+ if (match(RHS, m_SpliceReverse(m_Value(V2), m_Specific(Offset))) &&
+ (LHS->hasOneUse() || RHS->hasOneUse() ||
+ (LHS == RHS && LHS->hasNUses(2))))
+ return createBinOpSpliceReverse(V1, V2, Offset);
+
+ // Op(splice.right(rev(V1), poison, offset), RHSSplat)
+ // -> splice.right(rev(Op(V1, RHSSplat)), poison, offset)
+ if (LHS->hasOneUse() && isSplatValue(RHS))
+ return createBinOpSpliceReverse(V1, RHS, Offset);
+ }
+ // Op(LHSSplat, splice.right(rev(V2), poison, offset))
+ // -> splice.right(rev(Op(LHSSplat, V2)), poison, offset)
+ else if (isSplatValue(LHS) &&
+ match(RHS, m_OneUse(m_SpliceReverse(m_Value(V2), m_Value(Offset)))))
+ return createBinOpSpliceReverse(LHS, V2, Offset);
+
auto createBinOpVPReverse = [&](Value *X, Value *Y, Value *EVL) {
Value *V = Builder.CreateBinOp(Opcode, X, Y, Inst.getName());
if (auto *BO = dyn_cast<BinaryOperator>(V))
diff --git a/llvm/test/Transforms/InstCombine/vector-reverse.ll b/llvm/test/Transforms/InstCombine/vector-reverse.ll
index 33802be40d5d9..ba35e7a2437f1 100644
--- a/llvm/test/Transforms/InstCombine/vector-reverse.ll
+++ b/llvm/test/Transforms/InstCombine/vector-reverse.ll
@@ -845,12 +845,10 @@ define <vscale x 4 x float> @reverse_unop_intrinsic_reverse_scalar_arg(<vscale x
define <vscale x 4 x i32> @binop_reverse_splice(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 %offset) {
; CHECK-LABEL: @binop_reverse_splice(
-; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A1:%.*]])
-; CHECK-NEXT: [[A:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[A_REV]], <vscale x 4 x i32> poison, i32 [[OFFSET:%.*]])
-; CHECK-NEXT: [[B_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[B1:%.*]])
-; CHECK-NEXT: [[B:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[B_REV]], <vscale x 4 x i32> poison, i32 [[OFFSET]])
-; CHECK-NEXT: [[ADD1:%.*]] = add <vscale x 4 x i32> [[A]], [[B]]
-; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD1]]
+; CHECK-NEXT: [[ADD1:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[ADD1]])
+; CHECK-NEXT: [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> poison, i32 [[OFFSET:%.*]])
+; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
;
%a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse(<vscale x 4 x i32> %a)
%a.splice = tail call <vscale x 4 x i32> @llvm.vector.splice.right(<vscale x 4 x i32> %a.rev, <vscale x 4 x i32> poison, i32 %offset)
@@ -900,12 +898,12 @@ define <vscale x 4 x i32> @binop_reverse_splice_multiuse(<vscale x 4 x i32> %a,
define <vscale x 4 x i32> @binop_reverse_splice_rhs_splat(<vscale x 4 x i32> %a, i32 %b, i32 %offset) {
; CHECK-LABEL: @binop_reverse_splice_rhs_splat(
-; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A1:%.*]])
-; CHECK-NEXT: [[A:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[A_REV]], <vscale x 4 x i32> poison, i32 [[OFFSET:%.*]])
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[B_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[ADD1:%.*]] = add <vscale x 4 x i32> [[A]], [[B_SPLAT]]
-; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD1]]
+; CHECK-NEXT: [[ADD1:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B_SPLAT]]
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[ADD1]])
+; CHECK-NEXT: [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> poison, i32 [[OFFSET:%.*]])
+; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
;
%a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse(<vscale x 4 x i32> %a)
%a.splice = tail call <vscale x 4 x i32> @llvm.vector.splice.right(<vscale x 4 x i32> %a.rev, <vscale x 4 x i32> poison, i32 %offset)
@@ -917,12 +915,12 @@ define <vscale x 4 x i32> @binop_reverse_splice_rhs_splat(<vscale x 4 x i32> %a,
define <vscale x 4 x i32> @binop_reverse_splice_lhs_splat(<vscale x 4 x i32> %a, i32 %b, i32 %offset) {
; CHECK-LABEL: @binop_reverse_splice_lhs_splat(
-; CHECK-NEXT: [[A_REV:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[A1:%.*]])
-; CHECK-NEXT: [[A:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[A_REV]], <vscale x 4 x i32> poison, i32 [[OFFSET:%.*]])
; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[B_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT: [[ADD1:%.*]] = add <vscale x 4 x i32> [[B_SPLAT]], [[A]]
-; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD1]]
+; CHECK-NEXT: [[ADD1:%.*]] = add <vscale x 4 x i32> [[B_SPLAT]], [[A:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[ADD1]])
+; CHECK-NEXT: [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.vector.splice.right.nxv4i32(<vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> poison, i32 [[OFFSET:%.*]])
+; CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
;
%a.rev = tail call <vscale x 4 x i32> @llvm.vector.reverse(<vscale x 4 x i32> %a)
%a.splice = tail call <vscale x 4 x i32> @llvm.vector.splice.right(<vscale x 4 x i32> %a.rev, <vscale x 4 x i32> poison, i32 %offset)
>From 1a92074c5a37c360a7f407588885a24d8e74ad62 Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Tue, 3 Feb 2026 19:19:26 +0800
Subject: [PATCH 3/3] clang-format
---
llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 39ca23796cfe6..053fb7f62d268 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2498,8 +2498,8 @@ Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
PoisonValue::get(X->getType()), Offset});
};
auto m_SpliceReverse = [](auto V, auto Offset) {
- return m_Intrinsic<Intrinsic::vector_splice_right>(m_OneUse(m_VecReverse(V)),
- m_Poison(), Offset);
+ return m_Intrinsic<Intrinsic::vector_splice_right>(
+ m_OneUse(m_VecReverse(V)), m_Poison(), Offset);
};
Value *Offset;
if (match(LHS, m_SpliceReverse(m_Value(V1), m_Value(Offset)))) {
More information about the llvm-commits
mailing list