[llvm] [InstCombine] Constant fold binops through `vector.insert` (PR #164624)

Benjamin Maxwell via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 22 06:20:55 PDT 2025


https://github.com/MacDue created https://github.com/llvm/llvm-project/pull/164624

This patch improves constant folding of binary operations through `llvm.vector.insert`. It does not change anything for fixed-length vectors (which already fold to ConstantVectors in these cases), but it folds operations on scalable vectors that would otherwise be left unfolded.

These folds preserve the destination vector (which could be undef or poison), giving targets more freedom when lowering the operations.
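For example (a sketch mirroring the `insert_div` test added below), a `udiv` of such a constant `vector.insert` by a constant splat can be folded by applying the division to the inserted subvector (and to the destination, if it is a non-poison constant):

  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 9), i64 0)
  %div = udiv <vscale x 4 x i32> %0, splat (i32 3)

becomes:

  %div = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 3), i64 0)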

From 513b92b7a54c93f33de21cadd7eaf909c7c8e9ce Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Wed, 22 Oct 2025 13:15:00 +0000
Subject: [PATCH 1/2] Precommit tests

---
 .../InstCombine/constant-vector-insert.ll     | 137 ++++++++++++++++++
 1 file changed, 137 insertions(+)
 create mode 100644 llvm/test/Transforms/InstCombine/constant-vector-insert.ll

diff --git a/llvm/test/Transforms/InstCombine/constant-vector-insert.ll b/llvm/test/Transforms/InstCombine/constant-vector-insert.ll
new file mode 100644
index 0000000000000..088cd8702ccb7
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/constant-vector-insert.ll
@@ -0,0 +1,137 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=instcombine %s | FileCheck %s
+
+define <vscale x 4 x i32> @insert_div() {
+; CHECK-LABEL: @insert_div(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 9), i64 0)
+; CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 4 x i32> [[TMP0]], splat (i32 3)
+; CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
+;
+entry:
+  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 9), i64 0)
+  %div = udiv <vscale x 4 x i32> %0, splat (i32 3)
+  ret <vscale x 4 x i32> %div
+}
+
+define <vscale x 4 x i32> @insert_div_splat_lhs() {
+; CHECK-LABEL: @insert_div_splat_lhs(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 9), i64 0)
+; CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 4 x i32> splat (i32 3), [[TMP0]]
+; CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
+;
+entry:
+  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 9), i64 0)
+  %div = udiv <vscale x 4 x i32> splat (i32 3), %0
+  ret <vscale x 4 x i32> %div
+}
+
+define <vscale x 4 x i32> @insert_div_mixed_splat() {
+; CHECK-LABEL: @insert_div_mixed_splat(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 18), <4 x i32> splat (i32 9), i64 0)
+; CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 4 x i32> [[TMP0]], splat (i32 3)
+; CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
+;
+entry:
+  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 18), <4 x i32> splat (i32 9), i64 0)
+  %div = udiv <vscale x 4 x i32> %0, splat (i32 3)
+  ret <vscale x 4 x i32> %div
+}
+
+define <vscale x 4 x i32> @insert_mul() {
+; CHECK-LABEL: @insert_mul(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 1), i64 4)
+; CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 4 x i32> [[TMP0]], splat (i32 7)
+; CHECK-NEXT:    ret <vscale x 4 x i32> [[MUL]]
+;
+entry:
+  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 1), i64 4)
+  %mul = mul <vscale x 4 x i32> %0, splat (i32 7)
+  ret <vscale x 4 x i32> %mul
+}
+
+define <vscale x 4 x i32> @insert_add() {
+; CHECK-LABEL: @insert_add(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 5), i64 0)
+; CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 4 x i32> [[TMP0]], splat (i32 11)
+; CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
+;
+entry:
+  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 5), i64 0)
+  %add = add <vscale x 4 x i32> %0, splat (i32 11)
+  ret <vscale x 4 x i32> %add
+}
+
+define <vscale x 8 x i32> @insert_add_scalable_subvector() {
+; CHECK-LABEL: @insert_add_scalable_subvector(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> splat (i32 16), <vscale x 4 x i32> splat (i32 -8), i64 0)
+; CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 8 x i32> [[TMP0]], splat (i32 4)
+; CHECK-NEXT:    ret <vscale x 8 x i32> [[ADD]]
+;
+entry:
+  %0 = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> splat(i32 16), <vscale x 4 x i32> splat (i32 -8), i64 0)
+  %add = add <vscale x 8 x i32> %0, splat (i32 4)
+  ret <vscale x 8 x i32> %add
+}
+
+define <vscale x 4 x i32> @insert_sub() {
+; CHECK-LABEL: @insert_sub(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 11), i64 8)
+; CHECK-NEXT:    [[SUB:%.*]] = add <vscale x 4 x i32> [[TMP0]], splat (i32 -11)
+; CHECK-NEXT:    ret <vscale x 4 x i32> [[SUB]]
+;
+entry:
+  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 11), i64 8)
+  %sub = add <vscale x 4 x i32> %0, splat (i32 -11)
+  ret <vscale x 4 x i32> %sub
+}
+
+define <vscale x 4 x i32> @insert_and_partially_undef() {
+; CHECK-LABEL: @insert_and_partially_undef(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> splat (i32 6), i64 0)
+; CHECK-NEXT:    [[AND:%.*]] = and <vscale x 4 x i32> [[TMP0]], splat (i32 4)
+; CHECK-NEXT:    ret <vscale x 4 x i32> [[AND]]
+;
+entry:
+  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> splat (i32 6), i64 0)
+  %and = and <vscale x 4 x i32> %0, splat (i32 4)
+  ret <vscale x 4 x i32> %and
+}
+
+define <vscale x 4 x i32> @insert_fold_chain() {
+; CHECK-LABEL: @insert_fold_chain(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 21), <4 x i32> splat (i32 12), i64 0)
+; CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 4 x i32> [[TMP0]], splat (i32 3)
+; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw <vscale x 4 x i32> [[DIV]], splat (i32 4)
+; CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
+;
+entry:
+  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 21), <4 x i32> splat (i32 12), i64 0)
+  %div = udiv <vscale x 4 x i32> %0, splat (i32 3)
+  %add = add <vscale x 4 x i32> %div, splat (i32 4)
+  ret <vscale x 4 x i32> %add
+}
+
+; TODO: This could be folded more.
+define <vscale x 4 x i32> @insert_add_both_insert_vector() {
+; CHECK-LABEL: @insert_add_both_insert_vector(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 10), <4 x i32> splat (i32 5), i64 0)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 -1), <4 x i32> splat (i32 2), i64 0)
+; CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 4 x i32> [[TMP0]], [[TMP1]]
+; CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
+;
+entry:
+  %0 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat(i32 10), <4 x i32> splat (i32 5), i64 0)
+  %1 = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat(i32 -1), <4 x i32> splat (i32 2), i64 0)
+  %add = add <vscale x 4 x i32> %0, %1
+  ret <vscale x 4 x i32> %add
+}

From 976fc0b6bde584c4909086da8281b565eddb88d4 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Wed, 22 Oct 2025 13:16:08 +0000
Subject: [PATCH 2/2] [InstCombine] Constant fold binops through
 `vector.insert`

This patch improves constant folding of binary operations through
`llvm.vector.insert`. It does not change anything for fixed-length
vectors (which already fold to ConstantVectors in these cases), but it
folds operations on scalable vectors that would otherwise be left
unfolded.

These folds preserve the destination vector (which could be undef or
poison), giving targets more freedom when lowering the operations.
---
 .../InstCombine/InstructionCombining.cpp      | 56 +++++++++++++++++++
 .../InstCombine/constant-vector-insert.ll     | 28 +++-------
 2 files changed, 65 insertions(+), 19 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 3f11cae143b81..05e8673b25433 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2323,6 +2323,32 @@ Constant *InstCombinerImpl::unshuffleConstant(ArrayRef<int> ShMask, Constant *C,
   return ConstantVector::get(NewVecC);
 }
 
+// Match a vector.insert where both the destination and subvector are constant.
+static bool matchConstantSubVector(Value *V, Constant *&Dest,
+                                   Constant *&SubVector, Value *&Idx) {
+  return match(V, m_Intrinsic<Intrinsic::vector_insert>(
+                      m_Constant(Dest), m_Constant(SubVector), m_Value(Idx)));
+}
+
+static Constant *matchConstantSplat(Value *V) {
+  Constant *C;
+  if (match(V, m_Constant(C)))
+    return C->getSplatValue();
+  return nullptr;
+}
+
+// Get the result of `Vector Op Splat` (or Splat Op Vector if \p SplatLHS).
+static Constant *constantFoldBinOpWithSplat(unsigned Opcode, Constant *Vector,
+                                            Constant *Splat, bool SplatLHS,
+                                            const DataLayout &DL) {
+  ElementCount EC = cast<VectorType>(Vector->getType())->getElementCount();
+  Constant *LHS = ConstantVector::getSplat(EC, Splat);
+  Constant *RHS = Vector;
+  if (!SplatLHS)
+    std::swap(LHS, RHS);
+  return ConstantFoldBinaryOpOperands(Opcode, LHS, RHS, DL);
+}
+
 Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
   if (!isa<VectorType>(Inst.getType()))
     return nullptr;
@@ -2334,6 +2360,36 @@ Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
   assert(cast<VectorType>(RHS->getType())->getElementCount() ==
          cast<VectorType>(Inst.getType())->getElementCount());
 
+  auto foldConstantsThroughSubVectorInsert =
+      [&](Constant *Dest, Value *DestIdx, Type *SubVecType, Constant *SubVector,
+          Constant *Splat, bool SplatLHS) -> Instruction * {
+    SubVector =
+        constantFoldBinOpWithSplat(Opcode, SubVector, Splat, SplatLHS, DL);
+    Dest = constantFoldBinOpWithSplat(Opcode, Dest, Splat, SplatLHS, DL);
+    if (!SubVector || !Dest)
+      return nullptr;
+    auto *InsertVector =
+        Builder.CreateInsertVector(Dest->getType(), Dest, SubVector, DestIdx);
+    InsertVector->removeFromParent();
+    return InsertVector;
+  };
+
+  // If one operand is a constant splat and the other operand is a
+  // `vector.insert` where both the destination and subvector are constant,
+  // apply the operation to both the destination and subvector, returning a new
+  // constant `vector.insert`. This helps constant folding for scalable vectors.
+  for (bool SwapOperands : {false, true}) {
+    Value *Idx, *MaybeSubVector = LHS, *MaybeSplat = RHS;
+    if (SwapOperands)
+      std::swap(MaybeSplat, MaybeSubVector);
+    Constant *SubVector, *Dest, *Splat;
+    if (matchConstantSubVector(MaybeSubVector, Dest, SubVector, Idx) &&
+        (Splat = matchConstantSplat(MaybeSplat)))
+      return foldConstantsThroughSubVectorInsert(
+          Dest, Idx, SubVector->getType(), SubVector, Splat,
+          /*SplatLHS=*/SwapOperands);
+  }
+
   // If both operands of the binop are vector concatenations, then perform the
   // narrow binop on each pair of the source operands followed by concatenation
   // of the results.
diff --git a/llvm/test/Transforms/InstCombine/constant-vector-insert.ll b/llvm/test/Transforms/InstCombine/constant-vector-insert.ll
index 088cd8702ccb7..aa1ac1eac3ba0 100644
--- a/llvm/test/Transforms/InstCombine/constant-vector-insert.ll
+++ b/llvm/test/Transforms/InstCombine/constant-vector-insert.ll
@@ -4,8 +4,7 @@
 define <vscale x 4 x i32> @insert_div() {
 ; CHECK-LABEL: @insert_div(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 9), i64 0)
-; CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 4 x i32> [[TMP0]], splat (i32 3)
+; CHECK-NEXT:    [[DIV:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 3), i64 0)
 ; CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
 ;
 entry:
@@ -17,8 +16,7 @@ entry:
 define <vscale x 4 x i32> @insert_div_splat_lhs() {
 ; CHECK-LABEL: @insert_div_splat_lhs(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 9), i64 0)
-; CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 4 x i32> splat (i32 3), [[TMP0]]
+; CHECK-NEXT:    [[DIV:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> zeroinitializer, i64 0)
 ; CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
 ;
 entry:
@@ -30,8 +28,7 @@ entry:
 define <vscale x 4 x i32> @insert_div_mixed_splat() {
 ; CHECK-LABEL: @insert_div_mixed_splat(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 18), <4 x i32> splat (i32 9), i64 0)
-; CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 4 x i32> [[TMP0]], splat (i32 3)
+; CHECK-NEXT:    [[DIV:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 6), <4 x i32> splat (i32 3), i64 0)
 ; CHECK-NEXT:    ret <vscale x 4 x i32> [[DIV]]
 ;
 entry:
@@ -43,8 +40,7 @@ entry:
 define <vscale x 4 x i32> @insert_mul() {
 ; CHECK-LABEL: @insert_mul(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 1), i64 4)
-; CHECK-NEXT:    [[MUL:%.*]] = mul <vscale x 4 x i32> [[TMP0]], splat (i32 7)
+; CHECK-NEXT:    [[MUL:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 7), i64 4)
 ; CHECK-NEXT:    ret <vscale x 4 x i32> [[MUL]]
 ;
 entry:
@@ -56,8 +52,7 @@ entry:
 define <vscale x 4 x i32> @insert_add() {
 ; CHECK-LABEL: @insert_add(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 5), i64 0)
-; CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 4 x i32> [[TMP0]], splat (i32 11)
+; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 16), i64 0)
 ; CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
 ;
 entry:
@@ -69,8 +64,7 @@ entry:
 define <vscale x 8 x i32> @insert_add_scalable_subvector() {
 ; CHECK-LABEL: @insert_add_scalable_subvector(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> splat (i32 16), <vscale x 4 x i32> splat (i32 -8), i64 0)
-; CHECK-NEXT:    [[ADD:%.*]] = add <vscale x 8 x i32> [[TMP0]], splat (i32 4)
+; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> splat (i32 20), <vscale x 4 x i32> splat (i32 -4), i64 0)
 ; CHECK-NEXT:    ret <vscale x 8 x i32> [[ADD]]
 ;
 entry:
@@ -82,8 +76,7 @@ entry:
 define <vscale x 4 x i32> @insert_sub() {
 ; CHECK-LABEL: @insert_sub(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> splat (i32 11), i64 8)
-; CHECK-NEXT:    [[SUB:%.*]] = add <vscale x 4 x i32> [[TMP0]], splat (i32 -11)
+; CHECK-NEXT:    [[SUB:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> poison, <4 x i32> zeroinitializer, i64 8)
 ; CHECK-NEXT:    ret <vscale x 4 x i32> [[SUB]]
 ;
 entry:
@@ -95,8 +88,7 @@ entry:
 define <vscale x 4 x i32> @insert_and_partially_undef() {
 ; CHECK-LABEL: @insert_and_partially_undef(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> undef, <4 x i32> splat (i32 6), i64 0)
-; CHECK-NEXT:    [[AND:%.*]] = and <vscale x 4 x i32> [[TMP0]], splat (i32 4)
+; CHECK-NEXT:    [[AND:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> zeroinitializer, <4 x i32> splat (i32 4), i64 0)
 ; CHECK-NEXT:    ret <vscale x 4 x i32> [[AND]]
 ;
 entry:
@@ -108,9 +100,7 @@ entry:
 define <vscale x 4 x i32> @insert_fold_chain() {
 ; CHECK-LABEL: @insert_fold_chain(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 21), <4 x i32> splat (i32 12), i64 0)
-; CHECK-NEXT:    [[DIV:%.*]] = udiv <vscale x 4 x i32> [[TMP0]], splat (i32 3)
-; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw <vscale x 4 x i32> [[DIV]], splat (i32 4)
+; CHECK-NEXT:    [[ADD:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32> splat (i32 11), <4 x i32> splat (i32 8), i64 0)
 ; CHECK-NEXT:    ret <vscale x 4 x i32> [[ADD]]
 ;
 entry:
