[llvm] [LV] Convert uniform-address unmasked scatters to scalar store. (PR #166114)

Elvis Wang via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 23 17:46:51 PST 2025


https://github.com/ElvisWang123 updated https://github.com/llvm/llvm-project/pull/166114

>From e57c74554b30245a166e0583c93a47a258124369 Mon Sep 17 00:00:00 2001
From: Elvis Wang <elvis.wang at sifive.com>
Date: Wed, 12 Nov 2025 15:32:37 -0800
Subject: [PATCH 1/5] Precommit test.

---
 .../RISCV/narrow-scatter-to-scalar-store.ll   | 52 +++++++++++++++++++
 1 file changed, 52 insertions(+)
 create mode 100644 llvm/test/Transforms/LoopVectorize/RISCV/narrow-scatter-to-scalar-store.ll

diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/narrow-scatter-to-scalar-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/narrow-scatter-to-scalar-store.ll
new file mode 100644
index 0000000000000..e49f64d3a82d1
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/narrow-scatter-to-scalar-store.ll
@@ -0,0 +1,52 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=loop-vectorize -force-vector-width=2 -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s
+define void @truncate_i16_to_i8_cse(ptr noalias %src, ptr noalias %dst) {
+; CHECK-LABEL: define void @truncate_i16_to_i8_cse(
+; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x ptr> poison, ptr [[DST]], i64 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT1:%.*]] = shufflevector <2 x ptr> [[BROADCAST_SPLATINSERT1]], <2 x ptr> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[SRC]], align 2
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i16> poison, i16 [[TMP0]], i64 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i16> [[BROADCAST_SPLATINSERT]], <2 x i16> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc <2 x i16> [[BROADCAST_SPLAT]] to <2 x i8>
+; CHECK-NEXT:    call void @llvm.masked.scatter.v2i8.v2p0(<2 x i8> [[TMP1]], <2 x ptr> align 1 zeroinitializer, <2 x i1> splat (i1 true))
+; CHECK-NEXT:    call void @llvm.masked.scatter.v2i8.v2p0(<2 x i8> [[TMP1]], <2 x ptr> align 1 [[BROADCAST_SPLAT1]], <2 x i1> splat (i1 true))
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4294967296
+; CHECK-NEXT:    br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %count = phi i32 [ 0, %entry ], [ %count.next, %loop ]
+  %val = load i16, ptr %src, align 2
+  %val.zext = zext i16 %val to i64
+  %val.trunc.zext = trunc i64 %val.zext to i8
+  store i8 %val.trunc.zext, ptr null, align 1
+  %val.trunc = trunc i16 %val to i8
+  store i8 %val.trunc, ptr %dst, align 1
+  %count.next = add i32 %count, 1
+  %exitcond = icmp eq i32 %count.next, 0
+  %iv.next = add i64 %iv, 1
+  br i1 %exitcond, label %exit, label %loop
+
+exit:
+  ret void
+}
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+;.

>From 8f59bef5c4daf7d0313b9047bdfa1f929422453c Mon Sep 17 00:00:00 2001
From: Elvis Wang <elvis.wang at sifive.com>
Date: Sun, 2 Nov 2025 16:29:40 -0800
Subject: [PATCH 2/5] [LV] Convert uniform-address scatters to scalar store
 when unmasked.

This patch optimizes vector scatters that have a uniform (single-scalar) address by replacing them with "extract-last-element + scalar store" when the scatter is unmasked.

In all of these cases, at least one lane is guaranteed to execute in each vector iteration, so storing the last active element is sufficient.

Implementation:

- Add optimizeScatterWithUniformAddr(VPlan &), and invoke it from VPlanTransforms::optimize().
- Identify non-consecutive VPWidenStoreRecipe/VPWidenStoreEVLRecipe with uniform addresses.
- Replace the scatter with VPInstruction::ExtractLastElement of the stored value and a VPReplicateRecipe (scalar) store.
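
For illustration, a minimal before/after sketch in LLVM IR (value names are placeholders, not taken from the actual test output):

  ; Before: scatter through a splat of the uniform address %dst
  call void @llvm.masked.scatter.v2i8.v2p0(<2 x i8> %vals, <2 x ptr> align 1 %dst.splat, <2 x i1> splat (i1 true))

  ; After: extract the last element and emit a single scalar store
  %last = extractelement <2 x i8> %vals, i32 1
  store i8 %last, ptr %dst, align 1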

Notes:

- The legacy cost model can scalarize a store if both the address and the value are uniform. In VPlan we materialize the stored value via ExtractLastElement, so only the address must be uniform.
- Some of the loops won't be vectorized anymore since no vector instructions
will be generated.

I plan to have a follow-up patch to convert uniform-address scatters
to scalar stores when the mask is the header mask. This requires
`extract-last-active-element` to get the correct value to store.
---
 .../Transforms/Vectorize/VPlanTransforms.cpp  | 41 +++++++++++++++++--
 .../RISCV/narrow-scatter-to-scalar-store.ll   |  7 ++--
 2 files changed, 40 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index e8fea6851dae5..0d61c88489930 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -1400,14 +1400,47 @@ static void narrowToSingleScalarRecipes(VPlan &Plan) {
   for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>(
            vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
     for (VPRecipeBase &R : make_early_inc_range(reverse(*VPBB))) {
-      if (!isa<VPWidenRecipe, VPWidenSelectRecipe, VPReplicateRecipe>(&R))
+      if (!isa<VPWidenRecipe, VPWidenSelectRecipe, VPReplicateRecipe,
+               VPWidenMemoryRecipe>(&R))
         continue;
       auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
       if (RepR && (RepR->isSingleScalar() || RepR->isPredicated()))
         continue;
 
-      auto *RepOrWidenR = cast<VPSingleDefRecipe>(&R);
-      if (RepR && isa<StoreInst>(RepR->getUnderlyingInstr()) &&
+      // Convert scatters with a uniform address that is unmasked into an
+      // extract-last-element + scalar store.
+      //  TODO: Add a profitability check comparing the cost of a scatter vs.
+      //  extract + scalar store.
+      auto *WidenStoreR = dyn_cast<VPWidenMemoryRecipe>(&R);
+      if (WidenStoreR && vputils::isSingleScalar(WidenStoreR->getAddr()) &&
+          !WidenStoreR->isConsecutive() &&
+          isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe>(WidenStoreR)) {
+        assert(!WidenStoreR->isReverse() &&
+               "Not consecutive memory recipes shouldn't be reversed");
+        VPValue *Mask = WidenStoreR->getMask();
+
+        // Only convert the scatter to a scalar store if it is unmasked. or
+        // TODO: Support converting scatter masked by the header mask to scalar
+        // store.
+        if (Mask)
+          continue;
+
+        auto *Extract = new VPInstruction(VPInstruction::ExtractLastElement,
+                                          {WidenStoreR->getOperand(1)});
+        Extract->insertBefore(WidenStoreR);
+
+        // TODO: Sink the scalar store recipe to middle block if possible.
+        auto *ScalarStore = new VPReplicateRecipe(
+            &WidenStoreR->getIngredient(), {Extract, WidenStoreR->getAddr()},
+            true /*IsSingleScalar*/, nullptr /*Mask*/,
+            *WidenStoreR /*Metadata*/);
+        ScalarStore->insertBefore(WidenStoreR);
+        WidenStoreR->eraseFromParent();
+        continue;
+      }
+
+      auto *RepOrWidenR = dyn_cast<VPSingleDefRecipe>(&R);
+      if (RepR && RepOrWidenR && isa<StoreInst>(RepR->getUnderlyingInstr()) &&
           vputils::isSingleScalar(RepR->getOperand(1))) {
         auto *Clone = new VPReplicateRecipe(
             RepOrWidenR->getUnderlyingInstr(), RepOrWidenR->operands(),
@@ -1427,7 +1460,7 @@ static void narrowToSingleScalarRecipes(VPlan &Plan) {
       // Skip recipes that aren't single scalars or don't have only their
       // scalar results used. In the latter case, we would introduce extra
       // broadcasts.
-      if (!vputils::isSingleScalar(RepOrWidenR) ||
+      if (!RepOrWidenR || !vputils::isSingleScalar(RepOrWidenR) ||
           !all_of(RepOrWidenR->users(), [RepOrWidenR](const VPUser *U) {
             if (auto *Store = dyn_cast<VPWidenStoreRecipe>(U)) {
               // VPWidenStore doesn't have users, and stores are always
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/narrow-scatter-to-scalar-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/narrow-scatter-to-scalar-store.ll
index e49f64d3a82d1..5c5674dbb3f20 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/narrow-scatter-to-scalar-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/narrow-scatter-to-scalar-store.ll
@@ -6,8 +6,6 @@ define void @truncate_i16_to_i8_cse(ptr noalias %src, ptr noalias %dst) {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
 ; CHECK:       [[VECTOR_PH]]:
-; CHECK-NEXT:    [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x ptr> poison, ptr [[DST]], i64 0
-; CHECK-NEXT:    [[BROADCAST_SPLAT1:%.*]] = shufflevector <2 x ptr> [[BROADCAST_SPLATINSERT1]], <2 x ptr> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
 ; CHECK:       [[VECTOR_BODY]]:
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -15,8 +13,9 @@ define void @truncate_i16_to_i8_cse(ptr noalias %src, ptr noalias %dst) {
 ; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i16> poison, i16 [[TMP0]], i64 0
 ; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i16> [[BROADCAST_SPLATINSERT]], <2 x i16> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc <2 x i16> [[BROADCAST_SPLAT]] to <2 x i8>
-; CHECK-NEXT:    call void @llvm.masked.scatter.v2i8.v2p0(<2 x i8> [[TMP1]], <2 x ptr> align 1 zeroinitializer, <2 x i1> splat (i1 true))
-; CHECK-NEXT:    call void @llvm.masked.scatter.v2i8.v2p0(<2 x i8> [[TMP1]], <2 x ptr> align 1 [[BROADCAST_SPLAT1]], <2 x i1> splat (i1 true))
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i8> [[TMP1]], i32 1
+; CHECK-NEXT:    store i8 [[TMP2]], ptr null, align 1
+; CHECK-NEXT:    store i8 [[TMP2]], ptr [[DST]], align 1
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4294967296
 ; CHECK-NEXT:    br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]

>From 9ac48e0fac059a86263099823526c1672900222b Mon Sep 17 00:00:00 2001
From: Elvis Wang <elvis.wang at sifive.com>
Date: Wed, 12 Nov 2025 23:31:08 -0800
Subject: [PATCH 3/5] !fixup, Address comments.

---
 llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 0d61c88489930..53c0805326d0c 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -1401,7 +1401,7 @@ static void narrowToSingleScalarRecipes(VPlan &Plan) {
            vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) {
     for (VPRecipeBase &R : make_early_inc_range(reverse(*VPBB))) {
       if (!isa<VPWidenRecipe, VPWidenSelectRecipe, VPReplicateRecipe,
-               VPWidenMemoryRecipe>(&R))
+               VPWidenStoreRecipe>(&R))
         continue;
       auto *RepR = dyn_cast<VPReplicateRecipe>(&R);
       if (RepR && (RepR->isSingleScalar() || RepR->isPredicated()))
@@ -1409,12 +1409,11 @@ static void narrowToSingleScalarRecipes(VPlan &Plan) {
 
       // Convert scatters with a uniform address that is unmasked into an
       // extract-last-element + scalar store.
-      //  TODO: Add a profitability check comparing the cost of a scatter vs.
-      //  extract + scalar store.
-      auto *WidenStoreR = dyn_cast<VPWidenMemoryRecipe>(&R);
+      // TODO: Add a profitability check comparing the cost of a scatter vs.
+      // extract + scalar store.
+      auto *WidenStoreR = dyn_cast<VPWidenStoreRecipe>(&R);
       if (WidenStoreR && vputils::isSingleScalar(WidenStoreR->getAddr()) &&
-          !WidenStoreR->isConsecutive() &&
-          isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe>(WidenStoreR)) {
+          !WidenStoreR->isConsecutive()) {
         assert(!WidenStoreR->isReverse() &&
                "Not consecutive memory recipes shouldn't be reversed");
         VPValue *Mask = WidenStoreR->getMask();

>From 2ee05b99a94049ef5d37941ee89b296c8c586985 Mon Sep 17 00:00:00 2001
From: Elvis Wang <elvis.wang at sifive.com>
Date: Mon, 17 Nov 2025 18:04:58 -0800
Subject: [PATCH 4/5] Address comments.

---
 llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp      |  2 +-
 .../{RISCV => }/narrow-scatter-to-scalar-store.ll      | 10 +++++-----
 2 files changed, 6 insertions(+), 6 deletions(-)
 rename llvm/test/Transforms/LoopVectorize/{RISCV => }/narrow-scatter-to-scalar-store.ll (87%)

diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 53c0805326d0c..72e56f44aea89 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -1418,7 +1418,7 @@ static void narrowToSingleScalarRecipes(VPlan &Plan) {
                "Not consecutive memory recipes shouldn't be reversed");
         VPValue *Mask = WidenStoreR->getMask();
 
-        // Only convert the scatter to a scalar store if it is unmasked. or
+        // Only convert the scatter to a scalar store if it is unmasked.
         // TODO: Support converting scatter masked by the header mask to scalar
         // store.
         if (Mask)
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/narrow-scatter-to-scalar-store.ll b/llvm/test/Transforms/LoopVectorize/narrow-scatter-to-scalar-store.ll
similarity index 87%
rename from llvm/test/Transforms/LoopVectorize/RISCV/narrow-scatter-to-scalar-store.ll
rename to llvm/test/Transforms/LoopVectorize/narrow-scatter-to-scalar-store.ll
index 5c5674dbb3f20..5a2744e4c007b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/narrow-scatter-to-scalar-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/narrow-scatter-to-scalar-store.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -passes=loop-vectorize -force-vector-width=2 -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s
-define void @truncate_i16_to_i8_cse(ptr noalias %src, ptr noalias %dst) {
+; RUN: opt -passes=loop-vectorize -force-vector-width=2 -S %s | FileCheck %s
+define void @truncate_i16_to_i8_cse(ptr noalias %src, ptr noalias %dst, ptr %dst2) {
 ; CHECK-LABEL: define void @truncate_i16_to_i8_cse(
-; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], ptr [[DST2:%.*]]) {
 ; CHECK-NEXT:  [[ENTRY:.*:]]
 ; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
 ; CHECK:       [[VECTOR_PH]]:
@@ -14,7 +14,7 @@ define void @truncate_i16_to_i8_cse(ptr noalias %src, ptr noalias %dst) {
 ; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i16> [[BROADCAST_SPLATINSERT]], <2 x i16> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT:    [[TMP1:%.*]] = trunc <2 x i16> [[BROADCAST_SPLAT]] to <2 x i8>
 ; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i8> [[TMP1]], i32 1
-; CHECK-NEXT:    store i8 [[TMP2]], ptr null, align 1
+; CHECK-NEXT:    store i8 [[TMP2]], ptr [[DST2]], align 1
 ; CHECK-NEXT:    store i8 [[TMP2]], ptr [[DST]], align 1
 ; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
 ; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4294967296
@@ -33,7 +33,7 @@ loop:
   %val = load i16, ptr %src, align 2
   %val.zext = zext i16 %val to i64
   %val.trunc.zext = trunc i64 %val.zext to i8
-  store i8 %val.trunc.zext, ptr null, align 1
+  store i8 %val.trunc.zext, ptr %dst2, align 1
   %val.trunc = trunc i16 %val to i8
   store i8 %val.trunc, ptr %dst, align 1
   %count.next = add i32 %count, 1

>From 91f9eaf9d305314775a6447bdc9eb6da5f3b374c Mon Sep 17 00:00:00 2001
From: Elvis Wang <elvis.wang at sifive.com>
Date: Sun, 23 Nov 2025 17:45:15 -0800
Subject: [PATCH 5/5] !fixup, use ExtractLastLanePerPart if the operand is not
 uniform across UF and VF.

Also implement the cost of ExtractLastLanePerPart to prevent test
changes in RISCV/uniform-load-store.ll.
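
For reference, a minimal sketch of the result (based on the VF2IC2 checks added in narrow-to-single-scalar.ll below; value names are placeholders) when the stored value is not uniform across parts:

  ; With VF=2, IC=2, ExtractLastLanePerPart is used; after unrolling only the
  ; last part's last lane ends up stored to the uniform address %dst2.
  %part1.trunc = trunc <2 x i16> %part1.splat to <2 x i8>
  %last = extractelement <2 x i8> %part1.trunc, i32 1
  store i8 %last, ptr %dst2, align 4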
---
 .../lib/Transforms/Vectorize/VPlanRecipes.cpp |   1 +
 .../Transforms/Vectorize/VPlanTransforms.cpp  |   8 +-
 .../narrow-scatter-to-scalar-store.ll         |  51 -----
 .../LoopVectorize/narrow-to-single-scalar.ll  | 174 ++++++++++++++++++
 4 files changed, 181 insertions(+), 53 deletions(-)
 delete mode 100644 llvm/test/Transforms/LoopVectorize/narrow-scatter-to-scalar-store.ll

diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 5e46659227262..5b1337be123e3 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -1208,6 +1208,7 @@ InstructionCost VPInstruction::computeCost(ElementCount VF,
                                   I32Ty, {Arg0Ty, I32Ty, I1Ty});
     return Ctx.TTI.getIntrinsicInstrCost(Attrs, Ctx.CostKind);
   }
+  case VPInstruction::ExtractLastLanePerPart:
   case VPInstruction::ExtractLastElement: {
     // Add on the cost of extracting the element.
     auto *VecTy = toVectorTy(Ctx.Types.inferScalarType(getOperand(0)), VF);
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 72e56f44aea89..45e8110ecc6de 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -1424,8 +1424,12 @@ static void narrowToSingleScalarRecipes(VPlan &Plan) {
         if (Mask)
           continue;
 
-        auto *Extract = new VPInstruction(VPInstruction::ExtractLastElement,
-                                          {WidenStoreR->getOperand(1)});
+        unsigned ExtractOps =
+            vputils::isUniformAcrossVFsAndUFs(WidenStoreR->getOperand(1))
+                ? VPInstruction::ExtractLastElement
+                : VPInstruction::ExtractLastLanePerPart;
+        auto *Extract =
+            new VPInstruction(ExtractOps, {WidenStoreR->getOperand(1)});
         Extract->insertBefore(WidenStoreR);
 
         // TODO: Sink the scalar store recipe to middle block if possible.
diff --git a/llvm/test/Transforms/LoopVectorize/narrow-scatter-to-scalar-store.ll b/llvm/test/Transforms/LoopVectorize/narrow-scatter-to-scalar-store.ll
deleted file mode 100644
index 5a2744e4c007b..0000000000000
--- a/llvm/test/Transforms/LoopVectorize/narrow-scatter-to-scalar-store.ll
+++ /dev/null
@@ -1,51 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -passes=loop-vectorize -force-vector-width=2 -S %s | FileCheck %s
-define void @truncate_i16_to_i8_cse(ptr noalias %src, ptr noalias %dst, ptr %dst2) {
-; CHECK-LABEL: define void @truncate_i16_to_i8_cse(
-; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], ptr [[DST2:%.*]]) {
-; CHECK-NEXT:  [[ENTRY:.*:]]
-; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
-; CHECK:       [[VECTOR_PH]]:
-; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
-; CHECK:       [[VECTOR_BODY]]:
-; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[SRC]], align 2
-; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i16> poison, i16 [[TMP0]], i64 0
-; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i16> [[BROADCAST_SPLATINSERT]], <2 x i16> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP1:%.*]] = trunc <2 x i16> [[BROADCAST_SPLAT]] to <2 x i8>
-; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i8> [[TMP1]], i32 1
-; CHECK-NEXT:    store i8 [[TMP2]], ptr [[DST2]], align 1
-; CHECK-NEXT:    store i8 [[TMP2]], ptr [[DST]], align 1
-; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4294967296
-; CHECK-NEXT:    br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK:       [[MIDDLE_BLOCK]]:
-; CHECK-NEXT:    br label %[[EXIT:.*]]
-; CHECK:       [[EXIT]]:
-; CHECK-NEXT:    ret void
-;
-entry:
-  br label %loop
-
-loop:
-  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
-  %count = phi i32 [ 0, %entry ], [ %count.next, %loop ]
-  %val = load i16, ptr %src, align 2
-  %val.zext = zext i16 %val to i64
-  %val.trunc.zext = trunc i64 %val.zext to i8
-  store i8 %val.trunc.zext, ptr %dst2, align 1
-  %val.trunc = trunc i16 %val to i8
-  store i8 %val.trunc, ptr %dst, align 1
-  %count.next = add i32 %count, 1
-  %exitcond = icmp eq i32 %count.next, 0
-  %iv.next = add i64 %iv, 1
-  br i1 %exitcond, label %exit, label %loop
-
-exit:
-  ret void
-}
-;.
-; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
-; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
-; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-;.
diff --git a/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll b/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
index 440309d246899..0329ea638101a 100644
--- a/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
+++ b/llvm/test/Transforms/LoopVectorize/narrow-to-single-scalar.ll
@@ -229,3 +229,177 @@ loop:
 exit:
   ret void
 }
+
+define void @narrow_scatter_with_uniform_addr_to_scalar(ptr noalias %src, ptr noalias %dst, ptr %dst2) {
+; VF4IC1-LABEL: define void @narrow_scatter_with_uniform_addr_to_scalar(
+; VF4IC1-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], ptr [[DST2:%.*]]) {
+; VF4IC1-NEXT:  [[ENTRY:.*:]]
+; VF4IC1-NEXT:    br label %[[VECTOR_PH:.*]]
+; VF4IC1:       [[VECTOR_PH]]:
+; VF4IC1-NEXT:    br label %[[VECTOR_BODY:.*]]
+; VF4IC1:       [[VECTOR_BODY]]:
+; VF4IC1-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF4IC1-NEXT:    [[TMP0:%.*]] = load i16, ptr [[SRC]], align 2
+; VF4IC1-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[TMP0]], i64 0
+; VF4IC1-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i16> [[BROADCAST_SPLATINSERT]], <4 x i16> poison, <4 x i32> zeroinitializer
+; VF4IC1-NEXT:    [[TMP1:%.*]] = trunc <4 x i16> [[BROADCAST_SPLAT]] to <4 x i8>
+; VF4IC1-NEXT:    [[TMP2:%.*]] = extractelement <4 x i8> [[TMP1]], i32 3
+; VF4IC1-NEXT:    store i8 [[TMP2]], ptr [[DST2]], align 1
+; VF4IC1-NEXT:    store i8 [[TMP2]], ptr [[DST]], align 1
+; VF4IC1-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; VF4IC1-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
+; VF4IC1-NEXT:    br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; VF4IC1:       [[MIDDLE_BLOCK]]:
+; VF4IC1-NEXT:    br label %[[EXIT:.*]]
+; VF4IC1:       [[EXIT]]:
+; VF4IC1-NEXT:    ret void
+;
+; VF2IC2-LABEL: define void @narrow_scatter_with_uniform_addr_to_scalar(
+; VF2IC2-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], ptr [[DST2:%.*]]) {
+; VF2IC2-NEXT:  [[ENTRY:.*:]]
+; VF2IC2-NEXT:    br label %[[VECTOR_PH:.*]]
+; VF2IC2:       [[VECTOR_PH]]:
+; VF2IC2-NEXT:    br label %[[VECTOR_BODY:.*]]
+; VF2IC2:       [[VECTOR_BODY]]:
+; VF2IC2-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF2IC2-NEXT:    [[TMP0:%.*]] = load i16, ptr [[SRC]], align 2
+; VF2IC2-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i16> poison, i16 [[TMP0]], i64 0
+; VF2IC2-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i16> [[BROADCAST_SPLATINSERT]], <2 x i16> poison, <2 x i32> zeroinitializer
+; VF2IC2-NEXT:    [[TMP1:%.*]] = trunc <2 x i16> [[BROADCAST_SPLAT]] to <2 x i8>
+; VF2IC2-NEXT:    [[TMP2:%.*]] = extractelement <2 x i8> [[TMP1]], i32 1
+; VF2IC2-NEXT:    store i8 [[TMP2]], ptr [[DST2]], align 1
+; VF2IC2-NEXT:    store i8 [[TMP2]], ptr [[DST]], align 1
+; VF2IC2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; VF2IC2-NEXT:    [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
+; VF2IC2-NEXT:    br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; VF2IC2:       [[MIDDLE_BLOCK]]:
+; VF2IC2-NEXT:    br label %[[EXIT:.*]]
+; VF2IC2:       [[EXIT]]:
+; VF2IC2-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %val = load i16, ptr %src, align 2
+  %val.zext = zext i16 %val to i64
+  %val.trunc.zext = trunc i64 %val.zext to i8
+  store i8 %val.trunc.zext, ptr %dst2, align 1
+  %val.trunc = trunc i16 %val to i8
+  store i8 %val.trunc, ptr %dst, align 1
+  %iv.next = add i64 %iv, 1
+  %exitcond = icmp eq i64 %iv.next, 256
+  br i1 %exitcond, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+define void @narrow_scatter_with_uniform_addr_to_scalar_unroll(ptr noalias %src, ptr noalias %dst, ptr %dst2) {
+; VF4IC1-LABEL: define void @narrow_scatter_with_uniform_addr_to_scalar_unroll(
+; VF4IC1-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], ptr [[DST2:%.*]]) {
+; VF4IC1-NEXT:  [[ENTRY:.*:]]
+; VF4IC1-NEXT:    br label %[[VECTOR_PH:.*]]
+; VF4IC1:       [[VECTOR_PH]]:
+; VF4IC1-NEXT:    br label %[[VECTOR_BODY:.*]]
+; VF4IC1:       [[VECTOR_BODY]]:
+; VF4IC1-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF4IC1-NEXT:    [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF4IC1-NEXT:    [[TMP0:%.*]] = lshr <4 x i64> [[VEC_IND]], splat (i64 1)
+; VF4IC1-NEXT:    [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0
+; VF4IC1-NEXT:    [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1
+; VF4IC1-NEXT:    [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2
+; VF4IC1-NEXT:    [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3
+; VF4IC1-NEXT:    [[TMP5:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP1]]
+; VF4IC1-NEXT:    [[TMP6:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP2]]
+; VF4IC1-NEXT:    [[TMP7:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP3]]
+; VF4IC1-NEXT:    [[TMP8:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP4]]
+; VF4IC1-NEXT:    [[TMP9:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[TMP1]]
+; VF4IC1-NEXT:    [[TMP20:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[TMP2]]
+; VF4IC1-NEXT:    [[TMP21:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[TMP3]]
+; VF4IC1-NEXT:    [[TMP22:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[TMP4]]
+; VF4IC1-NEXT:    [[TMP23:%.*]] = load i16, ptr [[TMP9]], align 4
+; VF4IC1-NEXT:    [[TMP24:%.*]] = load i16, ptr [[TMP20]], align 4
+; VF4IC1-NEXT:    [[TMP25:%.*]] = load i16, ptr [[TMP21]], align 4
+; VF4IC1-NEXT:    [[TMP16:%.*]] = load i16, ptr [[TMP22]], align 4
+; VF4IC1-NEXT:    [[TMP17:%.*]] = insertelement <4 x i16> poison, i16 [[TMP23]], i32 0
+; VF4IC1-NEXT:    [[TMP18:%.*]] = insertelement <4 x i16> [[TMP17]], i16 [[TMP24]], i32 1
+; VF4IC1-NEXT:    [[TMP19:%.*]] = insertelement <4 x i16> [[TMP18]], i16 [[TMP25]], i32 2
+; VF4IC1-NEXT:    [[BROADCAST_SPLAT:%.*]] = insertelement <4 x i16> [[TMP19]], i16 [[TMP16]], i32 3
+; VF4IC1-NEXT:    [[TMP10:%.*]] = trunc <4 x i16> [[BROADCAST_SPLAT]] to <4 x i8>
+; VF4IC1-NEXT:    [[TMP11:%.*]] = extractelement <4 x i8> [[TMP10]], i32 3
+; VF4IC1-NEXT:    store i8 [[TMP11]], ptr [[DST2]], align 4
+; VF4IC1-NEXT:    [[TMP12:%.*]] = extractelement <4 x i8> [[TMP10]], i32 0
+; VF4IC1-NEXT:    [[TMP13:%.*]] = extractelement <4 x i8> [[TMP10]], i32 1
+; VF4IC1-NEXT:    [[TMP14:%.*]] = extractelement <4 x i8> [[TMP10]], i32 2
+; VF4IC1-NEXT:    store i8 [[TMP12]], ptr [[TMP5]], align 4
+; VF4IC1-NEXT:    store i8 [[TMP13]], ptr [[TMP6]], align 4
+; VF4IC1-NEXT:    store i8 [[TMP14]], ptr [[TMP7]], align 4
+; VF4IC1-NEXT:    store i8 [[TMP11]], ptr [[TMP8]], align 4
+; VF4IC1-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; VF4IC1-NEXT:    [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], splat (i64 4)
+; VF4IC1-NEXT:    [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
+; VF4IC1-NEXT:    br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; VF4IC1:       [[MIDDLE_BLOCK]]:
+; VF4IC1-NEXT:    br label %[[EXIT:.*]]
+; VF4IC1:       [[EXIT]]:
+; VF4IC1-NEXT:    ret void
+;
+; VF2IC2-LABEL: define void @narrow_scatter_with_uniform_addr_to_scalar_unroll(
+; VF2IC2-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], ptr [[DST2:%.*]]) {
+; VF2IC2-NEXT:  [[ENTRY:.*:]]
+; VF2IC2-NEXT:    br label %[[VECTOR_PH:.*]]
+; VF2IC2:       [[VECTOR_PH]]:
+; VF2IC2-NEXT:    br label %[[VECTOR_BODY:.*]]
+; VF2IC2:       [[VECTOR_BODY]]:
+; VF2IC2-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; VF2IC2-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 2
+; VF2IC2-NEXT:    [[TMP1:%.*]] = lshr i64 [[INDEX]], 1
+; VF2IC2-NEXT:    [[TMP2:%.*]] = lshr i64 [[TMP0]], 1
+; VF2IC2-NEXT:    [[TMP3:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP1]]
+; VF2IC2-NEXT:    [[TMP4:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP2]]
+; VF2IC2-NEXT:    [[TMP9:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[TMP1]]
+; VF2IC2-NEXT:    [[TMP10:%.*]] = getelementptr i64, ptr [[SRC]], i64 [[TMP2]]
+; VF2IC2-NEXT:    [[TMP5:%.*]] = load i16, ptr [[TMP9]], align 4
+; VF2IC2-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i16> poison, i16 [[TMP5]], i64 0
+; VF2IC2-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i16> [[BROADCAST_SPLATINSERT]], <2 x i16> poison, <2 x i32> zeroinitializer
+; VF2IC2-NEXT:    [[TMP13:%.*]] = load i16, ptr [[TMP10]], align 4
+; VF2IC2-NEXT:    [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i16> poison, i16 [[TMP13]], i64 0
+; VF2IC2-NEXT:    [[BROADCAST_SPLAT1:%.*]] = shufflevector <2 x i16> [[BROADCAST_SPLATINSERT1]], <2 x i16> poison, <2 x i32> zeroinitializer
+; VF2IC2-NEXT:    [[TMP6:%.*]] = trunc <2 x i16> [[BROADCAST_SPLAT1]] to <2 x i8>
+; VF2IC2-NEXT:    [[TMP7:%.*]] = extractelement <2 x i8> [[TMP6]], i32 1
+; VF2IC2-NEXT:    store i8 [[TMP7]], ptr [[DST2]], align 4
+; VF2IC2-NEXT:    [[TMP11:%.*]] = trunc <2 x i16> [[BROADCAST_SPLAT]] to <2 x i8>
+; VF2IC2-NEXT:    [[TMP12:%.*]] = extractelement <2 x i8> [[TMP11]], i32 1
+; VF2IC2-NEXT:    store i8 [[TMP12]], ptr [[TMP3]], align 4
+; VF2IC2-NEXT:    store i8 [[TMP7]], ptr [[TMP4]], align 4
+; VF2IC2-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; VF2IC2-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
+; VF2IC2-NEXT:    br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; VF2IC2:       [[MIDDLE_BLOCK]]:
+; VF2IC2-NEXT:    br label %[[EXIT:.*]]
+; VF2IC2:       [[EXIT]]:
+; VF2IC2-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+  %iv.shift = lshr i64 %iv, 1
+  %gep.dst = getelementptr i64, ptr %dst, i64 %iv.shift
+  %gep.src = getelementptr i64, ptr %src, i64 %iv.shift
+  %val = load i16, ptr %gep.src, align 4
+  %val.zext = zext i16 %val to i64
+  %val.trunc.zext = trunc i64 %val.zext to i8
+  store i8 %val.trunc.zext, ptr %dst2, align 4
+  %val.trunc = trunc i16 %val to i8
+  store i8 %val.trunc, ptr %gep.dst, align 4
+  %iv.next = add i64 %iv, 1
+  %exitcond = icmp eq i64 %iv.next, 256
+  br i1 %exitcond, label %exit, label %loop
+
+exit:
+  ret void
+}


