[llvm] [LoopVectorize] Enable shuffle padding for masked interleaved accesses (PR #75329)

via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 14 06:45:53 PST 2023


https://github.com/huhu233 updated https://github.com/llvm/llvm-project/pull/75329

>From dc582745cb2bfc9769bceb9c88fca15095801e2c Mon Sep 17 00:00:00 2001
From: zhangtiehu <z00562868 at china.huawei.com>
Date: Thu, 14 Dec 2023 22:44:14 +0800
Subject: [PATCH] [LoopVectorize] Enable shuffle padding for masked interleaved
 accesses

Consider the following case, where the loop updates only two of the
three fields of the struct:

typedef struct {
    float x;
    float y;
    float z;
} patic;

for (int i = 0; i < num; i++) {
  ps[i].x = factor * ps[i].x;
  ps[i].y = factor * ps[i].y;
}

This patch pads the gaps of the interleaved store group to eliminate the
masked.store, which helps the InterleavedAccess pass generate better code,
as shown below:

%wide.vec = load <12 x float>; 0,1,2,3,...,11
%shuffle1 = shuffle %wide.vec, poison, <0, 3, 6, 9> ; 0,3,6,9
%shuffle2 = shuffle %wide.vec, poison, <1, 4, 7, 10> ; 1,4,7,10
%padded = shuffle %wide.vec, poison, <2, 5, 8, 11> ; 2,5,8,11

%concate1 = shuffle %op1, %op2, <0, 1, ..., 7> ; 0,3,6,9,1,4,7,10
            ; (%op1 and %op2 are the values computed from %shuffle1 and %shuffle2)
%concate2 = shuffle %padded, poison,
            <0, 1, ..., 3, poison, poison, poison, poison> ; 2,5,8,11,poison,...,poison
%concateFinal = shuffle %concate1, %concate2,
                <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; 0,1,2,3,...,11
store <12 x float> %concateFinal

This patch restricts shuffle padding to interleaved store groups that
have a matching interleaved load group, which means:
1. The value operand of the StoreInst must come from a LoadInst.
2. The store group and the load group access the same struct memory.
A C sketch of loops that do and do not satisfy these restrictions is
given below.
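
As an illustration, here is a minimal C sketch reusing the patic struct
above (the function names are hypothetical). The first loop satisfies
both restrictions; the second stores a loop-invariant value, so it has
no matching load group and its gap is still handled as before this
patch (masked store or scalar epilogue):

/* Qualifies: the stored values are computed from loads of the same
 * struct memory (ps), so the store group has a matching load group and
 * its gap can be padded with the extra shuffle of the wide load. */
void scale_xy(patic *ps, int num, float factor) {
  for (int i = 0; i < num; i++) {
    ps[i].x = factor * ps[i].x;
    ps[i].y = factor * ps[i].y;
  }
}

/* Does not qualify (restriction 1): the stored value is not loaded
 * from ps, so there is no matching load group to pad from. */
void set_xy(patic *ps, int num, float factor) {
  for (int i = 0; i < num; i++) {
    ps[i].x = factor;
    ps[i].y = factor;
  }
}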
---
 llvm/include/llvm/Analysis/VectorUtils.h      |    7 +
 llvm/lib/Analysis/VectorUtils.cpp             |   23 +
 .../Transforms/Vectorize/LoopVectorize.cpp    |   72 +-
 .../AArch64/sve-structured-store-cost.ll      |   46 +
 .../AArch64/sve-structured-store.ll           | 1221 +++++++++++++++++
 5 files changed, 1364 insertions(+), 5 deletions(-)
 create mode 100644 llvm/test/Transforms/LoopVectorize/AArch64/sve-structured-store-cost.ll
 create mode 100644 llvm/test/Transforms/LoopVectorize/AArch64/sve-structured-store.ll

diff --git a/llvm/include/llvm/Analysis/VectorUtils.h b/llvm/include/llvm/Analysis/VectorUtils.h
index 55a6aa645a86e2..01727474432aeb 100644
--- a/llvm/include/llvm/Analysis/VectorUtils.h
+++ b/llvm/include/llvm/Analysis/VectorUtils.h
@@ -819,6 +819,13 @@ class InterleavedAccessInfo {
   /// Returns true if we have any interleave groups.
   bool hasGroups() const { return !InterleaveGroups.empty(); }
 
+  /// Check if the interleaved store group has a matching load group, which
+  /// means the store must satisfy the following restrictions:
+  /// 1. The value operand of the StoreInst must come from a LoadInst.
+  /// 2. The store group and the load group access the same memory.
+  Value *hasMatchedLoadGroupForStore(Instruction *Inst, BasicBlock *BB,
+                                     Value *Ptr) const;
+
 private:
   /// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
   /// Simplifies SCEV expressions in the context of existing SCEV assumptions.
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index 91d8c31fa062de..b1ef36109c1669 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -1441,6 +1441,29 @@ void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() {
   RequiresScalarEpilogue = false;
 }
 
+Value *InterleavedAccessInfo::hasMatchedLoadGroupForStore(Instruction *Inst,
+                                                          BasicBlock *BB,
+                                                          Value *Ptr) const {
+  if (isa<PHINode>(Inst) || Inst->getParent() != BB)
+    return nullptr;
+
+  if (isa<LoadInst>(Inst)) {
+    Value *V = getUnderlyingObject(Inst->getOperand(0));
+    auto Group = getInterleaveGroup(Inst);
+    if (Group && (V == Ptr))
+      return Group->getInsertPos();
+  }
+
+  for (unsigned It = 0; It < Inst->getNumOperands(); It++) {
+    if (Instruction *I = dyn_cast<Instruction>(Inst->getOperand(It)))
+      if (Value *MatchedLoadGroupEntry =
+              hasMatchedLoadGroupForStore(I, BB, Ptr))
+        return MatchedLoadGroupEntry;
+  }
+
+  return nullptr;
+}
+
 template <typename InstT>
 void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const {
   llvm_unreachable("addMetadata can only be used for Instruction");
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index f82e161fb846d1..09daa3d32d3b3e 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -408,6 +408,10 @@ static constexpr uint32_t MemCheckBypassWeights[] = {1, 127};
 // after prolog. See `emitIterationCountCheck`.
 static constexpr uint32_t MinItersBypassWeights[] = {1, 127};
 
+static cl::opt<bool> EnableShufflePadding(
+    "enable-shuffle-padding", cl::init(true), cl::Hidden,
+    cl::desc("Enable shuffle padding to generate structure store."));
+
 /// A helper function that returns true if the given type is irregular. The
 /// type is irregular if its allocated size doesn't equal the store size of an
 /// element of the corresponding vector type.
@@ -796,6 +800,11 @@ class InnerLoopVectorizer {
   // correct start value of reduction PHIs when vectorizing the epilogue.
   SmallMapVector<const RecurrenceDescriptor *, PHINode *, 4>
       ReductionResumeValues;
+
+  /// This map stores the shuffles used to pad the gaps of the interleaved
+  /// store groups. The key is the insert position of the load group that
+  /// matches the related store group.
+  MapVector<Value *, SmallVector<SmallVector<Value *, 4>, 4>> PaddedShufflesMap;
 };
 
 class InnerLoopUnroller : public InnerLoopVectorizer {
@@ -1702,6 +1711,11 @@ class LoopVectorizationCostModel {
   /// \p VF is the vectorization factor chosen for the original loop.
   bool isEpilogueVectorizationProfitable(const ElementCount VF) const;
 
+  Value *hasMatchedLoadGroupForStore(Instruction *Inst, BasicBlock *BB,
+                                     Value *Ptr) const {
+    return InterleaveInfo.hasMatchedLoadGroupForStore(Inst, BB, Ptr);
+  }
+
 private:
   unsigned NumPredStores = 0;
 
@@ -2557,6 +2571,16 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(
                        : ShuffledMask;
   };
 
+  Value *MatchedLoad = nullptr;
+  bool IsShufflePadding = false;
+  if (EnableShufflePadding && useMaskedInterleavedAccesses(*TTI) &&
+      TTI->enableScalableVectorization()) {
+    IsShufflePadding = true;
+    if (isa<StoreInst>(Instr) && (Group->getNumMembers() != Group->getFactor()))
+      MatchedLoad = Cost->hasMatchedLoadGroupForStore(
+          Instr, Instr->getParent(), getUnderlyingObject(Instr->getOperand(1)));
+  }
+
   // Vectorize the interleaved load group.
   if (isa<LoadInst>(Instr)) {
     Value *MaskForGaps = nullptr;
@@ -2626,8 +2650,9 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(
     for (unsigned I = 0; I < InterleaveFactor; ++I) {
       Instruction *Member = Group->getMember(I);
 
-      // Skip the gaps in the group.
-      if (!Member)
+      SmallVector<Value *, 4> Shuffles;
+      // Skip the gaps in the group if no padding is needed.
+      if (!Member && !IsShufflePadding)
         continue;
 
       auto StrideMask =
@@ -2636,6 +2661,12 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(
         Value *StridedVec = Builder.CreateShuffleVector(
             NewLoads[Part], StrideMask, "strided.vec");
 
+        if (!Member) {
+          if (Group->isReverse())
+            StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
+          Shuffles.push_back(StridedVec);
+          continue;
+        }
         // If this member has different type, cast the result type.
         if (Member->getType() != ScalarTy) {
           assert(!VF.isScalable() && "VF is assumed to be non scalable.");
@@ -2646,9 +2677,13 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(
         if (Group->isReverse())
           StridedVec = Builder.CreateVectorReverse(StridedVec, "reverse");
 
+        Shuffles.push_back(StridedVec);
+
         State.set(VPDefs[J], StridedVec, Part);
       }
-      ++J;
+      PaddedShufflesMap[Instr].push_back(Shuffles);
+      if (Member)
+        ++J;
     }
     return;
   }
@@ -2672,6 +2707,24 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(
              "Fail to get a member from an interleaved store group");
       Instruction *Member = Group->getMember(i);
 
+      if (!Member && MatchedLoad) {
+        // %wide.vec = load <12 x float>; 0,1,2,3,...,11
+        // %shuffle1 = shuffle %wide.vec, poison, <0, 3, 6, 9> ; 0,3,6,9
+        // %shuffle2 = shuffle %wide.vec, poison, <1, 4, 7, 10> ; 1,4,7,10
+        // %padded = shuffle %wide.vec, poison, <2, 5, 8, 11> ; 2,5,8,11
+        //
+        // %concate1 = shuffle %op1, %op2, <0, 1, ..., 7> ; 0,3,6,9,1,4,7,10
+        // %concate2 = shuffle %padded, poison,
+        //    <0, 1, ..., 3, undef, undef, undef, undef>
+        //    ; 2,5,8,11,poison,...,poison
+        // %concateFinal = shuffle %concate1, %concate2,
+        //    <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> ; 0,1,2,3,...,11
+        // store <12 x float> %concateFinal
+        Value *PaddedShuffle = PaddedShufflesMap[MatchedLoad][i][Part];
+        StoredVecs.push_back(PaddedShuffle);
+        continue;
+      }
+
       // Skip the gaps in the group.
       if (!Member) {
         Value *Undef = PoisonValue::get(SubVT);
@@ -2696,7 +2749,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(
     // Interleave all the smaller vectors into one wider vector.
     Value *IVec = interleaveVectors(Builder, StoredVecs, "interleaved.vec");
     Instruction *NewStoreInstr;
-    if (BlockInMask || MaskForGaps) {
+    if ((BlockInMask || MaskForGaps) && !MatchedLoad) {
       Value *GroupMask = CreateGroupMask(Part, MaskForGaps);
       NewStoreInstr = Builder.CreateMaskedStore(IVec, AddrParts[Part],
                                                 Group->getAlign(), GroupMask);
@@ -6325,10 +6378,19 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
     if (Group->getMember(IF))
       Indices.push_back(IF);
 
+  bool IsShufflePaddingStore = false;
+  if (EnableShufflePadding && useMaskedInterleavedAccesses(TTI) &&
+      TTI.enableScalableVectorization() && !VF.isScalable())
+    IsShufflePaddingStore = true;
+
   // Calculate the cost of the whole interleaved group.
+  // If shuffle padding is enabled, ignore gaps.
   bool UseMaskForGaps =
       (Group->requiresScalarEpilogue() && !isScalarEpilogueAllowed()) ||
-      (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()));
+      (isa<StoreInst>(I) && (Group->getNumMembers() < Group->getFactor()) &&
+       (!IsShufflePaddingStore ||
+        !hasMatchedLoadGroupForStore(I, I->getParent(),
+                                     getUnderlyingObject(I->getOperand(1)))));
   InstructionCost Cost = TTI.getInterleavedMemoryOpCost(
       I->getOpcode(), WideVecTy, Group->getFactor(), Indices, Group->getAlign(),
       AS, CostKind, Legal->isMaskRequired(I), UseMaskForGaps);
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-structured-store-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-structured-store-cost.ll
new file mode 100644
index 00000000000000..0cc24fe778bbd9
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-structured-store-cost.ll
@@ -0,0 +1,46 @@
+; REQUIRES: asserts
+; RUN: opt -enable-shuffle-padding=true -enable-masked-interleaved-mem-accesses=true -passes=loop-vectorize -debug-only=loop-vectorize  -mtriple=aarch64 -mattr=+sve -aarch64-sve-vector-bits-min=512 -S < %s 2>&1  | FileCheck %s --check-prefixes=PADDING
+; RUN: opt -enable-shuffle-padding=false -enable-masked-interleaved-mem-accesses=true -passes=loop-vectorize -debug-only=loop-vectorize  -mtriple=aarch64 -mattr=+sve -aarch64-sve-vector-bits-min=512 -S < %s 2>&1  | FileCheck %s --check-prefixes=NO-PADDING
+
+%struct.patic = type { float, float, float }
+
+; for (int i = 0; i < num; i++) {
+;   ps[i].x = factor * ps[i].x;
+;   ps[i].y = factor * ps[i].y;
+; }
+;
+define void @shufflePadding(i32 noundef %num, ptr nocapture noundef %ps) {
+; PADDING-LABEL: 'shufflePadding'
+; PADDING: LV: Found an estimated cost of 3 for VF 16 For instruction:   store float %mul6, ptr %y, align 4
+
+; NO-PADDING-LABEL: 'shufflePadding'
+; NO-PADDING: LV: Found an estimated cost of 188 for VF 16 For instruction:   store float %mul6, ptr %y, align 4
+entry:
+  %cmp19 = icmp sgt i32 %num, 0
+  br i1 %cmp19, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %wide.trip.count = zext i32 %num to i64
+  br label %for.body
+
+for.cond.cleanup.loopexit:                        ; preds = %for.body
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds %struct.patic, ptr %ps, i64 %indvars.iv
+  %0 = load float, ptr %arrayidx, align 4
+  %mul = fmul fast float %0, 0x40019999A0000000
+  store float %mul, ptr %arrayidx, align 4
+  %y = getelementptr inbounds %struct.patic, ptr %arrayidx, i64 0, i32 1
+  %1 = load float, ptr %y, align 4
+  %mul6 = fmul fast float %1, 0x40019999A0000000
+  store float %mul6, ptr %y, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body
+}
+
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-structured-store.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-structured-store.ll
new file mode 100644
index 00000000000000..4a4f03f24a3e6b
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-structured-store.ll
@@ -0,0 +1,1221 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -mtriple=aarch64 -mattr=+sve -S -passes=loop-vectorize,interleaved-access  -enable-masked-interleaved-mem-accesses=true -enable-shuffle-padding=true -aarch64-sve-vector-bits-min=256 < %s -o - | FileCheck %s --check-prefixes=ENABLE-SHUFFLE-PADDING
+; RUN: opt -mtriple=aarch64 -mattr=+sve -S -passes=loop-vectorize,interleaved-access  -enable-masked-interleaved-mem-accesses=true -enable-shuffle-padding=false  -aarch64-sve-vector-bits-min=256 < %s -o - | FileCheck %s --check-prefixes=DISABLE-SHUFFLE-PADDING
+
+%struct.patic = type { float, float, float }
+
+; for (int i = 0; i < num; i++) {
+;   ps[i].x = factor * ps[i].x;
+;   ps[i].y = factor * ps[i].y;
+; }
+;
+define void @test(i32 noundef %num, ptr nocapture noundef %ps) {
+; ENABLE-SHUFFLE-PADDING-LABEL: define void @test(
+; ENABLE-SHUFFLE-PADDING-SAME: i32 noundef [[NUM:%.*]], ptr nocapture noundef [[PS:%.*]]) #[[ATTR0:[0-9]+]] {
+; ENABLE-SHUFFLE-PADDING-NEXT:  entry:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[CMP19:%.*]] = icmp sgt i32 [[NUM]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP19]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; ENABLE-SHUFFLE-PADDING:       for.body.preheader:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[NUM]] to i64
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[WIDE_TRIP_COUNT]], 8
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; ENABLE-SHUFFLE-PADDING:       vector.ph:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 8
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP0:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP1:%.*]] = select i1 [[TMP0]], i64 8, i64 [[N_MOD_VF]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[VECTOR_BODY:%.*]]
+; ENABLE-SHUFFLE-PADDING:       vector.body:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_PATIC:%.*]], ptr [[PS]], i64 [[TMP2]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP5:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[LDN:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> [[TMP5]], ptr [[TMP4]])
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP7:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP6]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP9:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP8]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP11:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP10]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP12:%.*]] = fmul fast <8 x float> [[TMP11]], <float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP3]], i64 0, i32 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP14:%.*]] = fmul fast <8 x float> [[TMP9]], <float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP15:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 -1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP16:%.*]] = shufflevector <8 x float> [[TMP12]], <8 x float> [[TMP14]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP17:%.*]] = shufflevector <8 x float> [[TMP7]], <8 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP18:%.*]] = shufflevector <16 x float> [[TMP16]], <16 x float> [[TMP17]], <24 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP19:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP20:%.*]] = shufflevector <24 x float> [[TMP18]], <24 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP21:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP20]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP22:%.*]] = shufflevector <24 x float> [[TMP18]], <24 x float> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP23:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP22]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP24:%.*]] = shufflevector <24 x float> [[TMP18]], <24 x float> poison, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP25:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP24]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    call void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float> [[TMP21]], <vscale x 4 x float> [[TMP23]], <vscale x 4 x float> [[TMP25]], <vscale x 4 x i1> [[TMP19]], ptr [[TMP15]])
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; ENABLE-SHUFFLE-PADDING:       middle.block:
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[SCALAR_PH]]
+; ENABLE-SHUFFLE-PADDING:       scalar.ph:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_BODY:%.*]]
+; ENABLE-SHUFFLE-PADDING:       for.cond.cleanup.loopexit:
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_COND_CLEANUP]]
+; ENABLE-SHUFFLE-PADDING:       for.cond.cleanup:
+; ENABLE-SHUFFLE-PADDING-NEXT:    ret void
+; ENABLE-SHUFFLE-PADDING:       for.body:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[INDVARS_IV]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP27:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[MUL:%.*]] = fmul fast float [[TMP27]], 0x40019999A0000000
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[MUL]], ptr [[ARRAYIDX]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP28:%.*]] = load float, ptr [[Y]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[MUL6:%.*]] = fmul fast float [[TMP28]], 0x40019999A0000000
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[MUL6]], ptr [[Y]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+; DISABLE-SHUFFLE-PADDING-LABEL: define void @test(
+; DISABLE-SHUFFLE-PADDING-SAME: i32 noundef [[NUM:%.*]], ptr nocapture noundef [[PS:%.*]]) #[[ATTR0:[0-9]+]] {
+; DISABLE-SHUFFLE-PADDING-NEXT:  entry:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[CMP19:%.*]] = icmp sgt i32 [[NUM]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP19]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; DISABLE-SHUFFLE-PADDING:       for.body.preheader:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[NUM]] to i64
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[WIDE_TRIP_COUNT]], 8
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; DISABLE-SHUFFLE-PADDING:       vector.ph:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 8
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP0:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP1:%.*]] = select i1 [[TMP0]], i64 8, i64 [[N_MOD_VF]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[VECTOR_BODY:%.*]]
+; DISABLE-SHUFFLE-PADDING:       vector.body:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP4:%.*]] = add i64 [[INDEX]], 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP5:%.*]] = add i64 [[INDEX]], 3
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP6:%.*]] = add i64 [[INDEX]], 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP7:%.*]] = add i64 [[INDEX]], 5
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP8:%.*]] = add i64 [[INDEX]], 6
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP9:%.*]] = add i64 [[INDEX]], 7
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_PATIC:%.*]], ptr [[PS]], i64 [[TMP2]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP3]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP4]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP5]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP6]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP7]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP8]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP9]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i32 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP19:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[LDN:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> [[TMP19]], ptr [[TMP18]])
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP20:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP21:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP20]], i64 0)
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP22:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP23:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP22]], i64 0)
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP24:%.*]] = fmul fast <8 x float> [[TMP23]], <float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000>
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP25:%.*]] = extractelement <8 x float> [[TMP24]], i32 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP25]], ptr [[TMP10]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP26:%.*]] = extractelement <8 x float> [[TMP24]], i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP26]], ptr [[TMP11]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP27:%.*]] = extractelement <8 x float> [[TMP24]], i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP27]], ptr [[TMP12]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP28:%.*]] = extractelement <8 x float> [[TMP24]], i32 3
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP28]], ptr [[TMP13]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP29:%.*]] = extractelement <8 x float> [[TMP24]], i32 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP29]], ptr [[TMP14]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP30:%.*]] = extractelement <8 x float> [[TMP24]], i32 5
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP30]], ptr [[TMP15]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP31:%.*]] = extractelement <8 x float> [[TMP24]], i32 6
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP31]], ptr [[TMP16]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP32:%.*]] = extractelement <8 x float> [[TMP24]], i32 7
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP32]], ptr [[TMP17]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP10]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP11]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP12]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP13]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP14]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP15]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP39:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP16]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP17]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP41:%.*]] = fmul fast <8 x float> [[TMP21]], <float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000>
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP42:%.*]] = extractelement <8 x float> [[TMP41]], i32 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP42]], ptr [[TMP33]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP43:%.*]] = extractelement <8 x float> [[TMP41]], i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP43]], ptr [[TMP34]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP44:%.*]] = extractelement <8 x float> [[TMP41]], i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP44]], ptr [[TMP35]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP45:%.*]] = extractelement <8 x float> [[TMP41]], i32 3
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP45]], ptr [[TMP36]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP46:%.*]] = extractelement <8 x float> [[TMP41]], i32 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP46]], ptr [[TMP37]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP47:%.*]] = extractelement <8 x float> [[TMP41]], i32 5
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP47]], ptr [[TMP38]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP48:%.*]] = extractelement <8 x float> [[TMP41]], i32 6
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP48]], ptr [[TMP39]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP49:%.*]] = extractelement <8 x float> [[TMP41]], i32 7
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP49]], ptr [[TMP40]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP50:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[TMP50]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; DISABLE-SHUFFLE-PADDING:       middle.block:
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[SCALAR_PH]]
+; DISABLE-SHUFFLE-PADDING:       scalar.ph:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_BODY:%.*]]
+; DISABLE-SHUFFLE-PADDING:       for.cond.cleanup.loopexit:
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_COND_CLEANUP]]
+; DISABLE-SHUFFLE-PADDING:       for.cond.cleanup:
+; DISABLE-SHUFFLE-PADDING-NEXT:    ret void
+; DISABLE-SHUFFLE-PADDING:       for.body:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[INDVARS_IV]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP51:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[MUL:%.*]] = fmul fast float [[TMP51]], 0x40019999A0000000
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[MUL]], ptr [[ARRAYIDX]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP52:%.*]] = load float, ptr [[Y]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[MUL6:%.*]] = fmul fast float [[TMP52]], 0x40019999A0000000
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[MUL6]], ptr [[Y]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+
+entry:
+  %cmp19 = icmp sgt i32 %num, 0
+  br i1 %cmp19, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %wide.trip.count = zext i32 %num to i64
+  br label %for.body
+
+for.cond.cleanup.loopexit:                        ; preds = %for.body
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds %struct.patic, ptr %ps, i64 %indvars.iv
+  %0 = load float, ptr %arrayidx, align 4
+  %mul = fmul fast float %0, 0x40019999A0000000
+  store float %mul, ptr %arrayidx, align 4
+  %y = getelementptr inbounds %struct.patic, ptr %arrayidx, i64 0, i32 1
+  %1 = load float, ptr %y, align 4
+  %mul6 = fmul fast float %1, 0x40019999A0000000
+  store float %mul6, ptr %y, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body
+}
+
+; for (int i = 0; i < num; i++) {
+;   ps[i].x = factor * ps[i].x;
+;   ps[i].z = factor * ps[i].z;
+; }
+;
+define void @test1(i32 noundef %num, ptr nocapture noundef %ps) {
+; ENABLE-SHUFFLE-PADDING-LABEL: define void @test1(
+; ENABLE-SHUFFLE-PADDING-SAME: i32 noundef [[NUM:%.*]], ptr nocapture noundef [[PS:%.*]]) #[[ATTR0]] {
+; ENABLE-SHUFFLE-PADDING-NEXT:  entry:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[CMP19:%.*]] = icmp sgt i32 [[NUM]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP19]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; ENABLE-SHUFFLE-PADDING:       for.body.preheader:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[NUM]] to i64
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 8
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; ENABLE-SHUFFLE-PADDING:       vector.ph:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 8
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[VECTOR_BODY:%.*]]
+; ENABLE-SHUFFLE-PADDING:       vector.body:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_PATIC:%.*]], ptr [[PS]], i64 [[TMP0]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP2:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[LDN:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> [[TMP3]], ptr [[TMP2]])
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP4:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP5:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP4]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP6:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP7:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP6]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP8:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP9:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP8]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP10:%.*]] = fmul fast <8 x float> [[TMP9]], <float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP1]], i64 0, i32 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP12:%.*]] = fmul fast <8 x float> [[TMP5]], <float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP11]], i32 -2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP14:%.*]] = shufflevector <8 x float> [[TMP10]], <8 x float> [[TMP7]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP15:%.*]] = shufflevector <8 x float> [[TMP12]], <8 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP16:%.*]] = shufflevector <16 x float> [[TMP14]], <16 x float> [[TMP15]], <24 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP17:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP18:%.*]] = shufflevector <24 x float> [[TMP16]], <24 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP19:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP18]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP20:%.*]] = shufflevector <24 x float> [[TMP16]], <24 x float> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP21:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP20]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP22:%.*]] = shufflevector <24 x float> [[TMP16]], <24 x float> poison, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP23:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP22]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    call void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float> [[TMP19]], <vscale x 4 x float> [[TMP21]], <vscale x 4 x float> [[TMP23]], <vscale x 4 x i1> [[TMP17]], ptr [[TMP13]])
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; ENABLE-SHUFFLE-PADDING:       middle.block:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; ENABLE-SHUFFLE-PADDING:       scalar.ph:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_BODY:%.*]]
+; ENABLE-SHUFFLE-PADDING:       for.cond.cleanup.loopexit:
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_COND_CLEANUP]]
+; ENABLE-SHUFFLE-PADDING:       for.cond.cleanup:
+; ENABLE-SHUFFLE-PADDING-NEXT:    ret void
+; ENABLE-SHUFFLE-PADDING:       for.body:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[INDVARS_IV]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP25:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[MUL:%.*]] = fmul fast float [[TMP25]], 0x40019999A0000000
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[MUL]], ptr [[ARRAYIDX]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[Z:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP26:%.*]] = load float, ptr [[Z]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[MUL6:%.*]] = fmul fast float [[TMP26]], 0x40019999A0000000
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[MUL6]], ptr [[Z]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+;
+; DISABLE-SHUFFLE-PADDING-LABEL: define void @test1(
+; DISABLE-SHUFFLE-PADDING-SAME: i32 noundef [[NUM:%.*]], ptr nocapture noundef [[PS:%.*]]) #[[ATTR0]] {
+; DISABLE-SHUFFLE-PADDING-NEXT:  entry:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[CMP19:%.*]] = icmp sgt i32 [[NUM]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP19]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; DISABLE-SHUFFLE-PADDING:       for.body.preheader:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[NUM]] to i64
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 8
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; DISABLE-SHUFFLE-PADDING:       vector.ph:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 8
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[VECTOR_BODY:%.*]]
+; DISABLE-SHUFFLE-PADDING:       vector.body:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 3
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP4:%.*]] = add i64 [[INDEX]], 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP5:%.*]] = add i64 [[INDEX]], 5
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP6:%.*]] = add i64 [[INDEX]], 6
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP7:%.*]] = add i64 [[INDEX]], 7
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_PATIC:%.*]], ptr [[PS]], i64 [[TMP0]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP1]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP2]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP3]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP4]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP5]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP6]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP7]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP17:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[LDN:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> [[TMP17]], ptr [[TMP16]])
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP18:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP19:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP18]], i64 0)
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP20:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP21:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP20]], i64 0)
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP22:%.*]] = fmul fast <8 x float> [[TMP21]], <float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000>
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP23:%.*]] = extractelement <8 x float> [[TMP22]], i32 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP23]], ptr [[TMP8]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP24:%.*]] = extractelement <8 x float> [[TMP22]], i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP24]], ptr [[TMP9]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP25:%.*]] = extractelement <8 x float> [[TMP22]], i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP25]], ptr [[TMP10]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP26:%.*]] = extractelement <8 x float> [[TMP22]], i32 3
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP26]], ptr [[TMP11]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP27:%.*]] = extractelement <8 x float> [[TMP22]], i32 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP27]], ptr [[TMP12]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP28:%.*]] = extractelement <8 x float> [[TMP22]], i32 5
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP28]], ptr [[TMP13]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP29:%.*]] = extractelement <8 x float> [[TMP22]], i32 6
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP29]], ptr [[TMP14]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP30:%.*]] = extractelement <8 x float> [[TMP22]], i32 7
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP30]], ptr [[TMP15]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP8]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP9]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP10]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP11]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP12]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP13]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP37:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP14]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP15]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP39:%.*]] = fmul fast <8 x float> [[TMP19]], <float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000>
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP40:%.*]] = extractelement <8 x float> [[TMP39]], i32 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP40]], ptr [[TMP31]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP41:%.*]] = extractelement <8 x float> [[TMP39]], i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP41]], ptr [[TMP32]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP42:%.*]] = extractelement <8 x float> [[TMP39]], i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP42]], ptr [[TMP33]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP43:%.*]] = extractelement <8 x float> [[TMP39]], i32 3
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP43]], ptr [[TMP34]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP44:%.*]] = extractelement <8 x float> [[TMP39]], i32 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP44]], ptr [[TMP35]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP45:%.*]] = extractelement <8 x float> [[TMP39]], i32 5
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP45]], ptr [[TMP36]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP46:%.*]] = extractelement <8 x float> [[TMP39]], i32 6
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP46]], ptr [[TMP37]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP47:%.*]] = extractelement <8 x float> [[TMP39]], i32 7
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP47]], ptr [[TMP38]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[TMP48]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; DISABLE-SHUFFLE-PADDING:       middle.block:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; DISABLE-SHUFFLE-PADDING:       scalar.ph:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_BODY:%.*]]
+; DISABLE-SHUFFLE-PADDING:       for.cond.cleanup.loopexit:
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_COND_CLEANUP]]
+; DISABLE-SHUFFLE-PADDING:       for.cond.cleanup:
+; DISABLE-SHUFFLE-PADDING-NEXT:    ret void
+; DISABLE-SHUFFLE-PADDING:       for.body:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[INDVARS_IV]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP49:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[MUL:%.*]] = fmul fast float [[TMP49]], 0x40019999A0000000
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[MUL]], ptr [[ARRAYIDX]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[Z:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP50:%.*]] = load float, ptr [[Z]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[MUL6:%.*]] = fmul fast float [[TMP50]], 0x40019999A0000000
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[MUL6]], ptr [[Z]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+;
+
+entry:
+  %cmp19 = icmp sgt i32 %num, 0
+  br i1 %cmp19, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %wide.trip.count = zext i32 %num to i64
+  br label %for.body
+
+for.cond.cleanup.loopexit:                        ; preds = %for.body
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds %struct.patic, ptr %ps, i64 %indvars.iv
+  %0 = load float, ptr %arrayidx, align 4
+  %mul = fmul fast float %0, 0x40019999A0000000
+  store float %mul, ptr %arrayidx, align 4
+  %z = getelementptr inbounds %struct.patic, ptr %arrayidx, i64 0, i32 2
+  %1 = load float, ptr %z, align 4
+  %mul6 = fmul fast float %1, 0x40019999A0000000
+  store float %mul6, ptr %z, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body
+}
+
+; for (int i = 0; i < num; i++) {
+;   ps[i].y = factor * ps[i].y;
+;   ps[i].z = factor * ps[i].z;
+; }
+;
+define void @test2(i32 noundef %num, ptr nocapture noundef %ps) {
+; ENABLE-SHUFFLE-PADDING-LABEL: define void @test2(
+; ENABLE-SHUFFLE-PADDING-SAME: i32 noundef [[NUM:%.*]], ptr nocapture noundef [[PS:%.*]]) #[[ATTR0]] {
+; ENABLE-SHUFFLE-PADDING-NEXT:  entry:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[CMP19:%.*]] = icmp sgt i32 [[NUM]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP19]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; ENABLE-SHUFFLE-PADDING:       for.body.preheader:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[NUM]] to i64
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[WIDE_TRIP_COUNT]], 8
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; ENABLE-SHUFFLE-PADDING:       vector.ph:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 8
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP0:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP1:%.*]] = select i1 [[TMP0]], i64 8, i64 [[N_MOD_VF]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[VECTOR_BODY:%.*]]
+; ENABLE-SHUFFLE-PADDING:       vector.body:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_PATIC:%.*]], ptr [[PS]], i64 [[TMP2]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP3]], i64 0, i32 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP5:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[LDN:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> [[TMP6]], ptr [[TMP5]])
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP7:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP8:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP7]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP10:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP9]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP12:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP11]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP13:%.*]] = fmul fast <8 x float> [[TMP12]], <float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP3]], i64 0, i32 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP15:%.*]] = fmul fast <8 x float> [[TMP10]], <float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP14]], i32 -1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP17:%.*]] = shufflevector <8 x float> [[TMP13]], <8 x float> [[TMP15]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP18:%.*]] = shufflevector <8 x float> [[TMP8]], <8 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP19:%.*]] = shufflevector <16 x float> [[TMP17]], <16 x float> [[TMP18]], <24 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP20:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP21:%.*]] = shufflevector <24 x float> [[TMP19]], <24 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP22:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP21]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP23:%.*]] = shufflevector <24 x float> [[TMP19]], <24 x float> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP24:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP23]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP25:%.*]] = shufflevector <24 x float> [[TMP19]], <24 x float> poison, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP26:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP25]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    call void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float> [[TMP22]], <vscale x 4 x float> [[TMP24]], <vscale x 4 x float> [[TMP26]], <vscale x 4 x i1> [[TMP20]], ptr [[TMP16]])
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; ENABLE-SHUFFLE-PADDING:       middle.block:
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[SCALAR_PH]]
+; ENABLE-SHUFFLE-PADDING:       scalar.ph:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_BODY:%.*]]
+; ENABLE-SHUFFLE-PADDING:       for.cond.cleanup.loopexit:
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_COND_CLEANUP]]
+; ENABLE-SHUFFLE-PADDING:       for.cond.cleanup:
+; ENABLE-SHUFFLE-PADDING-NEXT:    ret void
+; ENABLE-SHUFFLE-PADDING:       for.body:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[INDVARS_IV]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP28:%.*]] = load float, ptr [[Y]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[MUL:%.*]] = fmul fast float [[TMP28]], 0x40019999A0000000
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[MUL]], ptr [[Y]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[Z:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP29:%.*]] = load float, ptr [[Z]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[MUL6:%.*]] = fmul fast float [[TMP29]], 0x40019999A0000000
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[MUL6]], ptr [[Z]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+;
+; DISABLE-SHUFFLE-PADDING-LABEL: define void @test2(
+; DISABLE-SHUFFLE-PADDING-SAME: i32 noundef [[NUM:%.*]], ptr nocapture noundef [[PS:%.*]]) #[[ATTR0]] {
+; DISABLE-SHUFFLE-PADDING-NEXT:  entry:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[CMP19:%.*]] = icmp sgt i32 [[NUM]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP19]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; DISABLE-SHUFFLE-PADDING:       for.body.preheader:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[NUM]] to i64
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[WIDE_TRIP_COUNT]], 8
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; DISABLE-SHUFFLE-PADDING:       vector.ph:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 8
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP0:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP1:%.*]] = select i1 [[TMP0]], i64 8, i64 [[N_MOD_VF]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[VECTOR_BODY:%.*]]
+; DISABLE-SHUFFLE-PADDING:       vector.body:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP4:%.*]] = add i64 [[INDEX]], 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP5:%.*]] = add i64 [[INDEX]], 3
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP6:%.*]] = add i64 [[INDEX]], 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP7:%.*]] = add i64 [[INDEX]], 5
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP8:%.*]] = add i64 [[INDEX]], 6
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP9:%.*]] = add i64 [[INDEX]], 7
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_PATIC:%.*]], ptr [[PS]], i64 [[TMP2]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP3]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP4]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP5]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP6]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP7]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP8]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP9]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP10]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP11]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP12]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP13]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP14]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP15]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP16]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP17]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i32 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP27:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[LDN:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> [[TMP27]], ptr [[TMP26]])
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP28:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP29:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP28]], i64 0)
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP30:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP31:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP30]], i64 0)
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP32:%.*]] = fmul fast <8 x float> [[TMP31]], <float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000>
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP33:%.*]] = extractelement <8 x float> [[TMP32]], i32 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP33]], ptr [[TMP18]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP34:%.*]] = extractelement <8 x float> [[TMP32]], i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP34]], ptr [[TMP19]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP35:%.*]] = extractelement <8 x float> [[TMP32]], i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP35]], ptr [[TMP20]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP36:%.*]] = extractelement <8 x float> [[TMP32]], i32 3
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP36]], ptr [[TMP21]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP37:%.*]] = extractelement <8 x float> [[TMP32]], i32 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP37]], ptr [[TMP22]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP38:%.*]] = extractelement <8 x float> [[TMP32]], i32 5
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP38]], ptr [[TMP23]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP39:%.*]] = extractelement <8 x float> [[TMP32]], i32 6
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP39]], ptr [[TMP24]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP40:%.*]] = extractelement <8 x float> [[TMP32]], i32 7
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP40]], ptr [[TMP25]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP10]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP42:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP11]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP43:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP12]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP13]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP14]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP15]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP47:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP16]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP17]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP49:%.*]] = fmul fast <8 x float> [[TMP29]], <float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000, float 0x40019999A0000000>
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP50:%.*]] = extractelement <8 x float> [[TMP49]], i32 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP50]], ptr [[TMP41]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP51:%.*]] = extractelement <8 x float> [[TMP49]], i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP51]], ptr [[TMP42]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP52:%.*]] = extractelement <8 x float> [[TMP49]], i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP52]], ptr [[TMP43]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP53:%.*]] = extractelement <8 x float> [[TMP49]], i32 3
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP53]], ptr [[TMP44]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP54:%.*]] = extractelement <8 x float> [[TMP49]], i32 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP54]], ptr [[TMP45]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP55:%.*]] = extractelement <8 x float> [[TMP49]], i32 5
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP55]], ptr [[TMP46]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP56:%.*]] = extractelement <8 x float> [[TMP49]], i32 6
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP56]], ptr [[TMP47]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP57:%.*]] = extractelement <8 x float> [[TMP49]], i32 7
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP57]], ptr [[TMP48]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP58:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[TMP58]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; DISABLE-SHUFFLE-PADDING:       middle.block:
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[SCALAR_PH]]
+; DISABLE-SHUFFLE-PADDING:       scalar.ph:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_BODY:%.*]]
+; DISABLE-SHUFFLE-PADDING:       for.cond.cleanup.loopexit:
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_COND_CLEANUP]]
+; DISABLE-SHUFFLE-PADDING:       for.cond.cleanup:
+; DISABLE-SHUFFLE-PADDING-NEXT:    ret void
+; DISABLE-SHUFFLE-PADDING:       for.body:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[INDVARS_IV]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP59:%.*]] = load float, ptr [[Y]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[MUL:%.*]] = fmul fast float [[TMP59]], 0x40019999A0000000
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[MUL]], ptr [[Y]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[Z:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP60:%.*]] = load float, ptr [[Z]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[MUL6:%.*]] = fmul fast float [[TMP60]], 0x40019999A0000000
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[MUL6]], ptr [[Z]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+;
+
+entry:
+  %cmp19 = icmp sgt i32 %num, 0
+  br i1 %cmp19, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %wide.trip.count = zext i32 %num to i64
+  br label %for.body
+
+for.cond.cleanup.loopexit:                        ; preds = %for.body
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds %struct.patic, ptr %ps, i64 %indvars.iv
+  %y = getelementptr inbounds %struct.patic, ptr %arrayidx, i64 0, i32 1
+  %0 = load float, ptr %y, align 4
+  %mul = fmul fast float %0, 0x40019999A0000000
+  store float %mul, ptr %y, align 4
+  %z = getelementptr inbounds %struct.patic, ptr %arrayidx, i64 0, i32 2
+  %1 = load float, ptr %z, align 4
+  %mul6 = fmul fast float %1, 0x40019999A0000000
+  store float %mul6, ptr %z, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body
+}
+
+; Currently, we don't support the following scenario, as shuffle padding
+; requires that the stored values be loaded from the memory they are stored to
+; (possibly with some operation applied to them).
+;
+; for (int i = 0; i < num; i++) {
+;   ps[i].x = i;
+;   ps[i].y = i + 1;
+; }
+;
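+; A hedged sketch (not exercised by this test) of a form that would satisfy
+; the restriction, following the pattern of @test2 above:
+;
+;   for (int i = 0; i < num; i++)
+;     ps[i].y = factor * ps[i].y;  // the stored value is computed from a load
+;                                  // of the same field, so a matched load
+;                                  // group exists and can be padded
+;
+; In @test3 the stored values are derived from the induction variable instead,
+; so there is no matched load group and shuffle padding does not apply.
+;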
+define dso_local void @test3(i32 noundef %num, ptr nocapture noundef writeonly %ps) {
+; ENABLE-SHUFFLE-PADDING-LABEL: define dso_local void @test3(
+; ENABLE-SHUFFLE-PADDING-SAME: i32 noundef [[NUM:%.*]], ptr nocapture noundef writeonly [[PS:%.*]]) #[[ATTR0]] {
+; ENABLE-SHUFFLE-PADDING-NEXT:  entry:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[NUM]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP10]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; ENABLE-SHUFFLE-PADDING:       for.body.preheader:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[NUM]] to i64
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; ENABLE-SHUFFLE-PADDING:       vector.ph:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[VECTOR_BODY:%.*]]
+; ENABLE-SHUFFLE-PADDING:       vector.body:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[OFFSET_IDX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP0:%.*]] = trunc i64 [[OFFSET_IDX]] to i32
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP1:%.*]] = add i32 [[TMP0]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP2:%.*]] = add i32 [[TMP0]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP5:%.*]] = sitofp i32 [[TMP1]] to float
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP6:%.*]] = sitofp i32 [[TMP2]] to float
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_PATIC:%.*]], ptr [[PS]], i64 [[TMP3]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP4]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP5]], ptr [[TMP7]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP6]], ptr [[TMP8]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP9:%.*]] = add nuw nsw i64 [[TMP3]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP10:%.*]] = add nuw nsw i64 [[TMP4]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP11:%.*]] = trunc i64 [[TMP9]] to i32
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP10]] to i32
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP13:%.*]] = sitofp i32 [[TMP11]] to float
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP14:%.*]] = sitofp i32 [[TMP12]] to float
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP7]], i64 0, i32 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP8]], i64 0, i32 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP13]], ptr [[TMP15]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP14]], ptr [[TMP16]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; ENABLE-SHUFFLE-PADDING:       middle.block:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; ENABLE-SHUFFLE-PADDING:       scalar.ph:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_BODY:%.*]]
+; ENABLE-SHUFFLE-PADDING:       for.cond.cleanup.loopexit:
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_COND_CLEANUP]]
+; ENABLE-SHUFFLE-PADDING:       for.cond.cleanup:
+; ENABLE-SHUFFLE-PADDING-NEXT:    ret void
+; ENABLE-SHUFFLE-PADDING:       for.body:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP18:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP18]] to float
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[INDVARS_IV]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[CONV]], ptr [[ARRAYIDX]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP19:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[CONV1:%.*]] = sitofp i32 [[TMP19]] to float
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[CONV1]], ptr [[Y]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+;
+; DISABLE-SHUFFLE-PADDING-LABEL: define dso_local void @test3(
+; DISABLE-SHUFFLE-PADDING-SAME: i32 noundef [[NUM:%.*]], ptr nocapture noundef writeonly [[PS:%.*]]) #[[ATTR0]] {
+; DISABLE-SHUFFLE-PADDING-NEXT:  entry:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[CMP10:%.*]] = icmp sgt i32 [[NUM]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP10]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; DISABLE-SHUFFLE-PADDING:       for.body.preheader:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[NUM]] to i64
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; DISABLE-SHUFFLE-PADDING:       vector.ph:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[VECTOR_BODY:%.*]]
+; DISABLE-SHUFFLE-PADDING:       vector.body:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[OFFSET_IDX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP0:%.*]] = trunc i64 [[OFFSET_IDX]] to i32
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP1:%.*]] = add i32 [[TMP0]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP2:%.*]] = add i32 [[TMP0]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP5:%.*]] = sitofp i32 [[TMP1]] to float
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP6:%.*]] = sitofp i32 [[TMP2]] to float
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_PATIC:%.*]], ptr [[PS]], i64 [[TMP3]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP4]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP5]], ptr [[TMP7]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP6]], ptr [[TMP8]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP9:%.*]] = add nuw nsw i64 [[TMP3]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP10:%.*]] = add nuw nsw i64 [[TMP4]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP11:%.*]] = trunc i64 [[TMP9]] to i32
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP12:%.*]] = trunc i64 [[TMP10]] to i32
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP13:%.*]] = sitofp i32 [[TMP11]] to float
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP14:%.*]] = sitofp i32 [[TMP12]] to float
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP7]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP8]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP13]], ptr [[TMP15]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP14]], ptr [[TMP16]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; DISABLE-SHUFFLE-PADDING:       middle.block:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; DISABLE-SHUFFLE-PADDING:       scalar.ph:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_BODY:%.*]]
+; DISABLE-SHUFFLE-PADDING:       for.cond.cleanup.loopexit:
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_COND_CLEANUP]]
+; DISABLE-SHUFFLE-PADDING:       for.cond.cleanup:
+; DISABLE-SHUFFLE-PADDING-NEXT:    ret void
+; DISABLE-SHUFFLE-PADDING:       for.body:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP18:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[CONV:%.*]] = sitofp i32 [[TMP18]] to float
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[INDVARS_IV]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[CONV]], ptr [[ARRAYIDX]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP19:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[CONV1:%.*]] = sitofp i32 [[TMP19]] to float
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[Y:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[CONV1]], ptr [[Y]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+;
+entry:
+  %cmp10 = icmp sgt i32 %num, 0
+  br i1 %cmp10, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %wide.trip.count = zext i32 %num to i64
+  br label %for.body
+
+for.cond.cleanup.loopexit:                        ; preds = %for.body
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %0 = trunc i64 %indvars.iv to i32
+  %conv = sitofp i32 %0 to float
+  %arrayidx = getelementptr inbounds %struct.patic, ptr %ps, i64 %indvars.iv
+  store float %conv, ptr %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %1 = trunc i64 %indvars.iv.next to i32
+  %conv1 = sitofp i32 %1 to float
+  %y = getelementptr inbounds %struct.patic, ptr %arrayidx, i64 0, i32 1
+  store float %conv1, ptr %y, align 4
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body
+}
+
+; The key feature of this case is that the load group has no gap (all members
+; are used), but the store group has a gap (ps[i].z is not stored).
+;
+; for (int i = 0; i < num; i++) {
+;   ps[i].x += ps[i].y;
+;   ps[i].y += ps[i].z;
+; }
+;
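+; A hedged sketch of the expected padding, assuming the matched load group is
+; the ld3 of {x, y, z}: the unwritten z lane of the store group is filled from
+; the z values returned by that ld3, so a full-width st3 can be emitted instead
+; of per-lane extract-and-store sequences; conceptually
+;
+;   ps[i].x = ps[i].x + ps[i].y;
+;   ps[i].y = ps[i].y + ps[i].z;
+;   ps[i].z = ps[i].z;            // padding lane, re-stores the loaded value
+;
+; which matches the @llvm.aarch64.sve.st3.nxv4f32 calls checked below.
+;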
+define dso_local void @test4(i32 noundef %num, ptr nocapture noundef %ps, i32 noundef %x, i32 noundef %y, i32 noundef %z) {
+; ENABLE-SHUFFLE-PADDING-LABEL: define dso_local void @test4(
+; ENABLE-SHUFFLE-PADDING-SAME: i32 noundef [[NUM:%.*]], ptr nocapture noundef [[PS:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]], i32 noundef [[Z:%.*]]) #[[ATTR0]] {
+; ENABLE-SHUFFLE-PADDING-NEXT:  entry:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[CMP20:%.*]] = icmp sgt i32 [[NUM]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP20]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; ENABLE-SHUFFLE-PADDING:       for.body.preheader:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[NUM]] to i64
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; ENABLE-SHUFFLE-PADDING:       vector.ph:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[VECTOR_BODY:%.*]]
+; ENABLE-SHUFFLE-PADDING:       vector.body:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 8
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_PATIC:%.*]], ptr [[PS]], i64 [[TMP0]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP1]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP2]], i64 0, i32 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP3]], i64 0, i32 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP6:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 -1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP7:%.*]] = getelementptr inbounds float, ptr [[TMP5]], i32 -1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP8:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[LDN:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> [[TMP8]], ptr [[TMP6]])
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP9:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP10:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP9]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP12:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP11]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP14:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP13]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP15:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[LDN8:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> [[TMP15]], ptr [[TMP7]])
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP16:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN8]], 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP17:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP16]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP18:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN8]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP19:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP18]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP20:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN8]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP21:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP20]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP22:%.*]] = fadd contract <8 x float> [[TMP12]], [[TMP14]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP23:%.*]] = fadd contract <8 x float> [[TMP19]], [[TMP21]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP24:%.*]] = fadd contract <8 x float> [[TMP12]], [[TMP10]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP25:%.*]] = fadd contract <8 x float> [[TMP19]], [[TMP17]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP26:%.*]] = shufflevector <8 x float> [[TMP22]], <8 x float> [[TMP24]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP27:%.*]] = shufflevector <8 x float> [[TMP10]], <8 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP28:%.*]] = shufflevector <16 x float> [[TMP26]], <16 x float> [[TMP27]], <24 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP29:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP30:%.*]] = shufflevector <24 x float> [[TMP28]], <24 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP31:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP30]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP32:%.*]] = shufflevector <24 x float> [[TMP28]], <24 x float> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP33:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP32]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP34:%.*]] = shufflevector <24 x float> [[TMP28]], <24 x float> poison, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP35:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP34]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    call void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float> [[TMP31]], <vscale x 4 x float> [[TMP33]], <vscale x 4 x float> [[TMP35]], <vscale x 4 x i1> [[TMP29]], ptr [[TMP6]])
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP36:%.*]] = shufflevector <8 x float> [[TMP23]], <8 x float> [[TMP25]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP37:%.*]] = shufflevector <8 x float> [[TMP17]], <8 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP38:%.*]] = shufflevector <16 x float> [[TMP36]], <16 x float> [[TMP37]], <24 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP39:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP40:%.*]] = shufflevector <24 x float> [[TMP38]], <24 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP41:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP40]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP42:%.*]] = shufflevector <24 x float> [[TMP38]], <24 x float> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP43:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP42]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP44:%.*]] = shufflevector <24 x float> [[TMP38]], <24 x float> poison, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP45:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP44]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    call void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float> [[TMP41]], <vscale x 4 x float> [[TMP43]], <vscale x 4 x float> [[TMP45]], <vscale x 4 x i1> [[TMP39]], ptr [[TMP7]])
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[TMP46]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; ENABLE-SHUFFLE-PADDING:       middle.block:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; ENABLE-SHUFFLE-PADDING:       scalar.ph:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_BODY:%.*]]
+; ENABLE-SHUFFLE-PADDING:       for.cond.cleanup.loopexit:
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_COND_CLEANUP]]
+; ENABLE-SHUFFLE-PADDING:       for.cond.cleanup:
+; ENABLE-SHUFFLE-PADDING-NEXT:    ret void
+; ENABLE-SHUFFLE-PADDING:       for.body:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[INDVARS_IV]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[Y1:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP47:%.*]] = load float, ptr [[Y1]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP48:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[ADD:%.*]] = fadd contract float [[TMP47]], [[TMP48]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[ADD]], ptr [[ARRAYIDX]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[Z7:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP49:%.*]] = load float, ptr [[Z7]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[ADD11:%.*]] = fadd contract float [[TMP47]], [[TMP49]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[ADD11]], ptr [[Y1]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+;
+; DISABLE-SHUFFLE-PADDING-LABEL: define dso_local void @test4(
+; DISABLE-SHUFFLE-PADDING-SAME: i32 noundef [[NUM:%.*]], ptr nocapture noundef [[PS:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]], i32 noundef [[Z:%.*]]) #[[ATTR0]] {
+; DISABLE-SHUFFLE-PADDING-NEXT:  entry:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[CMP20:%.*]] = icmp sgt i32 [[NUM]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP20]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; DISABLE-SHUFFLE-PADDING:       for.body.preheader:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[NUM]] to i64
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 8
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; DISABLE-SHUFFLE-PADDING:       vector.ph:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 8
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[VECTOR_BODY:%.*]]
+; DISABLE-SHUFFLE-PADDING:       vector.body:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 3
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP4:%.*]] = add i64 [[INDEX]], 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP5:%.*]] = add i64 [[INDEX]], 5
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP6:%.*]] = add i64 [[INDEX]], 6
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP7:%.*]] = add i64 [[INDEX]], 7
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_PATIC:%.*]], ptr [[PS]], i64 [[TMP0]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP1]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP2]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP3]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP4]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP5]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP6]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP7]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP8]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP9]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP10]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP11]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP12]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP13]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP14]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP15]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP24:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 -1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP25:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[LDN:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> [[TMP25]], ptr [[TMP24]])
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP26:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP27:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP26]], i64 0)
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP28:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP29:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP28]], i64 0)
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP30:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP31:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP30]], i64 0)
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP32:%.*]] = fadd contract <8 x float> [[TMP29]], [[TMP31]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP33:%.*]] = extractelement <8 x float> [[TMP32]], i32 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP33]], ptr [[TMP8]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP34:%.*]] = extractelement <8 x float> [[TMP32]], i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP34]], ptr [[TMP9]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP35:%.*]] = extractelement <8 x float> [[TMP32]], i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP35]], ptr [[TMP10]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP36:%.*]] = extractelement <8 x float> [[TMP32]], i32 3
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP36]], ptr [[TMP11]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP37:%.*]] = extractelement <8 x float> [[TMP32]], i32 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP37]], ptr [[TMP12]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP38:%.*]] = extractelement <8 x float> [[TMP32]], i32 5
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP38]], ptr [[TMP13]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP39:%.*]] = extractelement <8 x float> [[TMP32]], i32 6
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP39]], ptr [[TMP14]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP40:%.*]] = extractelement <8 x float> [[TMP32]], i32 7
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP40]], ptr [[TMP15]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP41:%.*]] = fadd contract <8 x float> [[TMP29]], [[TMP27]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP42:%.*]] = extractelement <8 x float> [[TMP41]], i32 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP42]], ptr [[TMP16]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP43:%.*]] = extractelement <8 x float> [[TMP41]], i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP43]], ptr [[TMP17]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP44:%.*]] = extractelement <8 x float> [[TMP41]], i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP44]], ptr [[TMP18]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP45:%.*]] = extractelement <8 x float> [[TMP41]], i32 3
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP45]], ptr [[TMP19]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP46:%.*]] = extractelement <8 x float> [[TMP41]], i32 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP46]], ptr [[TMP20]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP47:%.*]] = extractelement <8 x float> [[TMP41]], i32 5
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP47]], ptr [[TMP21]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP48:%.*]] = extractelement <8 x float> [[TMP41]], i32 6
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP48]], ptr [[TMP22]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP49:%.*]] = extractelement <8 x float> [[TMP41]], i32 7
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[TMP49]], ptr [[TMP23]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP50:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[TMP50]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; DISABLE-SHUFFLE-PADDING:       middle.block:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; DISABLE-SHUFFLE-PADDING:       scalar.ph:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_BODY:%.*]]
+; DISABLE-SHUFFLE-PADDING:       for.cond.cleanup.loopexit:
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_COND_CLEANUP]]
+; DISABLE-SHUFFLE-PADDING:       for.cond.cleanup:
+; DISABLE-SHUFFLE-PADDING-NEXT:    ret void
+; DISABLE-SHUFFLE-PADDING:       for.body:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[INDVARS_IV]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[Y1:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP51:%.*]] = load float, ptr [[Y1]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP52:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[ADD:%.*]] = fadd contract float [[TMP51]], [[TMP52]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[ADD]], ptr [[ARRAYIDX]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[Z7:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 2
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP53:%.*]] = load float, ptr [[Z7]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[ADD11:%.*]] = fadd contract float [[TMP51]], [[TMP53]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[ADD11]], ptr [[Y1]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+;
+entry:
+  %cmp20 = icmp sgt i32 %num, 0
+  br i1 %cmp20, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %wide.trip.count = zext i32 %num to i64
+  br label %for.body
+
+for.cond.cleanup.loopexit:                        ; preds = %for.body
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds %struct.patic, ptr %ps, i64 %indvars.iv
+  %y1 = getelementptr inbounds %struct.patic, ptr %arrayidx, i64 0, i32 1
+  %0 = load float, ptr %y1, align 4
+  %1 = load float, ptr %arrayidx, align 4
+  %add = fadd contract float %0, %1
+  store float %add, ptr %arrayidx, align 4
+  %z7 = getelementptr inbounds %struct.patic, ptr %arrayidx, i64 0, i32 2
+  %2 = load float, ptr %z7, align 4
+  %add11 = fadd contract float %0, %2
+  store float %add11, ptr %y1, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body
+}
+
+; The key feature of this case is that the struct members are cross-used by
+; each other; this is also supported.
+;
+; for (int i = 0; i < num; i++) {
+;   ps[i].x += ps[i].y;
+;   ps[i].y += ps[i].x;
+; }
+;
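+; A hedged sketch, assuming the same padding scheme as @test4: both operands of
+; each add still originate from the ld3 of the same struct memory, so the
+; matched-load-group condition holds even though the fields read and written
+; cross over; conceptually
+;
+;   ps[i].x = ps[i].x + ps[i].y;
+;   ps[i].y = ps[i].y + ps[i].x;  // uses the updated ps[i].x
+;   ps[i].z = ps[i].z;            // padding lane, re-stores the loaded value
+;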
+define dso_local void @test5(i32 noundef %num, ptr nocapture noundef %ps, i32 noundef %x, i32 noundef %y, i32 noundef %z) {
+; ENABLE-SHUFFLE-PADDING-LABEL: define dso_local void @test5(
+; ENABLE-SHUFFLE-PADDING-SAME: i32 noundef [[NUM:%.*]], ptr nocapture noundef [[PS:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]], i32 noundef [[Z:%.*]]) #[[ATTR0]] {
+; ENABLE-SHUFFLE-PADDING-NEXT:  entry:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[CMP20:%.*]] = icmp sgt i32 [[NUM]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP20]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; ENABLE-SHUFFLE-PADDING:       for.body.preheader:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[NUM]] to i64
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[WIDE_TRIP_COUNT]], 16
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; ENABLE-SHUFFLE-PADDING:       vector.ph:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP0:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP1:%.*]] = select i1 [[TMP0]], i64 16, i64 [[N_MOD_VF]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[VECTOR_BODY:%.*]]
+; ENABLE-SHUFFLE-PADDING:       vector.body:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 8
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_PATIC:%.*]], ptr [[PS]], i64 [[TMP2]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[TMP3]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP4]], i64 0, i32 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[TMP5]], i64 0, i32 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP6]], i32 -1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP7]], i32 -1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP10:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[LDN:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> [[TMP10]], ptr [[TMP8]])
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP12:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP11]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP13:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP14:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP13]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP15:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP16:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP15]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP17:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[LDN8:%.*]] = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld3.sret.nxv4f32(<vscale x 4 x i1> [[TMP17]], ptr [[TMP9]])
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP18:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN8]], 2
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP19:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP18]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP20:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN8]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP21:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP20]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP22:%.*]] = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } [[LDN8]], 0
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP23:%.*]] = call <8 x float> @llvm.vector.extract.v8f32.nxv4f32(<vscale x 4 x float> [[TMP22]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP24:%.*]] = fadd contract <8 x float> [[TMP14]], [[TMP16]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP25:%.*]] = fadd contract <8 x float> [[TMP21]], [[TMP23]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP26:%.*]] = fadd contract <8 x float> [[TMP14]], [[TMP24]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP27:%.*]] = fadd contract <8 x float> [[TMP21]], [[TMP25]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP28:%.*]] = shufflevector <8 x float> [[TMP24]], <8 x float> [[TMP26]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP29:%.*]] = shufflevector <8 x float> [[TMP12]], <8 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP30:%.*]] = shufflevector <16 x float> [[TMP28]], <16 x float> [[TMP29]], <24 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP31:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP32:%.*]] = shufflevector <24 x float> [[TMP30]], <24 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP33:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP32]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP34:%.*]] = shufflevector <24 x float> [[TMP30]], <24 x float> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP35:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP34]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP36:%.*]] = shufflevector <24 x float> [[TMP30]], <24 x float> poison, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP37:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP36]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    call void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float> [[TMP33]], <vscale x 4 x float> [[TMP35]], <vscale x 4 x float> [[TMP37]], <vscale x 4 x i1> [[TMP31]], ptr [[TMP8]])
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP38:%.*]] = shufflevector <8 x float> [[TMP25]], <8 x float> [[TMP27]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP39:%.*]] = shufflevector <8 x float> [[TMP19]], <8 x float> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP40:%.*]] = shufflevector <16 x float> [[TMP38]], <16 x float> [[TMP39]], <24 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP41:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 8)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP42:%.*]] = shufflevector <24 x float> [[TMP40]], <24 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP43:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP42]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP44:%.*]] = shufflevector <24 x float> [[TMP40]], <24 x float> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP45:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP44]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP46:%.*]] = shufflevector <24 x float> [[TMP40]], <24 x float> poison, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP47:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v8f32(<vscale x 4 x float> undef, <8 x float> [[TMP46]], i64 0)
+; ENABLE-SHUFFLE-PADDING-NEXT:    call void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float> [[TMP43]], <vscale x 4 x float> [[TMP45]], <vscale x 4 x float> [[TMP47]], <vscale x 4 x i1> [[TMP41]], ptr [[TMP9]])
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[TMP48]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; ENABLE-SHUFFLE-PADDING:       middle.block:
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[SCALAR_PH]]
+; ENABLE-SHUFFLE-PADDING:       scalar.ph:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_BODY:%.*]]
+; ENABLE-SHUFFLE-PADDING:       for.cond.cleanup.loopexit:
+; ENABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_COND_CLEANUP]]
+; ENABLE-SHUFFLE-PADDING:       for.cond.cleanup:
+; ENABLE-SHUFFLE-PADDING-NEXT:    ret void
+; ENABLE-SHUFFLE-PADDING:       for.body:
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[PS]], i64 [[INDVARS_IV]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[Y1:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP49:%.*]] = load float, ptr [[Y1]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[TMP50:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[ADD:%.*]] = fadd contract float [[TMP49]], [[TMP50]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[ADD]], ptr [[ARRAYIDX]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[ADD11:%.*]] = fadd contract float [[TMP49]], [[ADD]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    store float [[ADD11]], ptr [[Y1]], align 4
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; ENABLE-SHUFFLE-PADDING-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; ENABLE-SHUFFLE-PADDING-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+;
+; DISABLE-SHUFFLE-PADDING-LABEL: define dso_local void @test5(
+; DISABLE-SHUFFLE-PADDING-SAME: i32 noundef [[NUM:%.*]], ptr nocapture noundef [[PS:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]], i32 noundef [[Z:%.*]]) #[[ATTR0]] {
+; DISABLE-SHUFFLE-PADDING-NEXT:  entry:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[CMP20:%.*]] = icmp sgt i32 [[NUM]], 0
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[CMP20]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; DISABLE-SHUFFLE-PADDING:       for.body.preheader:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[NUM]] to i64
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_BODY:%.*]]
+; DISABLE-SHUFFLE-PADDING:       for.cond.cleanup.loopexit:
+; DISABLE-SHUFFLE-PADDING-NEXT:    br label [[FOR_COND_CLEANUP]]
+; DISABLE-SHUFFLE-PADDING:       for.cond.cleanup:
+; DISABLE-SHUFFLE-PADDING-NEXT:    ret void
+; DISABLE-SHUFFLE-PADDING:       for.body:
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_PATIC:%.*]], ptr [[PS]], i64 [[INDVARS_IV]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[Y1:%.*]] = getelementptr inbounds [[STRUCT_PATIC]], ptr [[ARRAYIDX]], i64 0, i32 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP0:%.*]] = load float, ptr [[Y1]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[TMP1:%.*]] = load float, ptr [[ARRAYIDX]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[ADD:%.*]] = fadd contract float [[TMP0]], [[TMP1]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[ADD]], ptr [[ARRAYIDX]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[ADD11:%.*]] = fadd contract float [[TMP0]], [[ADD]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    store float [[ADD11]], ptr [[Y1]], align 4
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; DISABLE-SHUFFLE-PADDING-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
+; DISABLE-SHUFFLE-PADDING-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY]]
+;
+entry:
+  %cmp20 = icmp sgt i32 %num, 0
+  br i1 %cmp20, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  %wide.trip.count = zext i32 %num to i64
+  br label %for.body
+
+for.cond.cleanup.loopexit:                        ; preds = %for.body
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds %struct.patic, ptr %ps, i64 %indvars.iv
+  %y1 = getelementptr inbounds %struct.patic, ptr %arrayidx, i64 0, i32 1
+  %0 = load float, ptr %y1, align 4
+  %1 = load float, ptr %arrayidx, align 4
+  %add = fadd contract float %0, %1
+  store float %add, ptr %arrayidx, align 4
+  %add11 = fadd contract float %0, %add
+  store float %add11, ptr %y1, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+  br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body
+}


