[llvm] [X86][SelectionDAG] Fix the Gather's base and index by modifying the Scale value (PR #137813)

Rohit Aggarwal via llvm-commits llvm-commits at lists.llvm.org
Mon May 5 00:29:59 PDT 2025


https://github.com/rohitaggarwal007 updated https://github.com/llvm/llvm-project/pull/137813

>From 741acb05b09ff5333c0166f191e4ad2ffab88496 Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Wed, 19 Mar 2025 15:04:43 +0530
Subject: [PATCH 01/16] Fix the gather's base and index whether the Index node
 has one use or multiple uses, by folding an SHL on the index into the Scale
 so the index can then be truncated.

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |   4 +-
 llvm/lib/Target/X86/X86ISelLowering.cpp       | 143 ++++++++++++++++++
 llvm/test/CodeGen/X86/gatherBaseIndexFix.ll   |  68 +++++++++
 3 files changed, 213 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/CodeGen/X86/gatherBaseIndexFix.ll
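As context for the commit message above, here is a minimal, self-contained sketch
(illustrative only, not code from this patch; the helper name canFoldShiftIntoScale
is made up) of the arithmetic the patch relies on: when a gather's index is a
zero/sign-extended 32-bit value shifted left by a constant, Base + (X << ShAmt) * Scale
equals Base + X * (Scale << ShAmt), so the shift can be absorbed into the scale as long
as the combined scale is still a legal x86 SIB scale (1, 2, 4 or 8), which in turn lets
the index stay 32 bits wide.

  #include <cassert>
  #include <cstdint>

  // Illustrative only: returns true when a constant shift on the index can be
  // folded into the gather scale, i.e. when Scale << ShAmt is still a legal
  // x86 SIB scale (1, 2, 4 or 8).
  static bool canFoldShiftIntoScale(std::uint64_t Scale, std::uint64_t ShAmt,
                                    std::uint64_t &NewScale) {
    NewScale = Scale << ShAmt;
    return NewScale != 0 && (NewScale & (NewScale - 1)) == 0 && NewScale <= 8;
  }

  int main() {
    std::uint64_t NewScale;
    assert(canFoldShiftIntoScale(4, 1, NewScale) && NewScale == 8); // folds
    assert(!canFoldShiftIntoScale(4, 2, NewScale));                 // 16 is not a SIB scale
    return 0;
  }

The tests added further down exercise exactly this pattern on a 16-byte struct.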

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 38376de5783ae..7c51ee8222512 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -12131,8 +12131,8 @@ bool refineUniformBase(SDValue &BasePtr, SDValue &Index, bool IndexIsScaled,
   if (IndexIsScaled)
     return false;
 
-  if (!isNullConstant(BasePtr) && !Index.hasOneUse())
-    return false;
+  //  if (!isNullConstant(BasePtr) && !Index.hasOneUse())
+  //    return false;
 
   EVT VT = BasePtr.getValueType();
 
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 47ac1ee571269..61e6d0734f402 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -56512,6 +56512,120 @@ static SDValue rebuildGatherScatter(MaskedGatherScatterSDNode *GorS,
                               Scatter->isTruncatingStore());
 }
 
+// Targets override this function to decide whether they want to update the
+// base and index value of a non-uniform GEP.
+static bool updateBaseAndIndex(SDValue &Base, SDValue &Index, SDValue &Scale,
+                               const SDLoc &DL, const SDValue &Gep,
+                               SelectionDAG &DAG) {
+  SDValue Nbase;
+  SDValue Nindex;
+  SDValue NScale;
+  bool Changed = false;
+  // This function checks the opcode of Index and updates the index.
+  auto checkAndUpdateIndex = [&](SDValue &Idx) {
+    if (Idx.getOpcode() == ISD::SHL) {  // shl zext, BV
+      SDValue Op10 = Idx.getOperand(0); // Zext or Sext value
+      SDValue Op11 = Idx.getOperand(1); // Build vector of constant
+      std::optional<uint64_t> ShAmt = DAG.getValidMinimumShiftAmount(Idx);
+
+      unsigned IndexWidth = Op10.getScalarValueSizeInBits();
+      if ((Op10.getOpcode() == ISD::SIGN_EXTEND ||
+           Op10.getOpcode() == ISD::ZERO_EXTEND) &&
+          IndexWidth > 32 &&
+          Op10.getOperand(0).getScalarValueSizeInBits() <= 32 &&
+          DAG.ComputeNumSignBits(Op10) > (IndexWidth - 32) && ShAmt) {
+
+        KnownBits ExtKnown = DAG.computeKnownBits(Op10);
+        bool ExtIsNonNegative = ExtKnown.isNonNegative();
+        KnownBits ExtOpKnown = DAG.computeKnownBits(Op10.getOperand(0));
+        bool ExtOpIsNonNegative = ExtOpKnown.isNonNegative();
+        if (!ExtIsNonNegative || !ExtOpIsNonNegative)
+          return false;
+
+        SDValue NewOp10 =
+            Op10.getOperand(0);          // Get the Operand zero from the ext
+        EVT VT = NewOp10.getValueType(); // Use the operand's type to determine
+                                         // the type of index
+
+        // auto *ConstEltNo = dyn_cast<ConstantSDNode>(Op11.getOperand(0));
+        // if (!ConstEltNo)
+        //   return false;
+        uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
+        uint64_t NewScaleAmt = ScaleAmt * (1ULL << *ShAmt);
+        LLVM_DEBUG(dbgs() << NewScaleAmt << " NewScaleAmt"
+                          << "\n");
+        if (isPowerOf2_64(NewScaleAmt) && NewScaleAmt <= 8) {
+          // Nindex = NewOp10.getOperand(0);
+          Nindex = Op10;
+          NScale = DAG.getTargetConstant(NewScaleAmt, DL, Scale.getValueType());
+          return true;
+        }
+        // SmallVector<SDValue, 8> Ops(VT.getVectorNumElements(),
+        //                            DAG.getConstant(ConstEltNo->getZExtValue(),
+        //                                            DL, VT.getScalarType()));
+        // Nindex = DAG.getNode(ISD::SHL, DL, VT, NewOp10,
+        //                     DAG.getBuildVector(VT, DL, Ops));
+      }
+    }
+    return false;
+  };
+
+  // For the GEP instruction, we try to properly assign the base and index
+  // values. We walk through the lowered code and iterate backward.
+  if (isNullConstant(Base) && Gep.getOpcode() == ISD::ADD) {
+    SDValue Op0 = Gep.getOperand(0); // base or  add
+    SDValue Op1 = Gep.getOperand(1); // build vector or SHL
+    Nbase = Op0;
+    SDValue Idx = Op1;
+    auto Flags = Gep->getFlags();
+
+    if (Op0->getOpcode() == ISD::ADD) { // add t15(base), t18(Idx)
+      SDValue Op00 = Op0.getOperand(0); // Base
+      Nbase = Op00;
+      Idx = Op0.getOperand(1);
+    } else if (!(Op0->getOpcode() == ISD::BUILD_VECTOR &&
+                 Op0.getOperand(0).getOpcode() == ISD::CopyFromReg)) {
+      return false;
+    }
+    if (!checkAndUpdateIndex(Idx)) {
+      return false;
+    }
+    Base = Nbase.getOperand(0);
+
+    if (Op0 != Nbase) {
+      auto *ConstEltNo = dyn_cast<ConstantSDNode>(Op1.getOperand(0));
+      if (!ConstEltNo)
+        return false;
+
+      // SmallVector<SDValue, 8> Ops(
+      //    Nindex.getValueType().getVectorNumElements(),
+      //    DAG.getConstant(ConstEltNo->getZExtValue(), DL,
+      //                    Nindex.getValueType().getScalarType()));
+      Base = DAG.getNode(ISD::ADD, DL, Nbase.getOperand(0).getValueType(),
+                         Nbase.getOperand(0), Op1.getOperand(0), Flags);
+    }
+    Index = Nindex;
+    Scale = NScale;
+    Changed = true;
+  } else if (Base.getOpcode() == ISD::CopyFromReg ||
+             (Base.getOpcode() == ISD::ADD &&
+              Base.getOperand(0).getOpcode() == ISD::CopyFromReg &&
+              isConstOrConstSplat(Base.getOperand(1)))) {
+    if (checkAndUpdateIndex(Index)) {
+      Index = Nindex;
+      Changed = true;
+    }
+  }
+  if (Changed) {
+    LLVM_DEBUG(dbgs() << "Successful in updating the non uniform gep "
+                         "information\n";
+               dbgs() << "updated base "; Base.dump();
+               dbgs() << "updated Index "; Index.dump(););
+    return true;
+  }
+  return false;
+}
+
 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI) {
   SDLoc DL(N);
@@ -56523,6 +56637,29 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
   if (DCI.isBeforeLegalize()) {
+    //    if (updateBaseAndIndex(Base, Index, Scale, DL, Index, DAG))
+    //      return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
+    //
+
+    // Attempt to move shifted index into the address scale, allows further
+    // index truncation below.
+    if (Index.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Scale)) {
+      uint64_t ScaleAmt = Scale->getAsZExtVal();
+      if (auto MinShAmt = DAG.getValidMinimumShiftAmount(Index)) {
+        if (*MinShAmt >= 1 && ScaleAmt < 8 &&
+            DAG.ComputeNumSignBits(Index.getOperand(0)) > 1) {
+          SDValue ShAmt = Index.getOperand(1);
+          SDValue NewShAmt =
+              DAG.getNode(ISD::SUB, DL, ShAmt.getValueType(), ShAmt,
+                          DAG.getConstant(1, DL, ShAmt.getValueType()));
+          SDValue NewIndex = DAG.getNode(ISD::SHL, DL, Index.getValueType(),
+                                         Index.getOperand(0), NewShAmt);
+          SDValue NewScale =
+              DAG.getConstant(ScaleAmt * 2, DL, Scale.getValueType());
+          return rebuildGatherScatter(GorS, NewIndex, Base, NewScale, DAG);
+        }
+      }
+    }
     unsigned IndexWidth = Index.getScalarValueSizeInBits();
 
     // Shrink indices if they are larger than 32-bits.
@@ -56552,6 +56689,12 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
         Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
         return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
       }
+
+      // Shrink if we remove an illegal type.
+      if (!TLI.isTypeLegal(Index.getValueType()) && TLI.isTypeLegal(NewVT)) {
+        Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
+        return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
+      }
     }
   }
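A hedged illustration of the hunk just above (not patch code): each application of the
combine peels one bit off the index shift and doubles the gather scale, and since the
rebuilt node is revisited by the DAG combiner, the shift keeps migrating into the scale
until the x86 SIB limit of 8 is reached; whatever shift remains stays on the index,
which the shrinking logic in the same function can then truncate to 32 bits. The
starting values (shift 4, scale 1) are only an example matching the 16-byte struct used
in the tests:

  #include <cstdint>
  #include <cstdio>

  // Illustrative simulation of repeatedly applying the combine: each step
  // subtracts one from the shift amount and doubles the scale, stopping once
  // the scale reaches the maximum x86 SIB scale of 8.
  int main() {
    std::uint64_t ShAmt = 4, Scale = 1; // e.g. a gather indexing a 16-byte struct
    while (ShAmt >= 1 && Scale < 8) {
      --ShAmt;
      Scale *= 2;
    }
    // Prints "shift 1, scale 8": the leftover shift of 1 is what shows up as
    // the vpaddd (x + x) on the 32-bit index in the updated CHECK lines.
    std::printf("shift %llu, scale %llu\n",
                (unsigned long long)ShAmt, (unsigned long long)Scale);
    return 0;
  }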
 
diff --git a/llvm/test/CodeGen/X86/gatherBaseIndexFix.ll b/llvm/test/CodeGen/X86/gatherBaseIndexFix.ll
new file mode 100644
index 0000000000000..faa83b0a20290
--- /dev/null
+++ b/llvm/test/CodeGen/X86/gatherBaseIndexFix.ll
@@ -0,0 +1,68 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc  -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq -mcpu=znver5 < %s | FileCheck %s
+
+%struct.pt = type { float, float, float, i32 }
+%struct.res = type {<16 x float>, <16 x float>}
+
+define <16 x float> @test_gather_16f32_1(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0)  {
+; CHECK-LABEL: test_gather_16f32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
+; CHECK-NEXT:    vmovdqu64 (%rsi), %zmm2
+; CHECK-NEXT:    vpmovb2m %xmm0, %k1
+; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm0
+; CHECK-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; CHECK-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; CHECK-NEXT:    vmovaps %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %wide.load = load <16 x i32>, ptr %arr, align 4
+  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
+  %zext = zext <16 x i32> %and to <16 x i64>
+  %ptrs = getelementptr inbounds %struct.pt, ptr %x, <16 x i64> %zext
+  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
+  ret <16 x float> %res
+  }
+
+define <16 x float> @test_gather_16f32_2(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0)  {
+; CHECK-LABEL: test_gather_16f32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
+; CHECK-NEXT:    vmovdqu64 (%rsi), %zmm2
+; CHECK-NEXT:    vpmovb2m %xmm0, %k1
+; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm0
+; CHECK-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; CHECK-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; CHECK-NEXT:    vmovaps %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %wide.load = load <16 x i32>, ptr %arr, align 4
+  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
+  %zext = zext <16 x i32> %and to <16 x i64>
+  %ptrs = getelementptr inbounds %struct.pt, ptr %x, <16 x i64> %zext, i32 1
+  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
+  ret <16 x float> %res
+  }
+
+define {<16 x float>, <16 x float>} @test_gather_16f32_3(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0)  {
+; CHECK-LABEL: test_gather_16f32_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
+; CHECK-NEXT:    vpmovb2m %xmm0, %k1
+; CHECK-NEXT:    vmovdqu64 (%rsi), %zmm0
+; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; CHECK-NEXT:    kmovq %k1, %k2
+; CHECK-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
+; CHECK-NEXT:    vmovaps %zmm1, %zmm0
+; CHECK-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
+; CHECK-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
+; CHECK-NEXT:    retq
+  %wide.load = load <16 x i32>, ptr %arr, align 4
+  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
+  %zext = zext <16 x i32> %and to <16 x i64>
+  %ptrs1 = getelementptr inbounds %struct.pt, ptr %x, <16 x i64> %zext
+  %res1 = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs1, i32 4, <16 x i1> %mask, <16 x float> %src0)
+  %ptrs = getelementptr inbounds %struct.pt, ptr %x, <16 x i64> %zext, i32 1
+  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
+  %pair1 = insertvalue {<16 x float>, <16 x float>} undef, <16 x float> %res1, 0
+  %pair2 = insertvalue {<16 x float>, <16 x float>} %pair1, <16 x float> %res, 1
+  ret {<16 x float>, <16 x float>} %pair2
+  }

>From 9d395b137899a3c985444e191756a04400b42287 Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Wed, 19 Mar 2025 15:04:43 +0530
Subject: [PATCH 02/16] Fix the gather's base and index whether the Index node
 has one use or multiple uses, by folding an SHL on the index into the Scale
 so the index can then be truncated.

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |   3 -
 llvm/lib/Target/X86/X86ISelLowering.cpp       | 143 ++++++++++++++++++
 llvm/test/CodeGen/X86/gatherBaseIndexFix.ll   |  68 +++++++++
 3 files changed, 211 insertions(+), 3 deletions(-)
 create mode 100644 llvm/test/CodeGen/X86/gatherBaseIndexFix.ll

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 38376de5783ae..a727d63c95019 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -12131,9 +12131,6 @@ bool refineUniformBase(SDValue &BasePtr, SDValue &Index, bool IndexIsScaled,
   if (IndexIsScaled)
     return false;
 
-  if (!isNullConstant(BasePtr) && !Index.hasOneUse())
-    return false;
-
   EVT VT = BasePtr.getValueType();
 
   if (SDValue SplatVal = DAG.getSplatValue(Index);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 47ac1ee571269..61e6d0734f402 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -56512,6 +56512,120 @@ static SDValue rebuildGatherScatter(MaskedGatherScatterSDNode *GorS,
                               Scatter->isTruncatingStore());
 }
 
+// Targets override this function to decide whether they want to update the
+// base and index value of a non-uniform GEP.
+static bool updateBaseAndIndex(SDValue &Base, SDValue &Index, SDValue &Scale,
+                               const SDLoc &DL, const SDValue &Gep,
+                               SelectionDAG &DAG) {
+  SDValue Nbase;
+  SDValue Nindex;
+  SDValue NScale;
+  bool Changed = false;
+  // This function checks the opcode of Index and updates the index.
+  auto checkAndUpdateIndex = [&](SDValue &Idx) {
+    if (Idx.getOpcode() == ISD::SHL) {  // shl zext, BV
+      SDValue Op10 = Idx.getOperand(0); // Zext or Sext value
+      SDValue Op11 = Idx.getOperand(1); // Build vector of constant
+      std::optional<uint64_t> ShAmt = DAG.getValidMinimumShiftAmount(Idx);
+
+      unsigned IndexWidth = Op10.getScalarValueSizeInBits();
+      if ((Op10.getOpcode() == ISD::SIGN_EXTEND ||
+           Op10.getOpcode() == ISD::ZERO_EXTEND) &&
+          IndexWidth > 32 &&
+          Op10.getOperand(0).getScalarValueSizeInBits() <= 32 &&
+          DAG.ComputeNumSignBits(Op10) > (IndexWidth - 32) && ShAmt) {
+
+        KnownBits ExtKnown = DAG.computeKnownBits(Op10);
+        bool ExtIsNonNegative = ExtKnown.isNonNegative();
+        KnownBits ExtOpKnown = DAG.computeKnownBits(Op10.getOperand(0));
+        bool ExtOpIsNonNegative = ExtOpKnown.isNonNegative();
+        if (!ExtIsNonNegative || !ExtOpIsNonNegative)
+          return false;
+
+        SDValue NewOp10 =
+            Op10.getOperand(0);          // Get the Operand zero from the ext
+        EVT VT = NewOp10.getValueType(); // Use the operand's type to determine
+                                         // the type of index
+
+        // auto *ConstEltNo = dyn_cast<ConstantSDNode>(Op11.getOperand(0));
+        // if (!ConstEltNo)
+        //   return false;
+        uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
+        uint64_t NewScaleAmt = ScaleAmt * (1ULL << *ShAmt);
+        LLVM_DEBUG(dbgs() << NewScaleAmt << " NewScaleAmt"
+                          << "\n");
+        if (isPowerOf2_64(NewScaleAmt) && NewScaleAmt <= 8) {
+          // Nindex = NewOp10.getOperand(0);
+          Nindex = Op10;
+          NScale = DAG.getTargetConstant(NewScaleAmt, DL, Scale.getValueType());
+          return true;
+        }
+        // SmallVector<SDValue, 8> Ops(VT.getVectorNumElements(),
+        //                            DAG.getConstant(ConstEltNo->getZExtValue(),
+        //                                            DL, VT.getScalarType()));
+        // Nindex = DAG.getNode(ISD::SHL, DL, VT, NewOp10,
+        //                     DAG.getBuildVector(VT, DL, Ops));
+      }
+    }
+    return false;
+  };
+
+  // For the GEP instruction, we try to properly assign the base and index
+  // values. We walk through the lowered code and iterate backward.
+  if (isNullConstant(Base) && Gep.getOpcode() == ISD::ADD) {
+    SDValue Op0 = Gep.getOperand(0); // base or  add
+    SDValue Op1 = Gep.getOperand(1); // build vector or SHL
+    Nbase = Op0;
+    SDValue Idx = Op1;
+    auto Flags = Gep->getFlags();
+
+    if (Op0->getOpcode() == ISD::ADD) { // add t15(base), t18(Idx)
+      SDValue Op00 = Op0.getOperand(0); // Base
+      Nbase = Op00;
+      Idx = Op0.getOperand(1);
+    } else if (!(Op0->getOpcode() == ISD::BUILD_VECTOR &&
+                 Op0.getOperand(0).getOpcode() == ISD::CopyFromReg)) {
+      return false;
+    }
+    if (!checkAndUpdateIndex(Idx)) {
+      return false;
+    }
+    Base = Nbase.getOperand(0);
+
+    if (Op0 != Nbase) {
+      auto *ConstEltNo = dyn_cast<ConstantSDNode>(Op1.getOperand(0));
+      if (!ConstEltNo)
+        return false;
+
+      // SmallVector<SDValue, 8> Ops(
+      //    Nindex.getValueType().getVectorNumElements(),
+      //    DAG.getConstant(ConstEltNo->getZExtValue(), DL,
+      //                    Nindex.getValueType().getScalarType()));
+      Base = DAG.getNode(ISD::ADD, DL, Nbase.getOperand(0).getValueType(),
+                         Nbase.getOperand(0), Op1.getOperand(0), Flags);
+    }
+    Index = Nindex;
+    Scale = NScale;
+    Changed = true;
+  } else if (Base.getOpcode() == ISD::CopyFromReg ||
+             (Base.getOpcode() == ISD::ADD &&
+              Base.getOperand(0).getOpcode() == ISD::CopyFromReg &&
+              isConstOrConstSplat(Base.getOperand(1)))) {
+    if (checkAndUpdateIndex(Index)) {
+      Index = Nindex;
+      Changed = true;
+    }
+  }
+  if (Changed) {
+    LLVM_DEBUG(dbgs() << "Successful in updating the non uniform gep "
+                         "information\n";
+               dbgs() << "updated base "; Base.dump();
+               dbgs() << "updated Index "; Index.dump(););
+    return true;
+  }
+  return false;
+}
+
 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI) {
   SDLoc DL(N);
@@ -56523,6 +56637,29 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
   if (DCI.isBeforeLegalize()) {
+    //    if (updateBaseAndIndex(Base, Index, Scale, DL, Index, DAG))
+    //      return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
+    //
+
+    // Attempt to move shifted index into the address scale, allows further
+    // index truncation below.
+    if (Index.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Scale)) {
+      uint64_t ScaleAmt = Scale->getAsZExtVal();
+      if (auto MinShAmt = DAG.getValidMinimumShiftAmount(Index)) {
+        if (*MinShAmt >= 1 && ScaleAmt < 8 &&
+            DAG.ComputeNumSignBits(Index.getOperand(0)) > 1) {
+          SDValue ShAmt = Index.getOperand(1);
+          SDValue NewShAmt =
+              DAG.getNode(ISD::SUB, DL, ShAmt.getValueType(), ShAmt,
+                          DAG.getConstant(1, DL, ShAmt.getValueType()));
+          SDValue NewIndex = DAG.getNode(ISD::SHL, DL, Index.getValueType(),
+                                         Index.getOperand(0), NewShAmt);
+          SDValue NewScale =
+              DAG.getConstant(ScaleAmt * 2, DL, Scale.getValueType());
+          return rebuildGatherScatter(GorS, NewIndex, Base, NewScale, DAG);
+        }
+      }
+    }
     unsigned IndexWidth = Index.getScalarValueSizeInBits();
 
     // Shrink indices if they are larger than 32-bits.
@@ -56552,6 +56689,12 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
         Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
         return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
       }
+
+      // Shrink if we remove an illegal type.
+      if (!TLI.isTypeLegal(Index.getValueType()) && TLI.isTypeLegal(NewVT)) {
+        Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
+        return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
+      }
     }
   }
 
diff --git a/llvm/test/CodeGen/X86/gatherBaseIndexFix.ll b/llvm/test/CodeGen/X86/gatherBaseIndexFix.ll
new file mode 100644
index 0000000000000..faa83b0a20290
--- /dev/null
+++ b/llvm/test/CodeGen/X86/gatherBaseIndexFix.ll
@@ -0,0 +1,68 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc  -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq -mcpu=znver5 < %s | FileCheck %s
+
+%struct.pt = type { float, float, float, i32 }
+%struct.res = type {<16 x float>, <16 x float>}
+
+define <16 x float> @test_gather_16f32_1(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0)  {
+; CHECK-LABEL: test_gather_16f32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
+; CHECK-NEXT:    vmovdqu64 (%rsi), %zmm2
+; CHECK-NEXT:    vpmovb2m %xmm0, %k1
+; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm0
+; CHECK-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; CHECK-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; CHECK-NEXT:    vmovaps %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %wide.load = load <16 x i32>, ptr %arr, align 4
+  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
+  %zext = zext <16 x i32> %and to <16 x i64>
+  %ptrs = getelementptr inbounds %struct.pt, ptr %x, <16 x i64> %zext
+  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
+  ret <16 x float> %res
+  }
+
+define <16 x float> @test_gather_16f32_2(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0)  {
+; CHECK-LABEL: test_gather_16f32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
+; CHECK-NEXT:    vmovdqu64 (%rsi), %zmm2
+; CHECK-NEXT:    vpmovb2m %xmm0, %k1
+; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm0
+; CHECK-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; CHECK-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; CHECK-NEXT:    vmovaps %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %wide.load = load <16 x i32>, ptr %arr, align 4
+  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
+  %zext = zext <16 x i32> %and to <16 x i64>
+  %ptrs = getelementptr inbounds %struct.pt, ptr %x, <16 x i64> %zext, i32 1
+  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
+  ret <16 x float> %res
+  }
+
+define {<16 x float>, <16 x float>} @test_gather_16f32_3(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0)  {
+; CHECK-LABEL: test_gather_16f32_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
+; CHECK-NEXT:    vpmovb2m %xmm0, %k1
+; CHECK-NEXT:    vmovdqu64 (%rsi), %zmm0
+; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; CHECK-NEXT:    kmovq %k1, %k2
+; CHECK-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
+; CHECK-NEXT:    vmovaps %zmm1, %zmm0
+; CHECK-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
+; CHECK-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
+; CHECK-NEXT:    retq
+  %wide.load = load <16 x i32>, ptr %arr, align 4
+  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
+  %zext = zext <16 x i32> %and to <16 x i64>
+  %ptrs1 = getelementptr inbounds %struct.pt, ptr %x, <16 x i64> %zext
+  %res1 = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs1, i32 4, <16 x i1> %mask, <16 x float> %src0)
+  %ptrs = getelementptr inbounds %struct.pt, ptr %x, <16 x i64> %zext, i32 1
+  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
+  %pair1 = insertvalue {<16 x float>, <16 x float>} undef, <16 x float> %res1, 0
+  %pair2 = insertvalue {<16 x float>, <16 x float>} %pair1, <16 x float> %res, 1
+  ret {<16 x float>, <16 x float>} %pair2
+  }

>From 7eb76638fcad21977183d045d2655267a8dddb98 Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <44664450+rohitaggarwal007 at users.noreply.github.com>
Date: Wed, 9 Apr 2025 16:53:45 +0530
Subject: [PATCH 03/16] Update gatherBaseIndexFix.ll

---
 llvm/test/CodeGen/X86/gatherBaseIndexFix.ll | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/test/CodeGen/X86/gatherBaseIndexFix.ll b/llvm/test/CodeGen/X86/gatherBaseIndexFix.ll
index a1d65a36410b4..a08ab5a936fa2 100644
--- a/llvm/test/CodeGen/X86/gatherBaseIndexFix.ll
+++ b/llvm/test/CodeGen/X86/gatherBaseIndexFix.ll
@@ -67,4 +67,4 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_3(ptr %x, ptr %arr, <16 x
   ret {<16 x float>, <16 x float>} %pair2
 }
 
-declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x float>)
\ No newline at end of file
+declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x float>)

>From 4ed4a4d0318e63c930b286e903c4ff2123c088a7 Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Wed, 9 Apr 2025 17:00:44 +0530
Subject: [PATCH 04/16] squash! Changes

---
 llvm/lib/Target/X86/X86ISelLowering.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 61e6d0734f402..afdaa485ccc88 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -56643,6 +56643,7 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
 
     // Attempt to move shifted index into the address scale, allows further
     // index truncation below.
+    // TODO
     if (Index.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Scale)) {
       uint64_t ScaleAmt = Scale->getAsZExtVal();
       if (auto MinShAmt = DAG.getValidMinimumShiftAmount(Index)) {

>From bfbbe9f1752da98fff6563e0a5889d61ea2abe99 Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Wed, 16 Apr 2025 14:45:03 +0530
Subject: [PATCH 05/16] Update the masked_gather_scatter.ll

---
 .../test/CodeGen/X86/masked_gather_scatter.ll | 202 +++++++-----------
 1 file changed, 76 insertions(+), 126 deletions(-)

diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index 1e05ab089b903..e587586b51aa4 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -639,7 +639,23 @@ define <16 x float> @test14(ptr %base, i32 %ind, <16 x ptr> %vec) {
 ; SKX_32-NEXT:    vpxor %xmm0, %xmm0, %xmm0
 ; SKX_32-NEXT:    vgatherdps (%eax,%zmm1,4), %zmm0 {%k1}
 ; SKX_32-NEXT:    retl
-
+; X64-LABEL: test14:
+; X64:       # %bb.0:
+; X64-NEXT:    vmovq %xmm0, %rax
+; X64-NEXT:    vpbroadcastd %esi, %zmm1
+; X64-NEXT:    kxnorw %k0, %k0, %k1
+; X64-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; X64-NEXT:    vgatherdps (%rax,%zmm1,4), %zmm0 {%k1}
+; X64-NEXT:    retq
+;
+; X86-LABEL: test14:
+; X86:       # %bb.0:
+; X86-NEXT:    vmovd %xmm0, %eax
+; X86-NEXT:    vbroadcastss {{[0-9]+}}(%esp), %zmm1
+; X86-NEXT:    kxnorw %k0, %k0, %k1
+; X86-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; X86-NEXT:    vgatherdps (%eax,%zmm1,4), %zmm0 {%k1}
+; X86-NEXT:    retl
   %broadcast.splatinsert = insertelement <16 x ptr> %vec, ptr %base, i32 1
   %broadcast.splat = shufflevector <16 x ptr> %broadcast.splatinsert, <16 x ptr> undef, <16 x i32> zeroinitializer
 
@@ -4826,16 +4842,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
 ; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
 ; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-KNL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-KNL-NEXT:    vpsllq $4, %zmm0, %zmm0
-; X64-KNL-NEXT:    vpsllq $4, %zmm2, %zmm2
-; X64-KNL-NEXT:    vextractf64x4 $1, %zmm1, %ymm3
-; X64-KNL-NEXT:    kshiftrw $8, %k1, %k2
-; X64-KNL-NEXT:    vgatherqps (%rdi,%zmm2), %ymm3 {%k2}
-; X64-KNL-NEXT:    vgatherqps (%rdi,%zmm0), %ymm1 {%k1}
-; X64-KNL-NEXT:    vinsertf64x4 $1, %ymm3, %zmm1, %zmm0
+; X64-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-KNL-NEXT:    retq
 ;
 ; X86-KNL-LABEL: test_gather_structpt_16f32_mask_index:
@@ -4845,8 +4854,10 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
 ; X86-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-KNL-NEXT:    vpslld $4, (%ecx), %zmm0
-; X86-KNL-NEXT:    vgatherdps (%eax,%zmm0), %zmm1 {%k1}
+; X86-KNL-NEXT:    vmovdqu64 (%ecx), %zmm0
+; X86-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
+; X86-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; X86-KNL-NEXT:    vgatherdps (%eax,%zmm0,8), %zmm1 {%k1}
 ; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; X86-KNL-NEXT:    retl
 ;
@@ -4857,16 +4868,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
 ; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
 ; X64-SKX-SMALL-NEXT:    vmovdqu64 (%rsi), %zmm0
 ; X64-SKX-SMALL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-SMALL-NEXT:    vpsllq $4, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpsllq $4, %zmm2, %zmm2
-; X64-SKX-SMALL-NEXT:    vextractf64x4 $1, %zmm1, %ymm3
-; X64-SKX-SMALL-NEXT:    kshiftrw $8, %k1, %k2
-; X64-SKX-SMALL-NEXT:    vgatherqps (%rdi,%zmm2), %ymm3 {%k2}
-; X64-SKX-SMALL-NEXT:    vgatherqps (%rdi,%zmm0), %ymm1 {%k1}
-; X64-SKX-SMALL-NEXT:    vinsertf64x4 $1, %ymm3, %zmm1, %zmm0
+; X64-SKX-SMALL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-SKX-SMALL-NEXT:    retq
 ;
 ; X64-SKX-LARGE-LABEL: test_gather_structpt_16f32_mask_index:
@@ -4877,16 +4881,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
 ; X64-SKX-LARGE-NEXT:    vmovdqu64 (%rsi), %zmm0
 ; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
 ; X64-SKX-LARGE-NEXT:    vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-LARGE-NEXT:    vpsllq $4, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpsllq $4, %zmm2, %zmm2
-; X64-SKX-LARGE-NEXT:    vextractf64x4 $1, %zmm1, %ymm3
-; X64-SKX-LARGE-NEXT:    kshiftrw $8, %k1, %k2
-; X64-SKX-LARGE-NEXT:    vgatherqps (%rdi,%zmm2), %ymm3 {%k2}
-; X64-SKX-LARGE-NEXT:    vgatherqps (%rdi,%zmm0), %ymm1 {%k1}
-; X64-SKX-LARGE-NEXT:    vinsertf64x4 $1, %ymm3, %zmm1, %zmm0
+; X64-SKX-LARGE-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-SKX-LARGE-NEXT:    retq
 ;
 ; X86-SKX-LABEL: test_gather_structpt_16f32_mask_index:
@@ -4896,8 +4893,10 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
 ; X86-SKX-NEXT:    vpmovd2m %zmm0, %k1
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SKX-NEXT:    vpslld $4, (%ecx), %zmm0
-; X86-SKX-NEXT:    vgatherdps (%eax,%zmm0), %zmm1 {%k1}
+; X86-SKX-NEXT:    vmovdqu64 (%ecx), %zmm0
+; X86-SKX-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
+; X86-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; X86-SKX-NEXT:    vgatherdps (%eax,%zmm0,8), %zmm1 {%k1}
 ; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0
 ; X86-SKX-NEXT:    retl
   %wide.load = load <16 x i32>, ptr %arr, align 4
@@ -4916,16 +4915,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
 ; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
 ; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-KNL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-KNL-NEXT:    vpsllq $4, %zmm0, %zmm0
-; X64-KNL-NEXT:    vpsllq $4, %zmm2, %zmm2
-; X64-KNL-NEXT:    vextractf64x4 $1, %zmm1, %ymm3
-; X64-KNL-NEXT:    kshiftrw $8, %k1, %k2
-; X64-KNL-NEXT:    vgatherqps 4(%rdi,%zmm2), %ymm3 {%k2}
-; X64-KNL-NEXT:    vgatherqps 4(%rdi,%zmm0), %ymm1 {%k1}
-; X64-KNL-NEXT:    vinsertf64x4 $1, %ymm3, %zmm1, %zmm0
+; X64-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-KNL-NEXT:    retq
 ;
 ; X86-KNL-LABEL: test_gather_structpt_16f32_mask_index_offset:
@@ -4935,8 +4927,10 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
 ; X86-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-KNL-NEXT:    vpslld $4, (%ecx), %zmm0
-; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm0), %zmm1 {%k1}
+; X86-KNL-NEXT:    vmovdqu64 (%ecx), %zmm0
+; X86-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
+; X86-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm0,8), %zmm1 {%k1}
 ; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; X86-KNL-NEXT:    retl
 ;
@@ -4947,16 +4941,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
 ; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
 ; X64-SKX-SMALL-NEXT:    vmovdqu64 (%rsi), %zmm0
 ; X64-SKX-SMALL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-SMALL-NEXT:    vpsllq $4, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpsllq $4, %zmm2, %zmm2
-; X64-SKX-SMALL-NEXT:    vextractf64x4 $1, %zmm1, %ymm3
-; X64-SKX-SMALL-NEXT:    kshiftrw $8, %k1, %k2
-; X64-SKX-SMALL-NEXT:    vgatherqps 4(%rdi,%zmm2), %ymm3 {%k2}
-; X64-SKX-SMALL-NEXT:    vgatherqps 4(%rdi,%zmm0), %ymm1 {%k1}
-; X64-SKX-SMALL-NEXT:    vinsertf64x4 $1, %ymm3, %zmm1, %zmm0
+; X64-SKX-SMALL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-SKX-SMALL-NEXT:    retq
 ;
 ; X64-SKX-LARGE-LABEL: test_gather_structpt_16f32_mask_index_offset:
@@ -4967,16 +4954,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
 ; X64-SKX-LARGE-NEXT:    vmovdqu64 (%rsi), %zmm0
 ; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
 ; X64-SKX-LARGE-NEXT:    vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-LARGE-NEXT:    vpsllq $4, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpsllq $4, %zmm2, %zmm2
-; X64-SKX-LARGE-NEXT:    vextractf64x4 $1, %zmm1, %ymm3
-; X64-SKX-LARGE-NEXT:    kshiftrw $8, %k1, %k2
-; X64-SKX-LARGE-NEXT:    vgatherqps 4(%rdi,%zmm2), %ymm3 {%k2}
-; X64-SKX-LARGE-NEXT:    vgatherqps 4(%rdi,%zmm0), %ymm1 {%k1}
-; X64-SKX-LARGE-NEXT:    vinsertf64x4 $1, %ymm3, %zmm1, %zmm0
+; X64-SKX-LARGE-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-SKX-LARGE-NEXT:    retq
 ;
 ; X86-SKX-LABEL: test_gather_structpt_16f32_mask_index_offset:
@@ -4986,8 +4966,10 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
 ; X86-SKX-NEXT:    vpmovd2m %zmm0, %k1
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SKX-NEXT:    vpslld $4, (%ecx), %zmm0
-; X86-SKX-NEXT:    vgatherdps 4(%eax,%zmm0), %zmm1 {%k1}
+; X86-SKX-NEXT:    vmovdqu64 (%ecx), %zmm0
+; X86-SKX-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
+; X86-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; X86-SKX-NEXT:    vgatherdps 4(%eax,%zmm0,8), %zmm1 {%k1}
 ; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0
 ; X86-SKX-NEXT:    retl
   %wide.load = load <16 x i32>, ptr %arr, align 4
@@ -5006,23 +4988,11 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
 ; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-KNL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-KNL-NEXT:    vpsllq $4, %zmm0, %zmm3
-; X64-KNL-NEXT:    vpsllq $4, %zmm2, %zmm2
-; X64-KNL-NEXT:    vextractf64x4 $1, %zmm1, %ymm4
-; X64-KNL-NEXT:    kshiftrw $8, %k1, %k2
-; X64-KNL-NEXT:    kmovw %k2, %k3
-; X64-KNL-NEXT:    vmovaps %ymm4, %ymm0
-; X64-KNL-NEXT:    vgatherqps (%rdi,%zmm2), %ymm0 {%k3}
-; X64-KNL-NEXT:    vmovaps %ymm1, %ymm5
-; X64-KNL-NEXT:    kmovw %k1, %k3
-; X64-KNL-NEXT:    vgatherqps (%rdi,%zmm3), %ymm5 {%k3}
-; X64-KNL-NEXT:    vinsertf64x4 $1, %ymm0, %zmm5, %zmm0
-; X64-KNL-NEXT:    vgatherqps 4(%rdi,%zmm2), %ymm4 {%k2}
-; X64-KNL-NEXT:    vgatherqps 4(%rdi,%zmm3), %ymm1 {%k1}
-; X64-KNL-NEXT:    vinsertf64x4 $1, %ymm4, %zmm1, %zmm1
+; X64-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
+; X64-KNL-NEXT:    kmovw %k1, %k2
+; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
+; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
+; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
 ; X64-KNL-NEXT:    retq
 ;
 ; X86-KNL-LABEL: test_gather_16f32_mask_index_pair:
@@ -5032,11 +5002,13 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X86-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-KNL-NEXT:    vpslld $4, (%ecx), %zmm2
+; X86-KNL-NEXT:    vmovdqu64 (%ecx), %zmm0
+; X86-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
+; X86-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
 ; X86-KNL-NEXT:    kmovw %k1, %k2
 ; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
-; X86-KNL-NEXT:    vgatherdps (%eax,%zmm2), %zmm0 {%k2}
-; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm2), %zmm1 {%k1}
+; X86-KNL-NEXT:    vgatherdps (%eax,%zmm2,8), %zmm0 {%k2}
+; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm2,8), %zmm1 {%k1}
 ; X86-KNL-NEXT:    retl
 ;
 ; X64-SKX-SMALL-LABEL: test_gather_16f32_mask_index_pair:
@@ -5046,23 +5018,11 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
 ; X64-SKX-SMALL-NEXT:    vmovdqu64 (%rsi), %zmm0
 ; X64-SKX-SMALL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-SMALL-NEXT:    vpsllq $4, %zmm0, %zmm3
-; X64-SKX-SMALL-NEXT:    vpsllq $4, %zmm2, %zmm2
-; X64-SKX-SMALL-NEXT:    vextractf64x4 $1, %zmm1, %ymm4
-; X64-SKX-SMALL-NEXT:    kshiftrw $8, %k1, %k2
-; X64-SKX-SMALL-NEXT:    kmovw %k2, %k3
-; X64-SKX-SMALL-NEXT:    vmovaps %ymm4, %ymm0
-; X64-SKX-SMALL-NEXT:    vgatherqps (%rdi,%zmm2), %ymm0 {%k3}
-; X64-SKX-SMALL-NEXT:    vmovaps %ymm1, %ymm5
-; X64-SKX-SMALL-NEXT:    kmovw %k1, %k3
-; X64-SKX-SMALL-NEXT:    vgatherqps (%rdi,%zmm3), %ymm5 {%k3}
-; X64-SKX-SMALL-NEXT:    vinsertf64x4 $1, %ymm0, %zmm5, %zmm0
-; X64-SKX-SMALL-NEXT:    vgatherqps 4(%rdi,%zmm2), %ymm4 {%k2}
-; X64-SKX-SMALL-NEXT:    vgatherqps 4(%rdi,%zmm3), %ymm1 {%k1}
-; X64-SKX-SMALL-NEXT:    vinsertf64x4 $1, %ymm4, %zmm1, %zmm1
+; X64-SKX-SMALL-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
+; X64-SKX-SMALL-NEXT:    kmovw %k1, %k2
+; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
+; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
+; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
 ; X64-SKX-SMALL-NEXT:    retq
 ;
 ; X64-SKX-LARGE-LABEL: test_gather_16f32_mask_index_pair:
@@ -5073,23 +5033,11 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X64-SKX-LARGE-NEXT:    vmovdqu64 (%rsi), %zmm0
 ; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
 ; X64-SKX-LARGE-NEXT:    vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-LARGE-NEXT:    vpsllq $4, %zmm0, %zmm3
-; X64-SKX-LARGE-NEXT:    vpsllq $4, %zmm2, %zmm2
-; X64-SKX-LARGE-NEXT:    vextractf64x4 $1, %zmm1, %ymm4
-; X64-SKX-LARGE-NEXT:    kshiftrw $8, %k1, %k2
-; X64-SKX-LARGE-NEXT:    vmovaps %ymm4, %ymm0
-; X64-SKX-LARGE-NEXT:    kmovw %k2, %k3
-; X64-SKX-LARGE-NEXT:    vgatherqps (%rdi,%zmm2), %ymm0 {%k3}
-; X64-SKX-LARGE-NEXT:    vmovaps %ymm1, %ymm5
-; X64-SKX-LARGE-NEXT:    kmovw %k1, %k3
-; X64-SKX-LARGE-NEXT:    vgatherqps (%rdi,%zmm3), %ymm5 {%k3}
-; X64-SKX-LARGE-NEXT:    vinsertf64x4 $1, %ymm0, %zmm5, %zmm0
-; X64-SKX-LARGE-NEXT:    vgatherqps 4(%rdi,%zmm2), %ymm4 {%k2}
-; X64-SKX-LARGE-NEXT:    vgatherqps 4(%rdi,%zmm3), %ymm1 {%k1}
-; X64-SKX-LARGE-NEXT:    vinsertf64x4 $1, %ymm4, %zmm1, %zmm1
+; X64-SKX-LARGE-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
+; X64-SKX-LARGE-NEXT:    kmovw %k1, %k2
+; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
+; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
+; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
 ; X64-SKX-LARGE-NEXT:    retq
 ;
 ; X86-SKX-LABEL: test_gather_16f32_mask_index_pair:
@@ -5099,11 +5047,13 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X86-SKX-NEXT:    vpmovd2m %zmm0, %k1
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SKX-NEXT:    vpslld $4, (%ecx), %zmm2
+; X86-SKX-NEXT:    vmovdqu64 (%ecx), %zmm0
+; X86-SKX-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
+; X86-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
 ; X86-SKX-NEXT:    kmovw %k1, %k2
 ; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0
-; X86-SKX-NEXT:    vgatherdps (%eax,%zmm2), %zmm0 {%k2}
-; X86-SKX-NEXT:    vgatherdps 4(%eax,%zmm2), %zmm1 {%k1}
+; X86-SKX-NEXT:    vgatherdps (%eax,%zmm2,8), %zmm0 {%k2}
+; X86-SKX-NEXT:    vgatherdps 4(%eax,%zmm2,8), %zmm1 {%k1}
 ; X86-SKX-NEXT:    retl
   %wide.load = load <16 x i32>, ptr %arr, align 4
   %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>

>From 4bb9f5b3831987d86184cf1b47c9c9152b37b58b Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Wed, 16 Apr 2025 15:00:39 +0530
Subject: [PATCH 06/16] Remove redundant gatherBaseIndexFix.ll

---
 llvm/test/CodeGen/X86/gatherBaseIndexFix.ll | 70 ---------------------
 1 file changed, 70 deletions(-)
 delete mode 100644 llvm/test/CodeGen/X86/gatherBaseIndexFix.ll

diff --git a/llvm/test/CodeGen/X86/gatherBaseIndexFix.ll b/llvm/test/CodeGen/X86/gatherBaseIndexFix.ll
deleted file mode 100644
index a08ab5a936fa2..0000000000000
--- a/llvm/test/CodeGen/X86/gatherBaseIndexFix.ll
+++ /dev/null
@@ -1,70 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc  -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq -mcpu=znver5 < %s | FileCheck %s
-
-%struct.pt = type { float, float, float, i32 }
-%struct.res = type {<16 x float>, <16 x float>}
-
-define <16 x float> @test_gather_16f32_1(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0)  {
-; CHECK-LABEL: test_gather_16f32_1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
-; CHECK-NEXT:    vmovdqu64 (%rsi), %zmm2
-; CHECK-NEXT:    vpmovb2m %xmm0, %k1
-; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm0
-; CHECK-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
-; CHECK-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
-; CHECK-NEXT:    vmovaps %zmm1, %zmm0
-; CHECK-NEXT:    retq
-  %wide.load = load <16 x i32>, ptr %arr, align 4
-  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
-  %zext = zext <16 x i32> %and to <16 x i64>
-  %ptrs = getelementptr inbounds %struct.pt, ptr %x, <16 x i64> %zext
-  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
-  ret <16 x float> %res
-}
-
-define <16 x float> @test_gather_16f32_2(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0)  {
-; CHECK-LABEL: test_gather_16f32_2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
-; CHECK-NEXT:    vmovdqu64 (%rsi), %zmm2
-; CHECK-NEXT:    vpmovb2m %xmm0, %k1
-; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm0
-; CHECK-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
-; CHECK-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
-; CHECK-NEXT:    vmovaps %zmm1, %zmm0
-; CHECK-NEXT:    retq
-  %wide.load = load <16 x i32>, ptr %arr, align 4
-  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
-  %zext = zext <16 x i32> %and to <16 x i64>
-  %ptrs = getelementptr inbounds %struct.pt, ptr %x, <16 x i64> %zext, i32 1
-  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
-  ret <16 x float> %res
-}
-
-define {<16 x float>, <16 x float>} @test_gather_16f32_3(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0)  {
-; CHECK-LABEL: test_gather_16f32_3:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpsllw $7, %xmm0, %xmm0
-; CHECK-NEXT:    vpmovb2m %xmm0, %k1
-; CHECK-NEXT:    vmovdqu64 (%rsi), %zmm0
-; CHECK-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; CHECK-NEXT:    kmovq %k1, %k2
-; CHECK-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
-; CHECK-NEXT:    vmovaps %zmm1, %zmm0
-; CHECK-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
-; CHECK-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
-; CHECK-NEXT:    retq
-  %wide.load = load <16 x i32>, ptr %arr, align 4
-  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
-  %zext = zext <16 x i32> %and to <16 x i64>
-  %ptrs1 = getelementptr inbounds %struct.pt, ptr %x, <16 x i64> %zext
-  %res1 = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs1, i32 4, <16 x i1> %mask, <16 x float> %src0)
-  %ptrs = getelementptr inbounds %struct.pt, ptr %x, <16 x i64> %zext, i32 1
-  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
-  %pair1 = insertvalue {<16 x float>, <16 x float>} undef, <16 x float> %res1, 0
-  %pair2 = insertvalue {<16 x float>, <16 x float>} %pair1, <16 x float> %res, 1
-  ret {<16 x float>, <16 x float>} %pair2
-}
-
-declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x float>)

>From fb131f8dc198c6b97a1acb2c34d7c4dc3db7e5f0 Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Thu, 17 Apr 2025 14:23:31 +0530
Subject: [PATCH 07/16] Remove the updateBaseAndIndex function and update the
 checks for the test case

---
 llvm/lib/Target/X86/X86ISelLowering.cpp       | 118 ------------------
 .../test/CodeGen/X86/masked_gather_scatter.ll |  35 ------
 2 files changed, 153 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e4c2c1a46eff5..16b91c78d693c 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -56508,120 +56508,6 @@ static SDValue rebuildGatherScatter(MaskedGatherScatterSDNode *GorS,
                               Scatter->isTruncatingStore());
 }
 
-// Targets override this function to decide whether they want to update the
-// base and index value of a non-uniform GEP.
-static bool updateBaseAndIndex(SDValue &Base, SDValue &Index, SDValue &Scale,
-                               const SDLoc &DL, const SDValue &Gep,
-                               SelectionDAG &DAG) {
-  SDValue Nbase;
-  SDValue Nindex;
-  SDValue NScale;
-  bool Changed = false;
-  // This function checks the opcode of Index and updates the index.
-  auto checkAndUpdateIndex = [&](SDValue &Idx) {
-    if (Idx.getOpcode() == ISD::SHL) {  // shl zext, BV
-      SDValue Op10 = Idx.getOperand(0); // Zext or Sext value
-      SDValue Op11 = Idx.getOperand(1); // Build vector of constant
-      std::optional<uint64_t> ShAmt = DAG.getValidMinimumShiftAmount(Idx);
-
-      unsigned IndexWidth = Op10.getScalarValueSizeInBits();
-      if ((Op10.getOpcode() == ISD::SIGN_EXTEND ||
-           Op10.getOpcode() == ISD::ZERO_EXTEND) &&
-          IndexWidth > 32 &&
-          Op10.getOperand(0).getScalarValueSizeInBits() <= 32 &&
-          DAG.ComputeNumSignBits(Op10) > (IndexWidth - 32) && ShAmt) {
-
-        KnownBits ExtKnown = DAG.computeKnownBits(Op10);
-        bool ExtIsNonNegative = ExtKnown.isNonNegative();
-        KnownBits ExtOpKnown = DAG.computeKnownBits(Op10.getOperand(0));
-        bool ExtOpIsNonNegative = ExtOpKnown.isNonNegative();
-        if (!ExtIsNonNegative || !ExtOpIsNonNegative)
-          return false;
-
-        SDValue NewOp10 =
-            Op10.getOperand(0);          // Get the Operand zero from the ext
-        EVT VT = NewOp10.getValueType(); // Use the operand's type to determine
-                                         // the type of index
-
-        // auto *ConstEltNo = dyn_cast<ConstantSDNode>(Op11.getOperand(0));
-        // if (!ConstEltNo)
-        //   return false;
-        uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
-        uint64_t NewScaleAmt = ScaleAmt * (1ULL << *ShAmt);
-        LLVM_DEBUG(dbgs() << NewScaleAmt << " NewScaleAmt"
-                          << "\n");
-        if (isPowerOf2_64(NewScaleAmt) && NewScaleAmt <= 8) {
-          // Nindex = NewOp10.getOperand(0);
-          Nindex = Op10;
-          NScale = DAG.getTargetConstant(NewScaleAmt, DL, Scale.getValueType());
-          return true;
-        }
-        // SmallVector<SDValue, 8> Ops(VT.getVectorNumElements(),
-        //                            DAG.getConstant(ConstEltNo->getZExtValue(),
-        //                                            DL, VT.getScalarType()));
-        // Nindex = DAG.getNode(ISD::SHL, DL, VT, NewOp10,
-        //                     DAG.getBuildVector(VT, DL, Ops));
-      }
-    }
-    return false;
-  };
-
-  // For the GEP instruction, we try to properly assign the base and index
-  // values. We walk through the lowered code and iterate backward.
-  if (isNullConstant(Base) && Gep.getOpcode() == ISD::ADD) {
-    SDValue Op0 = Gep.getOperand(0); // base or  add
-    SDValue Op1 = Gep.getOperand(1); // build vector or SHL
-    Nbase = Op0;
-    SDValue Idx = Op1;
-    auto Flags = Gep->getFlags();
-
-    if (Op0->getOpcode() == ISD::ADD) { // add t15(base), t18(Idx)
-      SDValue Op00 = Op0.getOperand(0); // Base
-      Nbase = Op00;
-      Idx = Op0.getOperand(1);
-    } else if (!(Op0->getOpcode() == ISD::BUILD_VECTOR &&
-                 Op0.getOperand(0).getOpcode() == ISD::CopyFromReg)) {
-      return false;
-    }
-    if (!checkAndUpdateIndex(Idx)) {
-      return false;
-    }
-    Base = Nbase.getOperand(0);
-
-    if (Op0 != Nbase) {
-      auto *ConstEltNo = dyn_cast<ConstantSDNode>(Op1.getOperand(0));
-      if (!ConstEltNo)
-        return false;
-
-      // SmallVector<SDValue, 8> Ops(
-      //    Nindex.getValueType().getVectorNumElements(),
-      //    DAG.getConstant(ConstEltNo->getZExtValue(), DL,
-      //                    Nindex.getValueType().getScalarType()));
-      Base = DAG.getNode(ISD::ADD, DL, Nbase.getOperand(0).getValueType(),
-                         Nbase.getOperand(0), Op1.getOperand(0), Flags);
-    }
-    Index = Nindex;
-    Scale = NScale;
-    Changed = true;
-  } else if (Base.getOpcode() == ISD::CopyFromReg ||
-             (Base.getOpcode() == ISD::ADD &&
-              Base.getOperand(0).getOpcode() == ISD::CopyFromReg &&
-              isConstOrConstSplat(Base.getOperand(1)))) {
-    if (checkAndUpdateIndex(Index)) {
-      Index = Nindex;
-      Changed = true;
-    }
-  }
-  if (Changed) {
-    LLVM_DEBUG(dbgs() << "Successful in updating the non uniform gep "
-                         "information\n";
-               dbgs() << "updated base "; Base.dump();
-               dbgs() << "updated Index "; Index.dump(););
-    return true;
-  }
-  return false;
-}
-
 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI) {
   SDLoc DL(N);
@@ -56634,10 +56520,6 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
   if (DCI.isBeforeLegalize()) {
-    //    if (updateBaseAndIndex(Base, Index, Scale, DL, Index, DAG))
-    //      return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
-    //
-
     // Attempt to move shifted index into the address scale, allows further
     // index truncation below.
     // TODO
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index e587586b51aa4..60a7d857185fd 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -604,41 +604,6 @@ define <16 x float> @test13(ptr %base, <16 x i32> %ind) {
 
 ; The base pointer is not splat, can't find unform base
 define <16 x float> @test14(ptr %base, i32 %ind, <16 x ptr> %vec) {
-; KNL_64-LABEL: test14:
-; KNL_64:       # %bb.0:
-; KNL_64-NEXT:    vmovq %xmm0, %rax
-; KNL_64-NEXT:    vpbroadcastd %esi, %zmm1
-; KNL_64-NEXT:    kxnorw %k0, %k0, %k1
-; KNL_64-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; KNL_64-NEXT:    vgatherdps (%rax,%zmm1,4), %zmm0 {%k1}
-; KNL_64-NEXT:    retq
-;
-; KNL_32-LABEL: test14:
-; KNL_32:       # %bb.0:
-; KNL_32-NEXT:    vmovd %xmm0, %eax
-; KNL_32-NEXT:    vbroadcastss {{[0-9]+}}(%esp), %zmm1
-; KNL_32-NEXT:    kxnorw %k0, %k0, %k1
-; KNL_32-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; KNL_32-NEXT:    vgatherdps (%eax,%zmm1,4), %zmm0 {%k1}
-; KNL_32-NEXT:    retl
-;
-; SKX-LABEL: test14:
-; SKX:       # %bb.0:
-; SKX-NEXT:    vmovq %xmm0, %rax
-; SKX-NEXT:    vpbroadcastd %esi, %zmm1
-; SKX-NEXT:    kxnorw %k0, %k0, %k1
-; SKX-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; SKX-NEXT:    vgatherdps (%rax,%zmm1,4), %zmm0 {%k1}
-; SKX-NEXT:    retq
-;
-; SKX_32-LABEL: test14:
-; SKX_32:       # %bb.0:
-; SKX_32-NEXT:    vmovd %xmm0, %eax
-; SKX_32-NEXT:    vbroadcastss {{[0-9]+}}(%esp), %zmm1
-; SKX_32-NEXT:    kxnorw %k0, %k0, %k1
-; SKX_32-NEXT:    vpxor %xmm0, %xmm0, %xmm0
-; SKX_32-NEXT:    vgatherdps (%eax,%zmm1,4), %zmm0 {%k1}
-; SKX_32-NEXT:    retl
 ; X64-LABEL: test14:
 ; X64:       # %bb.0:
 ; X64-NEXT:    vmovq %xmm0, %rax

>From 47bf70bf382640e44887c9a3e2c3422131d6a90d Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Thu, 17 Apr 2025 17:51:57 +0530
Subject: [PATCH 08/16] Revert the hasOneUse check

---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 5339d42cebd5a..d72be359867ca 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -12163,6 +12163,9 @@ bool refineUniformBase(SDValue &BasePtr, SDValue &Index, bool IndexIsScaled,
   if (IndexIsScaled)
     return false;
 
+  if (!isNullConstant(BasePtr) && !Index.hasOneUse())
+    return false;
+
   EVT VT = BasePtr.getValueType();
 
   if (SDValue SplatVal = DAG.getSplatValue(Index);

>From 716943edd7d951f76f828c037270730450b8bff9 Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Fri, 18 Apr 2025 18:39:35 +0530
Subject: [PATCH 09/16] Fold the redundant index mask operation

---
 llvm/lib/Target/X86/X86ISelLowering.cpp       |   8 ++
 .../test/CodeGen/X86/masked_gather_scatter.ll | 117 +++++-------------
 2 files changed, 40 insertions(+), 85 deletions(-)
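
Note: the hunk below limits SimplifyDemandedBits on a shifted index to the low
BitWidth - log2(Scale) bits. As a rough standalone sketch of why that is safe
when the index element is already pointer-width (plain C++ with made-up values,
not LLVM code): any index bits that the power-of-two scale shifts past bit 63
cannot affect the 64-bit address.

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint64_t Scale = 8;                          // log2(Scale) == 3
    const unsigned MaskBits = 64 - 3;                  // demanded low bits
    const uint64_t Demanded = ~0ULL >> (64 - MaskBits);

    uint64_t Idx = 0xF23456789ABCDEF0ULL;              // arbitrary index element
    uint64_t Addr1 = Idx * Scale;                      // wraps modulo 2^64
    uint64_t Addr2 = (Idx & Demanded) * Scale;         // top 3 bits cleared first

    assert(Addr1 == Addr2); // clearing the non-demanded bits cannot change
                            // the scaled address
    return 0;
  }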

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 16b91c78d693c..0a693eda4181e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -56580,6 +56580,14 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
 
   EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
 
+  if (Index.getOpcode() == ISD::SHL) {
+    unsigned BitWidth = Index.getScalarValueSizeInBits();
+    unsigned MaskBits = BitWidth - Log2_32(Scale->getAsZExtVal());
+    APInt DemandedBits = APInt::getLowBitsSet(BitWidth, MaskBits);
+    if (TLI.SimplifyDemandedBits(Index, DemandedBits, DCI)) {
+      return SDValue(N, 0);
+    }
+  }
   // Try to move splat adders from the index operand to the base
   // pointer operand. Taking care to multiply by the scale. We can only do
   // this when index element type is the same as the pointer type.
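
For reference, the splat-adder move described in the comment above is plain
re-association of the address arithmetic; a minimal standalone sketch with
made-up values (not the combine itself):

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t Base = 0x2000;
    uint64_t Splat = 7;  // constant added to every index element
    uint64_t V = 13;     // per-lane remainder of the index
    uint64_t Scale = 4;

    uint64_t SplatInIndex = Base + (Splat + V) * Scale;
    uint64_t SplatInBase = (Base + Splat * Scale) + V * Scale;
    assert(SplatInIndex == SplatInBase); // moving the splat into the base
                                         // requires scaling it first
    return 0;
  }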
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index 60a7d857185fd..4cdad06696e9e 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -4806,7 +4806,6 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
 ; X64-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
 ; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
@@ -4820,36 +4819,21 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-KNL-NEXT:    vmovdqu64 (%ecx), %zmm0
-; X86-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
 ; X86-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
 ; X86-KNL-NEXT:    vgatherdps (%eax,%zmm0,8), %zmm1 {%k1}
 ; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; X86-KNL-NEXT:    retl
 ;
-; X64-SKX-SMALL-LABEL: test_gather_structpt_16f32_mask_index:
-; X64-SKX-SMALL:       # %bb.0:
-; X64-SKX-SMALL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-SKX-SMALL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
-; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
-; X64-SKX-SMALL-NEXT:    retq
-;
-; X64-SKX-LARGE-LABEL: test_gather_structpt_16f32_mask_index:
-; X64-SKX-LARGE:       # %bb.0:
-; X64-SKX-LARGE-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
-; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
-; X64-SKX-LARGE-NEXT:    retq
+; X64-SKX-LABEL: test_gather_structpt_16f32_mask_index:
+; X64-SKX:       # %bb.0:
+; X64-SKX-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X64-SKX-NEXT:    vpslld $31, %zmm0, %zmm0
+; X64-SKX-NEXT:    vpmovd2m %zmm0, %k1
+; X64-SKX-NEXT:    vmovdqu64 (%rsi), %zmm0
+; X64-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; X64-SKX-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-NEXT:    vmovaps %zmm1, %zmm0
+; X64-SKX-NEXT:    retq
 ;
 ; X86-SKX-LABEL: test_gather_structpt_16f32_mask_index:
 ; X86-SKX:       # %bb.0:
@@ -4859,7 +4843,6 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SKX-NEXT:    vmovdqu64 (%ecx), %zmm0
-; X86-SKX-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
 ; X86-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
 ; X86-SKX-NEXT:    vgatherdps (%eax,%zmm0,8), %zmm1 {%k1}
 ; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -4879,7 +4862,6 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
 ; X64-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
 ; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
@@ -4893,36 +4875,21 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-KNL-NEXT:    vmovdqu64 (%ecx), %zmm0
-; X86-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
 ; X86-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
 ; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm0,8), %zmm1 {%k1}
 ; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; X86-KNL-NEXT:    retl
 ;
-; X64-SKX-SMALL-LABEL: test_gather_structpt_16f32_mask_index_offset:
-; X64-SKX-SMALL:       # %bb.0:
-; X64-SKX-SMALL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-SKX-SMALL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
-; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
-; X64-SKX-SMALL-NEXT:    retq
-;
-; X64-SKX-LARGE-LABEL: test_gather_structpt_16f32_mask_index_offset:
-; X64-SKX-LARGE:       # %bb.0:
-; X64-SKX-LARGE-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
-; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
-; X64-SKX-LARGE-NEXT:    retq
+; X64-SKX-LABEL: test_gather_structpt_16f32_mask_index_offset:
+; X64-SKX:       # %bb.0:
+; X64-SKX-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X64-SKX-NEXT:    vpslld $31, %zmm0, %zmm0
+; X64-SKX-NEXT:    vpmovd2m %zmm0, %k1
+; X64-SKX-NEXT:    vmovdqu64 (%rsi), %zmm0
+; X64-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
+; X64-SKX-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-NEXT:    vmovaps %zmm1, %zmm0
+; X64-SKX-NEXT:    retq
 ;
 ; X86-SKX-LABEL: test_gather_structpt_16f32_mask_index_offset:
 ; X86-SKX:       # %bb.0:
@@ -4932,7 +4899,6 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SKX-NEXT:    vmovdqu64 (%ecx), %zmm0
-; X86-SKX-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
 ; X86-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
 ; X86-SKX-NEXT:    vgatherdps 4(%eax,%zmm0,8), %zmm1 {%k1}
 ; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0
@@ -4952,7 +4918,6 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X64-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
 ; X64-KNL-NEXT:    kmovw %k1, %k2
 ; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
@@ -4968,7 +4933,6 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-KNL-NEXT:    vmovdqu64 (%ecx), %zmm0
-; X86-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
 ; X86-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
 ; X86-KNL-NEXT:    kmovw %k1, %k2
 ; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
@@ -4976,34 +4940,18 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm2,8), %zmm1 {%k1}
 ; X86-KNL-NEXT:    retl
 ;
-; X64-SKX-SMALL-LABEL: test_gather_16f32_mask_index_pair:
-; X64-SKX-SMALL:       # %bb.0:
-; X64-SKX-SMALL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-SKX-SMALL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
-; X64-SKX-SMALL-NEXT:    kmovw %k1, %k2
-; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
-; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
-; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
-; X64-SKX-SMALL-NEXT:    retq
-;
-; X64-SKX-LARGE-LABEL: test_gather_16f32_mask_index_pair:
-; X64-SKX-LARGE:       # %bb.0:
-; X64-SKX-LARGE-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
-; X64-SKX-LARGE-NEXT:    kmovw %k1, %k2
-; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
-; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
-; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
-; X64-SKX-LARGE-NEXT:    retq
+; X64-SKX-LABEL: test_gather_16f32_mask_index_pair:
+; X64-SKX:       # %bb.0:
+; X64-SKX-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X64-SKX-NEXT:    vpslld $31, %zmm0, %zmm0
+; X64-SKX-NEXT:    vpmovd2m %zmm0, %k1
+; X64-SKX-NEXT:    vmovdqu64 (%rsi), %zmm0
+; X64-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
+; X64-SKX-NEXT:    kmovw %k1, %k2
+; X64-SKX-NEXT:    vmovaps %zmm1, %zmm0
+; X64-SKX-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
+; X64-SKX-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
+; X64-SKX-NEXT:    retq
 ;
 ; X86-SKX-LABEL: test_gather_16f32_mask_index_pair:
 ; X86-SKX:       # %bb.0:
@@ -5013,7 +4961,6 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SKX-NEXT:    vmovdqu64 (%ecx), %zmm0
-; X86-SKX-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}{1to16}, %zmm0, %zmm0
 ; X86-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
 ; X86-SKX-NEXT:    kmovw %k1, %k2
 ; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0

>From b9e571a81c62d6cb2cb1bd23d7345c66f711e23b Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Tue, 22 Apr 2025 20:39:46 +0530
Subject: [PATCH 10/16] Restrict the scale update so that it happens fully or
 not at all. Added logic to identify signed bits.

---
 llvm/lib/Target/X86/X86ISelLowering.cpp       | 14 +++-
 .../test/CodeGen/X86/masked_gather_scatter.ll | 68 ++++++++-----------
 2 files changed, 39 insertions(+), 43 deletions(-)
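
The restriction in the subject comes from x86 addressing: a constant vector
shift can only be folded into the gather scale when the combined scale is still
one of 1, 2, 4 or 8, i.e. when ShAmt + log2(Scale) stays below 4, so the fold
is either done completely or not at all. A minimal standalone sketch of the
underlying identity (plain C++ with made-up values, not the actual combine):

  #include <cassert>
  #include <cstdint>

  int main() {
    uint64_t Base = 0x1000;
    uint64_t V = 42;     // pre-shift index element
    uint64_t ShAmt = 1;  // constant vector shift amount
    uint64_t Scale = 4;  // original gather scale

    uint64_t AddrShifted = Base + (V << ShAmt) * Scale;  // index = V << ShAmt
    uint64_t NewScale = Scale << ShAmt;                   // fold shift into scale
    uint64_t AddrFolded = Base + V * NewScale;

    assert(AddrShifted == AddrFolded);
    assert(NewScale <= 8); // x86 only encodes scale 1, 2, 4 or 8, hence the
                           // (ShAmt + log2(Scale)) < 4 style guard
    return 0;
  }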

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 0a693eda4181e..a3592ecf42d33 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -56522,11 +56522,10 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
   if (DCI.isBeforeLegalize()) {
     // Attempt to move shifted index into the address scale, allows further
     // index truncation below.
-    // TODO
     if (Index.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Scale)) {
       uint64_t ScaleAmt = Scale->getAsZExtVal();
       if (auto MinShAmt = DAG.getValidMinimumShiftAmount(Index)) {
-        if (*MinShAmt >= 1 && ScaleAmt < 8 &&
+        if (*MinShAmt >= 1 && (*MinShAmt + Log2_64(ScaleAmt)) < 4 &&
             DAG.ComputeNumSignBits(Index.getOperand(0)) > 1) {
           SDValue ShAmt = Index.getOperand(1);
           SDValue NewShAmt =
@@ -56546,7 +56545,16 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
     // Only do this before legalize types since v2i64 could become v2i32.
     // FIXME: We could check that the type is legal if we're after legalize
     // types, but then we would need to construct test cases where that happens.
-    if (IndexWidth > 32 && DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
+    unsigned ComputeNumSignBits = DAG.ComputeNumSignBits(Index);
+    if (Index.getOpcode() == ISD::SHL) {
+      if (auto MinShAmt = DAG.getValidMinimumShiftAmount(Index)) {
+        if (DAG.ComputeNumSignBits(Index.getOperand(0)) > 1) {
+          ComputeNumSignBits += *MinShAmt;
+        }
+      }
+    }
+
+    if (IndexWidth > 32 && ComputeNumSignBits > (IndexWidth - 32)) {
       EVT NewVT = IndexVT.changeVectorElementType(MVT::i32);
 
       // FIXME: We could support more than just constant vectors, but we need to
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index 4cdad06696e9e..5abda61bedf0f 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -4805,9 +4805,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
 ; X64-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X64-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
-; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
-; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-KNL-NEXT:    vpslld $4, (%rsi), %zmm0
+; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm0), %zmm1 {%k1}
 ; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-KNL-NEXT:    retq
 ;
@@ -4818,9 +4817,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
 ; X86-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-KNL-NEXT:    vmovdqu64 (%ecx), %zmm0
-; X86-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
-; X86-KNL-NEXT:    vgatherdps (%eax,%zmm0,8), %zmm1 {%k1}
+; X86-KNL-NEXT:    vpslld $4, (%ecx), %zmm0
+; X86-KNL-NEXT:    vgatherdps (%eax,%zmm0), %zmm1 {%k1}
 ; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; X86-KNL-NEXT:    retl
 ;
@@ -4829,9 +4827,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
 ; X64-SKX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X64-SKX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-SKX-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
-; X64-SKX-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-NEXT:    vpslld $4, (%rsi), %zmm0
+; X64-SKX-NEXT:    vgatherdps (%rdi,%zmm0), %zmm1 {%k1}
 ; X64-SKX-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-SKX-NEXT:    retq
 ;
@@ -4842,9 +4839,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
 ; X86-SKX-NEXT:    vpmovd2m %zmm0, %k1
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SKX-NEXT:    vmovdqu64 (%ecx), %zmm0
-; X86-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
-; X86-SKX-NEXT:    vgatherdps (%eax,%zmm0,8), %zmm1 {%k1}
+; X86-SKX-NEXT:    vpslld $4, (%ecx), %zmm0
+; X86-SKX-NEXT:    vgatherdps (%eax,%zmm0), %zmm1 {%k1}
 ; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0
 ; X86-SKX-NEXT:    retl
   %wide.load = load <16 x i32>, ptr %arr, align 4
@@ -4861,9 +4857,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
 ; X64-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X64-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
-; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
-; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-KNL-NEXT:    vpslld $4, (%rsi), %zmm0
+; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm0), %zmm1 {%k1}
 ; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-KNL-NEXT:    retq
 ;
@@ -4874,9 +4869,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
 ; X86-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-KNL-NEXT:    vmovdqu64 (%ecx), %zmm0
-; X86-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
-; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm0,8), %zmm1 {%k1}
+; X86-KNL-NEXT:    vpslld $4, (%ecx), %zmm0
+; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm0), %zmm1 {%k1}
 ; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; X86-KNL-NEXT:    retl
 ;
@@ -4885,9 +4879,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
 ; X64-SKX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X64-SKX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-SKX-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
-; X64-SKX-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-NEXT:    vpslld $4, (%rsi), %zmm0
+; X64-SKX-NEXT:    vgatherdps 4(%rdi,%zmm0), %zmm1 {%k1}
 ; X64-SKX-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-SKX-NEXT:    retq
 ;
@@ -4898,9 +4891,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
 ; X86-SKX-NEXT:    vpmovd2m %zmm0, %k1
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SKX-NEXT:    vmovdqu64 (%ecx), %zmm0
-; X86-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm0
-; X86-SKX-NEXT:    vgatherdps 4(%eax,%zmm0,8), %zmm1 {%k1}
+; X86-SKX-NEXT:    vpslld $4, (%ecx), %zmm0
+; X86-SKX-NEXT:    vgatherdps 4(%eax,%zmm0), %zmm1 {%k1}
 ; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0
 ; X86-SKX-NEXT:    retl
   %wide.load = load <16 x i32>, ptr %arr, align 4
@@ -4917,12 +4909,11 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X64-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X64-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
-; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
+; X64-KNL-NEXT:    vpslld $4, (%rsi), %zmm2
 ; X64-KNL-NEXT:    kmovw %k1, %k2
 ; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
-; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
-; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
+; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm2), %zmm0 {%k2}
+; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm2), %zmm1 {%k1}
 ; X64-KNL-NEXT:    retq
 ;
 ; X86-KNL-LABEL: test_gather_16f32_mask_index_pair:
@@ -4932,12 +4923,11 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X86-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-KNL-NEXT:    vmovdqu64 (%ecx), %zmm0
-; X86-KNL-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
+; X86-KNL-NEXT:    vpslld $4, (%ecx), %zmm2
 ; X86-KNL-NEXT:    kmovw %k1, %k2
 ; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
-; X86-KNL-NEXT:    vgatherdps (%eax,%zmm2,8), %zmm0 {%k2}
-; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm2,8), %zmm1 {%k1}
+; X86-KNL-NEXT:    vgatherdps (%eax,%zmm2), %zmm0 {%k2}
+; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm2), %zmm1 {%k1}
 ; X86-KNL-NEXT:    retl
 ;
 ; X64-SKX-LABEL: test_gather_16f32_mask_index_pair:
@@ -4945,12 +4935,11 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X64-SKX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X64-SKX-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-SKX-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
+; X64-SKX-NEXT:    vpslld $4, (%rsi), %zmm2
 ; X64-SKX-NEXT:    kmovw %k1, %k2
 ; X64-SKX-NEXT:    vmovaps %zmm1, %zmm0
-; X64-SKX-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
-; X64-SKX-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
+; X64-SKX-NEXT:    vgatherdps (%rdi,%zmm2), %zmm0 {%k2}
+; X64-SKX-NEXT:    vgatherdps 4(%rdi,%zmm2), %zmm1 {%k1}
 ; X64-SKX-NEXT:    retq
 ;
 ; X86-SKX-LABEL: test_gather_16f32_mask_index_pair:
@@ -4960,12 +4949,11 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X86-SKX-NEXT:    vpmovd2m %zmm0, %k1
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SKX-NEXT:    vmovdqu64 (%ecx), %zmm0
-; X86-SKX-NEXT:    vpaddd %zmm0, %zmm0, %zmm2
+; X86-SKX-NEXT:    vpslld $4, (%ecx), %zmm2
 ; X86-SKX-NEXT:    kmovw %k1, %k2
 ; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0
-; X86-SKX-NEXT:    vgatherdps (%eax,%zmm2,8), %zmm0 {%k2}
-; X86-SKX-NEXT:    vgatherdps 4(%eax,%zmm2,8), %zmm1 {%k1}
+; X86-SKX-NEXT:    vgatherdps (%eax,%zmm2), %zmm0 {%k2}
+; X86-SKX-NEXT:    vgatherdps 4(%eax,%zmm2), %zmm1 {%k1}
 ; X86-SKX-NEXT:    retl
   %wide.load = load <16 x i32>, ptr %arr, align 4
   %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
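
As background for the ComputeNumSignBits-based check above: once the 64-bit
index is known to have more than 32 sign bits it survives a round trip through
i32, which is what lets the combine narrow the index vector for the dword-index
gather. A tiny standalone illustration (hypothetical value):

  #include <cassert>
  #include <cstdint>

  int main() {
    int64_t Idx = -1234567;                   // any value with > 32 sign bits
    int32_t Trunc = static_cast<int32_t>(Idx);
    int64_t RoundTrip = static_cast<int64_t>(Trunc); // sign-extend back
    assert(RoundTrip == Idx); // truncating such an index to i32 is lossless
    return 0;
  }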

>From f0a175a86f6f9eb7ffc2c57e679b43ad6668afd6 Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Wed, 23 Apr 2025 15:22:53 +0530
Subject: [PATCH 11/16] Add test cases and merge the SHL use cases

---
 llvm/lib/Target/X86/X86ISelLowering.cpp       |  24 +-
 .../test/CodeGen/X86/masked_gather_scatter.ll | 438 ++++++++++++++++++
 2 files changed, 454 insertions(+), 8 deletions(-)
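
The new tests below use %struct.pt2 = type { float, float }, so the struct GEP
decomposes into a 4-byte displacement for field 1, the loaded index, and
scale 8 with no extra shift left over. A small standalone sketch of that offset
arithmetic (assuming the usual 4-byte float layout):

  #include <cassert>
  #include <cstddef>

  // Mirrors %struct.pt2 = type { float, float } from the new tests.
  struct pt2 { float a; float b; };

  int main() {
    unsigned i = 5; // arbitrary element index
    // getelementptr %struct.pt2, ptr %x, i, 1  ==  byte offset 8*i + 4,
    // which the gather encodes as displacement 4, index i and scale 8.
    std::size_t Offset = sizeof(pt2) * i + offsetof(pt2, b);
    assert(Offset == 8 * i + 4);
    return 0;
  }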

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a3592ecf42d33..13506677ccc6b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -56523,6 +56523,14 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
     // Attempt to move shifted index into the address scale, allows further
     // index truncation below.
     if (Index.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Scale)) {
+      unsigned BitWidth = Index.getScalarValueSizeInBits();
+      unsigned MaskBits = BitWidth - Log2_32(Scale->getAsZExtVal());
+      APInt DemandedBits = APInt::getLowBitsSet(BitWidth, MaskBits);
+      if (TLI.SimplifyDemandedBits(Index, DemandedBits, DCI)) {
+        if (N->getOpcode() != ISD::DELETED_NODE)
+          DCI.AddToWorklist(N);
+        return SDValue(N, 0);
+      }
       uint64_t ScaleAmt = Scale->getAsZExtVal();
       if (auto MinShAmt = DAG.getValidMinimumShiftAmount(Index)) {
         if (*MinShAmt >= 1 && (*MinShAmt + Log2_64(ScaleAmt)) < 4 &&
@@ -56588,14 +56596,14 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
 
   EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
 
-  if (Index.getOpcode() == ISD::SHL) {
-    unsigned BitWidth = Index.getScalarValueSizeInBits();
-    unsigned MaskBits = BitWidth - Log2_32(Scale->getAsZExtVal());
-    APInt DemandedBits = APInt::getLowBitsSet(BitWidth, MaskBits);
-    if (TLI.SimplifyDemandedBits(Index, DemandedBits, DCI)) {
-      return SDValue(N, 0);
-    }
-  }
+  // if (Index.getOpcode() == ISD::SHL) {
+  //   unsigned BitWidth = Index.getScalarValueSizeInBits();
+  //   unsigned MaskBits = BitWidth - Log2_32(Scale->getAsZExtVal());
+  //   APInt DemandedBits = APInt::getLowBitsSet(BitWidth, MaskBits);
+  //   if (TLI.SimplifyDemandedBits(Index, DemandedBits, DCI)) {
+  //     return SDValue(N, 0);
+  //   }
+  // }
   // Try to move splat adders from the index operand to the base
   // pointer operand. Taking care to multiply by the scale. We can only do
   // this when index element type is the same as the pointer type.
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index 5abda61bedf0f..b80f98e449a5a 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -4798,6 +4798,7 @@ declare <8 x i64> @llvm.masked.gather.v8i64.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x
 
 ; Test gathers from struct
 %struct.pt = type { float, float, float, i32 }
+%struct.pt2 = type { float, float }
 
 define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0) {
 ; X64-KNL-LABEL: test_gather_structpt_16f32_mask_index:
@@ -4966,3 +4967,440 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
   %pair2 = insertvalue {<16 x float>, <16 x float>} %pair1, <16 x float> %res, 1
   ret {<16 x float>, <16 x float>} %pair2
 }
+
+define <8 x float> @test_gather_structpt_8f32_mask_index(ptr %x, ptr %arr, <8 x i1> %mask, <8 x float> %src0) {
+; X64-KNL-LABEL: test_gather_structpt_8f32_mask_index:
+; X64-KNL:       # %bb.0:
+; X64-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; X64-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
+; X64-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
+; X64-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
+; X64-KNL-NEXT:    vbroadcastss {{.*#+}} ymm0 = [536870911,536870911,536870911,536870911,536870911,536870911,536870911,536870911]
+; X64-KNL-NEXT:    vandps (%rsi), %ymm0, %ymm0
+; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-KNL-NEXT:    vmovaps %ymm1, %ymm0
+; X64-KNL-NEXT:    retq
+;
+; X86-KNL-LABEL: test_gather_structpt_8f32_mask_index:
+; X86-KNL:       # %bb.0:
+; X86-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
+; X86-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
+; X86-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
+; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-KNL-NEXT:    vmovdqu (%ecx), %ymm0
+; X86-KNL-NEXT:    vpslld $3, %ymm0, %ymm0
+; X86-KNL-NEXT:    vgatherdps (%eax,%zmm0), %zmm1 {%k1}
+; X86-KNL-NEXT:    vmovaps %ymm1, %ymm0
+; X86-KNL-NEXT:    retl
+;
+; X64-SKX-SMALL-LABEL: test_gather_structpt_8f32_mask_index:
+; X64-SKX-SMALL:       # %bb.0:
+; X64-SKX-SMALL-NEXT:    vpmovsxwd %xmm0, %ymm0
+; X64-SKX-SMALL-NEXT:    vpslld $31, %ymm0, %ymm0
+; X64-SKX-SMALL-NEXT:    vpmovd2m %ymm0, %k1
+; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %ymm0
+; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%ymm0,8), %ymm1 {%k1}
+; X64-SKX-SMALL-NEXT:    vmovaps %ymm1, %ymm0
+; X64-SKX-SMALL-NEXT:    retq
+;
+; X64-SKX-LARGE-LABEL: test_gather_structpt_8f32_mask_index:
+; X64-SKX-LARGE:       # %bb.0:
+; X64-SKX-LARGE-NEXT:    vpmovsxwd %xmm0, %ymm0
+; X64-SKX-LARGE-NEXT:    vpslld $31, %ymm0, %ymm0
+; X64-SKX-LARGE-NEXT:    vpmovd2m %ymm0, %k1
+; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %ymm0
+; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
+; X64-SKX-LARGE-NEXT:    vandps (%rax){1to8}, %ymm0, %ymm0
+; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%ymm0,8), %ymm1 {%k1}
+; X64-SKX-LARGE-NEXT:    vmovaps %ymm1, %ymm0
+; X64-SKX-LARGE-NEXT:    retq
+;
+; X86-SKX-LABEL: test_gather_structpt_8f32_mask_index:
+; X86-SKX:       # %bb.0:
+; X86-SKX-NEXT:    vpmovsxwd %xmm0, %ymm0
+; X86-SKX-NEXT:    vpslld $31, %ymm0, %ymm0
+; X86-SKX-NEXT:    vpmovd2m %ymm0, %k1
+; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SKX-NEXT:    vmovups (%ecx), %ymm0
+; X86-SKX-NEXT:    vgatherdps (%eax,%ymm0,8), %ymm1 {%k1}
+; X86-SKX-NEXT:    vmovaps %ymm1, %ymm0
+; X86-SKX-NEXT:    retl
+  %wide.load = load <8 x i32>, ptr %arr, align 4
+  %and = and <8 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
+  %zext = zext <8 x i32> %and to <8 x i64>
+  %ptrs = getelementptr inbounds %struct.pt2, ptr %x, <8 x i64> %zext
+  %res = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %mask, <8 x float> %src0)
+  ret <8 x float> %res
+}
+
+define <8 x float> @test_gather_structpt_8f32_mask_index_offset(ptr %x, ptr %arr, <8 x i1> %mask, <8 x float> %src0) {
+; X64-KNL-LABEL: test_gather_structpt_8f32_mask_index_offset:
+; X64-KNL:       # %bb.0:
+; X64-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; X64-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
+; X64-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
+; X64-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
+; X64-KNL-NEXT:    vbroadcastss {{.*#+}} ymm0 = [536870911,536870911,536870911,536870911,536870911,536870911,536870911,536870911]
+; X64-KNL-NEXT:    vandps (%rsi), %ymm0, %ymm0
+; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-KNL-NEXT:    vmovaps %ymm1, %ymm0
+; X64-KNL-NEXT:    retq
+;
+; X86-KNL-LABEL: test_gather_structpt_8f32_mask_index_offset:
+; X86-KNL:       # %bb.0:
+; X86-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
+; X86-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
+; X86-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
+; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-KNL-NEXT:    vmovdqu (%ecx), %ymm0
+; X86-KNL-NEXT:    vpslld $3, %ymm0, %ymm0
+; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm0), %zmm1 {%k1}
+; X86-KNL-NEXT:    vmovaps %ymm1, %ymm0
+; X86-KNL-NEXT:    retl
+;
+; X64-SKX-SMALL-LABEL: test_gather_structpt_8f32_mask_index_offset:
+; X64-SKX-SMALL:       # %bb.0:
+; X64-SKX-SMALL-NEXT:    vpmovsxwd %xmm0, %ymm0
+; X64-SKX-SMALL-NEXT:    vpslld $31, %ymm0, %ymm0
+; X64-SKX-SMALL-NEXT:    vpmovd2m %ymm0, %k1
+; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %ymm0
+; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%ymm0,8), %ymm1 {%k1}
+; X64-SKX-SMALL-NEXT:    vmovaps %ymm1, %ymm0
+; X64-SKX-SMALL-NEXT:    retq
+;
+; X64-SKX-LARGE-LABEL: test_gather_structpt_8f32_mask_index_offset:
+; X64-SKX-LARGE:       # %bb.0:
+; X64-SKX-LARGE-NEXT:    vpmovsxwd %xmm0, %ymm0
+; X64-SKX-LARGE-NEXT:    vpslld $31, %ymm0, %ymm0
+; X64-SKX-LARGE-NEXT:    vpmovd2m %ymm0, %k1
+; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %ymm0
+; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
+; X64-SKX-LARGE-NEXT:    vandps (%rax){1to8}, %ymm0, %ymm0
+; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%ymm0,8), %ymm1 {%k1}
+; X64-SKX-LARGE-NEXT:    vmovaps %ymm1, %ymm0
+; X64-SKX-LARGE-NEXT:    retq
+;
+; X86-SKX-LABEL: test_gather_structpt_8f32_mask_index_offset:
+; X86-SKX:       # %bb.0:
+; X86-SKX-NEXT:    vpmovsxwd %xmm0, %ymm0
+; X86-SKX-NEXT:    vpslld $31, %ymm0, %ymm0
+; X86-SKX-NEXT:    vpmovd2m %ymm0, %k1
+; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SKX-NEXT:    vmovups (%ecx), %ymm0
+; X86-SKX-NEXT:    vgatherdps 4(%eax,%ymm0,8), %ymm1 {%k1}
+; X86-SKX-NEXT:    vmovaps %ymm1, %ymm0
+; X86-SKX-NEXT:    retl
+  %wide.load = load <8 x i32>, ptr %arr, align 4
+  %and = and <8 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
+  %zext = zext <8 x i32> %and to <8 x i64>
+  %ptrs = getelementptr inbounds %struct.pt2, ptr %x, <8 x i64> %zext, i32 1
+  %res = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %mask, <8 x float> %src0)
+  ret <8 x float> %res
+}
+
+define {<8 x float>, <8 x float>} @test_gather_8f32_mask_index_pair(ptr %x, ptr %arr, <8 x i1> %mask, <8 x float> %src0) {
+; X64-KNL-LABEL: test_gather_8f32_mask_index_pair:
+; X64-KNL:       # %bb.0:
+; X64-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; X64-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
+; X64-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
+; X64-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
+; X64-KNL-NEXT:    vbroadcastss {{.*#+}} ymm0 = [536870911,536870911,536870911,536870911,536870911,536870911,536870911,536870911]
+; X64-KNL-NEXT:    vandps (%rsi), %ymm0, %ymm2
+; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
+; X64-KNL-NEXT:    kmovw %k1, %k2
+; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
+; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
+; X64-KNL-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; X64-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 killed $zmm1
+; X64-KNL-NEXT:    retq
+;
+; X86-KNL-LABEL: test_gather_8f32_mask_index_pair:
+; X86-KNL:       # %bb.0:
+; X86-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
+; X86-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
+; X86-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
+; X86-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
+; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-KNL-NEXT:    vmovdqu (%ecx), %ymm0
+; X86-KNL-NEXT:    vpslld $3, %ymm0, %ymm2
+; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
+; X86-KNL-NEXT:    kmovw %k1, %k2
+; X86-KNL-NEXT:    vgatherdps (%eax,%zmm2), %zmm0 {%k2}
+; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm2), %zmm1 {%k1}
+; X86-KNL-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; X86-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 killed $zmm1
+; X86-KNL-NEXT:    retl
+;
+; X64-SKX-SMALL-LABEL: test_gather_8f32_mask_index_pair:
+; X64-SKX-SMALL:       # %bb.0:
+; X64-SKX-SMALL-NEXT:    vpmovsxwd %xmm0, %ymm0
+; X64-SKX-SMALL-NEXT:    vpslld $31, %ymm0, %ymm0
+; X64-SKX-SMALL-NEXT:    vpmovd2m %ymm0, %k1
+; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %ymm0
+; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm2
+; X64-SKX-SMALL-NEXT:    kmovw %k1, %k2
+; X64-SKX-SMALL-NEXT:    vmovaps %ymm1, %ymm0
+; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%ymm2,8), %ymm0 {%k2}
+; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%ymm2,8), %ymm1 {%k1}
+; X64-SKX-SMALL-NEXT:    retq
+;
+; X64-SKX-LARGE-LABEL: test_gather_8f32_mask_index_pair:
+; X64-SKX-LARGE:       # %bb.0:
+; X64-SKX-LARGE-NEXT:    vpmovsxwd %xmm0, %ymm0
+; X64-SKX-LARGE-NEXT:    vpslld $31, %ymm0, %ymm0
+; X64-SKX-LARGE-NEXT:    vpmovd2m %ymm0, %k1
+; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %ymm0
+; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
+; X64-SKX-LARGE-NEXT:    vandps (%rax){1to8}, %ymm0, %ymm2
+; X64-SKX-LARGE-NEXT:    kmovw %k1, %k2
+; X64-SKX-LARGE-NEXT:    vmovaps %ymm1, %ymm0
+; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%ymm2,8), %ymm0 {%k2}
+; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%ymm2,8), %ymm1 {%k1}
+; X64-SKX-LARGE-NEXT:    retq
+;
+; X86-SKX-LABEL: test_gather_8f32_mask_index_pair:
+; X86-SKX:       # %bb.0:
+; X86-SKX-NEXT:    vpmovsxwd %xmm0, %ymm0
+; X86-SKX-NEXT:    vpslld $31, %ymm0, %ymm0
+; X86-SKX-NEXT:    vpmovd2m %ymm0, %k1
+; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SKX-NEXT:    vmovups (%ecx), %ymm2
+; X86-SKX-NEXT:    kmovw %k1, %k2
+; X86-SKX-NEXT:    vmovaps %ymm1, %ymm0
+; X86-SKX-NEXT:    vgatherdps (%eax,%ymm2,8), %ymm0 {%k2}
+; X86-SKX-NEXT:    vgatherdps 4(%eax,%ymm2,8), %ymm1 {%k1}
+; X86-SKX-NEXT:    retl
+  %wide.load = load <8 x i32>, ptr %arr, align 4
+  %and = and <8 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
+  %zext = zext <8 x i32> %and to <8 x i64>
+  %ptrs1 = getelementptr inbounds %struct.pt2, ptr %x , <8 x i64> %zext
+  %res1 = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs1, i32 4, <8 x i1> %mask, <8 x float> %src0)
+  %ptrs = getelementptr inbounds %struct.pt2, ptr %x, <8 x i64> %zext, i32 1
+  %res = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %mask, <8 x float> %src0)
+  %pair1 = insertvalue {<8 x float>, <8 x float>} undef, <8 x float> %res1, 0
+  %pair2 = insertvalue {<8 x float>, <8 x float>} %pair1, <8 x float> %res, 1
+  ret {<8 x float>, <8 x float>} %pair2
+}
+
+define <16 x float> @test_gather_structpt_16f32_mask_index1(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0) {
+; X64-KNL-LABEL: test_gather_structpt_16f32_mask_index1:
+; X64-KNL:       # %bb.0:
+; X64-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X64-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
+; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
+; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
+; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
+; X64-KNL-NEXT:    retq
+;
+; X86-KNL-LABEL: test_gather_structpt_16f32_mask_index1:
+; X86-KNL:       # %bb.0:
+; X86-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X86-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
+; X86-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
+; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-KNL-NEXT:    vmovups (%ecx), %zmm0
+; X86-KNL-NEXT:    vgatherdps (%eax,%zmm0,8), %zmm1 {%k1}
+; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
+; X86-KNL-NEXT:    retl
+;
+; X64-SKX-SMALL-LABEL: test_gather_structpt_16f32_mask_index1:
+; X64-SKX-SMALL:       # %bb.0:
+; X64-SKX-SMALL-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X64-SKX-SMALL-NEXT:    vpslld $31, %zmm0, %zmm0
+; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
+; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %zmm0
+; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
+; X64-SKX-SMALL-NEXT:    retq
+;
+; X64-SKX-LARGE-LABEL: test_gather_structpt_16f32_mask_index1:
+; X64-SKX-LARGE:       # %bb.0:
+; X64-SKX-LARGE-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X64-SKX-LARGE-NEXT:    vpslld $31, %zmm0, %zmm0
+; X64-SKX-LARGE-NEXT:    vpmovd2m %zmm0, %k1
+; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %zmm0
+; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
+; X64-SKX-LARGE-NEXT:    vandps (%rax){1to16}, %zmm0, %zmm0
+; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
+; X64-SKX-LARGE-NEXT:    retq
+;
+; X86-SKX-LABEL: test_gather_structpt_16f32_mask_index1:
+; X86-SKX:       # %bb.0:
+; X86-SKX-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X86-SKX-NEXT:    vpslld $31, %zmm0, %zmm0
+; X86-SKX-NEXT:    vpmovd2m %zmm0, %k1
+; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SKX-NEXT:    vmovups (%ecx), %zmm0
+; X86-SKX-NEXT:    vgatherdps (%eax,%zmm0,8), %zmm1 {%k1}
+; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0
+; X86-SKX-NEXT:    retl
+  %wide.load = load <16 x i32>, ptr %arr, align 4
+  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
+  %zext = zext <16 x i32> %and to <16 x i64>
+  %ptrs = getelementptr inbounds %struct.pt2, ptr %x, <16 x i64> %zext
+  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_gather_structpt_16f32_mask_index_offset1(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0) {
+; X64-KNL-LABEL: test_gather_structpt_16f32_mask_index_offset1:
+; X64-KNL:       # %bb.0:
+; X64-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X64-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
+; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
+; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
+; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
+; X64-KNL-NEXT:    retq
+;
+; X86-KNL-LABEL: test_gather_structpt_16f32_mask_index_offset1:
+; X86-KNL:       # %bb.0:
+; X86-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X86-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
+; X86-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
+; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-KNL-NEXT:    vmovups (%ecx), %zmm0
+; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm0,8), %zmm1 {%k1}
+; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
+; X86-KNL-NEXT:    retl
+;
+; X64-SKX-SMALL-LABEL: test_gather_structpt_16f32_mask_index_offset1:
+; X64-SKX-SMALL:       # %bb.0:
+; X64-SKX-SMALL-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X64-SKX-SMALL-NEXT:    vpslld $31, %zmm0, %zmm0
+; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
+; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %zmm0
+; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
+; X64-SKX-SMALL-NEXT:    retq
+;
+; X64-SKX-LARGE-LABEL: test_gather_structpt_16f32_mask_index_offset1:
+; X64-SKX-LARGE:       # %bb.0:
+; X64-SKX-LARGE-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X64-SKX-LARGE-NEXT:    vpslld $31, %zmm0, %zmm0
+; X64-SKX-LARGE-NEXT:    vpmovd2m %zmm0, %k1
+; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %zmm0
+; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
+; X64-SKX-LARGE-NEXT:    vandps (%rax){1to16}, %zmm0, %zmm0
+; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
+; X64-SKX-LARGE-NEXT:    retq
+;
+; X86-SKX-LABEL: test_gather_structpt_16f32_mask_index_offset1:
+; X86-SKX:       # %bb.0:
+; X86-SKX-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X86-SKX-NEXT:    vpslld $31, %zmm0, %zmm0
+; X86-SKX-NEXT:    vpmovd2m %zmm0, %k1
+; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SKX-NEXT:    vmovups (%ecx), %zmm0
+; X86-SKX-NEXT:    vgatherdps 4(%eax,%zmm0,8), %zmm1 {%k1}
+; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0
+; X86-SKX-NEXT:    retl
+  %wide.load = load <16 x i32>, ptr %arr, align 4
+  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
+  %zext = zext <16 x i32> %and to <16 x i64>
+  %ptrs = getelementptr inbounds %struct.pt2, ptr %x, <16 x i64> %zext, i32 1
+  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
+  ret <16 x float> %res
+}
+
+define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair2(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0) {
+; X64-KNL-LABEL: test_gather_16f32_mask_index_pair2:
+; X64-KNL:       # %bb.0:
+; X64-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X64-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
+; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
+; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
+; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2
+; X64-KNL-NEXT:    kmovw %k1, %k2
+; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
+; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
+; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
+; X64-KNL-NEXT:    retq
+;
+; X86-KNL-LABEL: test_gather_16f32_mask_index_pair2:
+; X86-KNL:       # %bb.0:
+; X86-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X86-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
+; X86-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
+; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-KNL-NEXT:    vmovups (%ecx), %zmm2
+; X86-KNL-NEXT:    kmovw %k1, %k2
+; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
+; X86-KNL-NEXT:    vgatherdps (%eax,%zmm2,8), %zmm0 {%k2}
+; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm2,8), %zmm1 {%k1}
+; X86-KNL-NEXT:    retl
+;
+; X64-SKX-SMALL-LABEL: test_gather_16f32_mask_index_pair2:
+; X64-SKX-SMALL:       # %bb.0:
+; X64-SKX-SMALL-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X64-SKX-SMALL-NEXT:    vpslld $31, %zmm0, %zmm0
+; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
+; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %zmm0
+; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2
+; X64-SKX-SMALL-NEXT:    kmovw %k1, %k2
+; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
+; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
+; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
+; X64-SKX-SMALL-NEXT:    retq
+;
+; X64-SKX-LARGE-LABEL: test_gather_16f32_mask_index_pair2:
+; X64-SKX-LARGE:       # %bb.0:
+; X64-SKX-LARGE-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X64-SKX-LARGE-NEXT:    vpslld $31, %zmm0, %zmm0
+; X64-SKX-LARGE-NEXT:    vpmovd2m %zmm0, %k1
+; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %zmm0
+; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
+; X64-SKX-LARGE-NEXT:    vandps (%rax){1to16}, %zmm0, %zmm2
+; X64-SKX-LARGE-NEXT:    kmovw %k1, %k2
+; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
+; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
+; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
+; X64-SKX-LARGE-NEXT:    retq
+;
+; X86-SKX-LABEL: test_gather_16f32_mask_index_pair2:
+; X86-SKX:       # %bb.0:
+; X86-SKX-NEXT:    vpmovsxbd %xmm0, %zmm0
+; X86-SKX-NEXT:    vpslld $31, %zmm0, %zmm0
+; X86-SKX-NEXT:    vpmovd2m %zmm0, %k1
+; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SKX-NEXT:    vmovups (%ecx), %zmm2
+; X86-SKX-NEXT:    kmovw %k1, %k2
+; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0
+; X86-SKX-NEXT:    vgatherdps (%eax,%zmm2,8), %zmm0 {%k2}
+; X86-SKX-NEXT:    vgatherdps 4(%eax,%zmm2,8), %zmm1 {%k1}
+; X86-SKX-NEXT:    retl
+  %wide.load = load <16 x i32>, ptr %arr, align 4
+  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
+  %zext = zext <16 x i32> %and to <16 x i64>
+  %ptrs1 = getelementptr inbounds %struct.pt2, ptr %x, <16 x i64> %zext
+  %res1 = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs1, i32 4, <16 x i1> %mask, <16 x float> %src0)
+  %ptrs = getelementptr inbounds %struct.pt2, ptr %x, <16 x i64> %zext, i32 1
+  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
+  %pair1 = insertvalue {<16 x float>, <16 x float>} undef, <16 x float> %res1, 0
+  %pair2 = insertvalue {<16 x float>, <16 x float>} %pair1, <16 x float> %res, 1
+  ret {<16 x float>, <16 x float>} %pair2
+}

>From 4b57e4a562d4a8cfd80bd04e8125f928e83b5f5e Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Wed, 23 Apr 2025 15:43:13 +0530
Subject: [PATCH 12/16] Update undef to poison in testcase

---
 llvm/test/CodeGen/X86/masked_gather_scatter.ll | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index b80f98e449a5a..d109e337b6581 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -4963,7 +4963,7 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
   %res1 = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs1, i32 4, <16 x i1> %mask, <16 x float> %src0)
   %ptrs = getelementptr inbounds %struct.pt, ptr %x, <16 x i64> %zext, i32 1
   %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
-  %pair1 = insertvalue {<16 x float>, <16 x float>} undef, <16 x float> %res1, 0
+  %pair1 = insertvalue {<16 x float>, <16 x float>} poison, <16 x float> %res1, 0
   %pair2 = insertvalue {<16 x float>, <16 x float>} %pair1, <16 x float> %res, 1
   ret {<16 x float>, <16 x float>} %pair2
 }
@@ -5188,7 +5188,7 @@ define {<8 x float>, <8 x float>} @test_gather_8f32_mask_index_pair(ptr %x, ptr
   %res1 = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs1, i32 4, <8 x i1> %mask, <8 x float> %src0)
   %ptrs = getelementptr inbounds %struct.pt2, ptr %x, <8 x i64> %zext, i32 1
   %res = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %mask, <8 x float> %src0)
-  %pair1 = insertvalue {<8 x float>, <8 x float>} undef, <8 x float> %res1, 0
+  %pair1 = insertvalue {<8 x float>, <8 x float>} poison, <8 x float> %res1, 0
   %pair2 = insertvalue {<8 x float>, <8 x float>} %pair1, <8 x float> %res, 1
   ret {<8 x float>, <8 x float>} %pair2
 }
@@ -5400,7 +5400,7 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair2(ptr %x,
   %res1 = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs1, i32 4, <16 x i1> %mask, <16 x float> %src0)
   %ptrs = getelementptr inbounds %struct.pt2, ptr %x, <16 x i64> %zext, i32 1
   %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
-  %pair1 = insertvalue {<16 x float>, <16 x float>} undef, <16 x float> %res1, 0
+  %pair1 = insertvalue {<16 x float>, <16 x float>} poison, <16 x float> %res1, 0
   %pair2 = insertvalue {<16 x float>, <16 x float>} %pair1, <16 x float> %res, 1
   ret {<16 x float>, <16 x float>} %pair2
 }

>From 5a12c7d3dc545173d51cab61a1edeca7412b41b9 Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Tue, 29 Apr 2025 19:18:28 +0530
Subject: [PATCH 13/16] Update the testcase

---
 .../test/CodeGen/X86/masked_gather_scatter.ll | 219 ++++++------------
 1 file changed, 73 insertions(+), 146 deletions(-)

diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index dbf3e42f77018..553ac56d360ae 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -5408,13 +5408,13 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair(
 define <8 x float> @test_gather_structpt2_8f32_mask_index(ptr %x, ptr %arr, <8 x i1> %mask, <8 x float> %src0) {
 ; X64-KNL-LABEL: test_gather_structpt2_8f32_mask_index:
 ; X64-KNL:       # %bb.0:
+; X64-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
 ; X64-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; X64-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
-; X64-KNL-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [536870911,536870911,536870911,536870911,536870911,536870911,536870911,536870911]
-; X64-KNL-NEXT:    vpand (%rsi), %ymm0, %ymm0
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-KNL-NEXT:    vgatherqps (%rdi,%zmm0,8), %ymm1 {%k1}
+; X64-KNL-NEXT:    vbroadcastss {{.*#+}} ymm0 = [536870911,536870911,536870911,536870911,536870911,536870911,536870911,536870911]
+; X64-KNL-NEXT:    vandps (%rsi), %ymm0, %ymm0
+; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
 ; X64-KNL-NEXT:    vmovaps %ymm1, %ymm0
 ; X64-KNL-NEXT:    retq
 ;
@@ -5437,10 +5437,9 @@ define <8 x float> @test_gather_structpt2_8f32_mask_index(ptr %x, ptr %arr, <8 x
 ; X64-SKX-SMALL-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; X64-SKX-SMALL-NEXT:    vpslld $31, %ymm0, %ymm0
 ; X64-SKX-SMALL-NEXT:    vpmovd2m %ymm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovdqu (%rsi), %ymm0
-; X64-SKX-SMALL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-SMALL-NEXT:    vgatherqps (%rdi,%zmm0,8), %ymm1 {%k1}
+; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %ymm0
+; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%ymm0,8), %ymm1 {%k1}
 ; X64-SKX-SMALL-NEXT:    vmovaps %ymm1, %ymm0
 ; X64-SKX-SMALL-NEXT:    retq
 ;
@@ -5449,11 +5448,10 @@ define <8 x float> @test_gather_structpt2_8f32_mask_index(ptr %x, ptr %arr, <8 x
 ; X64-SKX-LARGE-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; X64-SKX-LARGE-NEXT:    vpslld $31, %ymm0, %ymm0
 ; X64-SKX-LARGE-NEXT:    vpmovd2m %ymm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovdqu (%rsi), %ymm0
+; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %ymm0
 ; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vpandd (%rax){1to8}, %ymm0, %ymm0
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-LARGE-NEXT:    vgatherqps (%rdi,%zmm0,8), %ymm1 {%k1}
+; X64-SKX-LARGE-NEXT:    vandps (%rax){1to8}, %ymm0, %ymm0
+; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%ymm0,8), %ymm1 {%k1}
 ; X64-SKX-LARGE-NEXT:    vmovaps %ymm1, %ymm0
 ; X64-SKX-LARGE-NEXT:    retq
 ;
@@ -5479,13 +5477,13 @@ define <8 x float> @test_gather_structpt2_8f32_mask_index(ptr %x, ptr %arr, <8 x
 define <8 x float> @test_gather_structpt2_8f32_mask_index_offset(ptr %x, ptr %arr, <8 x i1> %mask, <8 x float> %src0) {
 ; X64-KNL-LABEL: test_gather_structpt2_8f32_mask_index_offset:
 ; X64-KNL:       # %bb.0:
+; X64-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
 ; X64-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; X64-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
-; X64-KNL-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [536870911,536870911,536870911,536870911,536870911,536870911,536870911,536870911]
-; X64-KNL-NEXT:    vpand (%rsi), %ymm0, %ymm0
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-KNL-NEXT:    vgatherqps 4(%rdi,%zmm0,8), %ymm1 {%k1}
+; X64-KNL-NEXT:    vbroadcastss {{.*#+}} ymm0 = [536870911,536870911,536870911,536870911,536870911,536870911,536870911,536870911]
+; X64-KNL-NEXT:    vandps (%rsi), %ymm0, %ymm0
+; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
 ; X64-KNL-NEXT:    vmovaps %ymm1, %ymm0
 ; X64-KNL-NEXT:    retq
 ;
@@ -5508,10 +5506,9 @@ define <8 x float> @test_gather_structpt2_8f32_mask_index_offset(ptr %x, ptr %ar
 ; X64-SKX-SMALL-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; X64-SKX-SMALL-NEXT:    vpslld $31, %ymm0, %ymm0
 ; X64-SKX-SMALL-NEXT:    vpmovd2m %ymm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovdqu (%rsi), %ymm0
-; X64-SKX-SMALL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-SMALL-NEXT:    vgatherqps 4(%rdi,%zmm0,8), %ymm1 {%k1}
+; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %ymm0
+; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
+; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%ymm0,8), %ymm1 {%k1}
 ; X64-SKX-SMALL-NEXT:    vmovaps %ymm1, %ymm0
 ; X64-SKX-SMALL-NEXT:    retq
 ;
@@ -5520,11 +5517,10 @@ define <8 x float> @test_gather_structpt2_8f32_mask_index_offset(ptr %x, ptr %ar
 ; X64-SKX-LARGE-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; X64-SKX-LARGE-NEXT:    vpslld $31, %ymm0, %ymm0
 ; X64-SKX-LARGE-NEXT:    vpmovd2m %ymm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovdqu (%rsi), %ymm0
+; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %ymm0
 ; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vpandd (%rax){1to8}, %ymm0, %ymm0
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-LARGE-NEXT:    vgatherqps 4(%rdi,%zmm0,8), %ymm1 {%k1}
+; X64-SKX-LARGE-NEXT:    vandps (%rax){1to8}, %ymm0, %ymm0
+; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%ymm0,8), %ymm1 {%k1}
 ; X64-SKX-LARGE-NEXT:    vmovaps %ymm1, %ymm0
 ; X64-SKX-LARGE-NEXT:    retq
 ;
@@ -5550,16 +5546,18 @@ define <8 x float> @test_gather_structpt2_8f32_mask_index_offset(ptr %x, ptr %ar
 define {<8 x float>, <8 x float>} @test_gather_structpt2_8f32_mask_index_pair(ptr %x, ptr %arr, <8 x i1> %mask, <8 x float> %src0) {
 ; X64-KNL-LABEL: test_gather_structpt2_8f32_mask_index_pair:
 ; X64-KNL:       # %bb.0:
+; X64-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
 ; X64-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
 ; X64-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
-; X64-KNL-NEXT:    vpbroadcastd {{.*#+}} ymm0 = [536870911,536870911,536870911,536870911,536870911,536870911,536870911,536870911]
-; X64-KNL-NEXT:    vpand (%rsi), %ymm0, %ymm0
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; X64-KNL-NEXT:    vbroadcastss {{.*#+}} ymm0 = [536870911,536870911,536870911,536870911,536870911,536870911,536870911,536870911]
+; X64-KNL-NEXT:    vandps (%rsi), %ymm0, %ymm2
+; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-KNL-NEXT:    kmovw %k1, %k2
-; X64-KNL-NEXT:    vmovaps %ymm1, %ymm0
-; X64-KNL-NEXT:    vgatherqps (%rdi,%zmm2,8), %ymm0 {%k2}
-; X64-KNL-NEXT:    vgatherqps 4(%rdi,%zmm2,8), %ymm1 {%k1}
+; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
+; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
+; X64-KNL-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; X64-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 killed $zmm1
 ; X64-KNL-NEXT:    retq
 ;
 ; X86-KNL-LABEL: test_gather_structpt2_8f32_mask_index_pair:
@@ -5585,13 +5583,12 @@ define {<8 x float>, <8 x float>} @test_gather_structpt2_8f32_mask_index_pair(pt
 ; X64-SKX-SMALL-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; X64-SKX-SMALL-NEXT:    vpslld $31, %ymm0, %ymm0
 ; X64-SKX-SMALL-NEXT:    vpmovd2m %ymm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovdqu (%rsi), %ymm0
-; X64-SKX-SMALL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %ymm0
+; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm2
 ; X64-SKX-SMALL-NEXT:    kmovw %k1, %k2
 ; X64-SKX-SMALL-NEXT:    vmovaps %ymm1, %ymm0
-; X64-SKX-SMALL-NEXT:    vgatherqps (%rdi,%zmm2,8), %ymm0 {%k2}
-; X64-SKX-SMALL-NEXT:    vgatherqps 4(%rdi,%zmm2,8), %ymm1 {%k1}
+; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%ymm2,8), %ymm0 {%k2}
+; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%ymm2,8), %ymm1 {%k1}
 ; X64-SKX-SMALL-NEXT:    retq
 ;
 ; X64-SKX-LARGE-LABEL: test_gather_structpt2_8f32_mask_index_pair:
@@ -5599,14 +5596,13 @@ define {<8 x float>, <8 x float>} @test_gather_structpt2_8f32_mask_index_pair(pt
 ; X64-SKX-LARGE-NEXT:    vpmovsxwd %xmm0, %ymm0
 ; X64-SKX-LARGE-NEXT:    vpslld $31, %ymm0, %ymm0
 ; X64-SKX-LARGE-NEXT:    vpmovd2m %ymm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovdqu (%rsi), %ymm0
+; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %ymm0
 ; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vpandd (%rax){1to8}, %ymm0, %ymm0
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
+; X64-SKX-LARGE-NEXT:    vandps (%rax){1to8}, %ymm0, %ymm2
 ; X64-SKX-LARGE-NEXT:    kmovw %k1, %k2
 ; X64-SKX-LARGE-NEXT:    vmovaps %ymm1, %ymm0
-; X64-SKX-LARGE-NEXT:    vgatherqps (%rdi,%zmm2,8), %ymm0 {%k2}
-; X64-SKX-LARGE-NEXT:    vgatherqps 4(%rdi,%zmm2,8), %ymm1 {%k1}
+; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%ymm2,8), %ymm0 {%k2}
+; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%ymm2,8), %ymm1 {%k1}
 ; X64-SKX-LARGE-NEXT:    retq
 ;
 ; X86-SKX-LABEL: test_gather_structpt2_8f32_mask_index_pair:
@@ -5642,14 +5638,8 @@ define <16 x float> @test_gather_structpt2_16f32_mask_index(ptr %x, ptr %arr, <1
 ; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
 ; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-KNL-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-KNL-NEXT:    vextractf64x4 $1, %zmm1, %ymm3
-; X64-KNL-NEXT:    kshiftrw $8, %k1, %k2
-; X64-KNL-NEXT:    vgatherqps (%rdi,%zmm0,8), %ymm3 {%k2}
-; X64-KNL-NEXT:    vgatherqps (%rdi,%zmm2,8), %ymm1 {%k1}
-; X64-KNL-NEXT:    vinsertf64x4 $1, %ymm3, %zmm1, %zmm0
+; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-KNL-NEXT:    retq
 ;
 ; X86-KNL-LABEL: test_gather_structpt2_16f32_mask_index:
@@ -5669,16 +5659,10 @@ define <16 x float> @test_gather_structpt2_16f32_mask_index(ptr %x, ptr %arr, <1
 ; X64-SKX-SMALL-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X64-SKX-SMALL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-SKX-SMALL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-SMALL-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-SMALL-NEXT:    vextractf64x4 $1, %zmm1, %ymm3
-; X64-SKX-SMALL-NEXT:    kshiftrw $8, %k1, %k2
-; X64-SKX-SMALL-NEXT:    vgatherqps (%rdi,%zmm0,8), %ymm3 {%k2}
-; X64-SKX-SMALL-NEXT:    vgatherqps (%rdi,%zmm2,8), %ymm1 {%k1}
-; X64-SKX-SMALL-NEXT:    vinsertf64x4 $1, %ymm3, %zmm1, %zmm0
+; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %zmm0
+; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-SKX-SMALL-NEXT:    retq
 ;
 ; X64-SKX-LARGE-LABEL: test_gather_structpt2_16f32_mask_index:
@@ -5686,17 +5670,11 @@ define <16 x float> @test_gather_structpt2_16f32_mask_index(ptr %x, ptr %arr, <1
 ; X64-SKX-LARGE-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X64-SKX-LARGE-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-SKX-LARGE-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovdqu64 (%rsi), %zmm0
+; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %zmm0
 ; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-LARGE-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-LARGE-NEXT:    vextractf64x4 $1, %zmm1, %ymm3
-; X64-SKX-LARGE-NEXT:    kshiftrw $8, %k1, %k2
-; X64-SKX-LARGE-NEXT:    vgatherqps (%rdi,%zmm0,8), %ymm3 {%k2}
-; X64-SKX-LARGE-NEXT:    vgatherqps (%rdi,%zmm2,8), %ymm1 {%k1}
-; X64-SKX-LARGE-NEXT:    vinsertf64x4 $1, %ymm3, %zmm1, %zmm0
+; X64-SKX-LARGE-NEXT:    vandps (%rax){1to16}, %zmm0, %zmm0
+; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-SKX-LARGE-NEXT:    retq
 ;
 ; X86-SKX-LABEL: test_gather_structpt2_16f32_mask_index:
@@ -5726,14 +5704,8 @@ define <16 x float> @test_gather_structpt2_16f32_mask_index_offset(ptr %x, ptr %
 ; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
 ; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-KNL-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-KNL-NEXT:    vextractf64x4 $1, %zmm1, %ymm3
-; X64-KNL-NEXT:    kshiftrw $8, %k1, %k2
-; X64-KNL-NEXT:    vgatherqps 4(%rdi,%zmm0,8), %ymm3 {%k2}
-; X64-KNL-NEXT:    vgatherqps 4(%rdi,%zmm2,8), %ymm1 {%k1}
-; X64-KNL-NEXT:    vinsertf64x4 $1, %ymm3, %zmm1, %zmm0
+; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-KNL-NEXT:    retq
 ;
 ; X86-KNL-LABEL: test_gather_structpt2_16f32_mask_index_offset:
@@ -5753,16 +5725,10 @@ define <16 x float> @test_gather_structpt2_16f32_mask_index_offset(ptr %x, ptr %
 ; X64-SKX-SMALL-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X64-SKX-SMALL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-SKX-SMALL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-SMALL-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-SMALL-NEXT:    vextractf64x4 $1, %zmm1, %ymm3
-; X64-SKX-SMALL-NEXT:    kshiftrw $8, %k1, %k2
-; X64-SKX-SMALL-NEXT:    vgatherqps 4(%rdi,%zmm0,8), %ymm3 {%k2}
-; X64-SKX-SMALL-NEXT:    vgatherqps 4(%rdi,%zmm2,8), %ymm1 {%k1}
-; X64-SKX-SMALL-NEXT:    vinsertf64x4 $1, %ymm3, %zmm1, %zmm0
+; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %zmm0
+; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
+; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-SKX-SMALL-NEXT:    retq
 ;
 ; X64-SKX-LARGE-LABEL: test_gather_structpt2_16f32_mask_index_offset:
@@ -5770,17 +5736,11 @@ define <16 x float> @test_gather_structpt2_16f32_mask_index_offset(ptr %x, ptr %
 ; X64-SKX-LARGE-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X64-SKX-LARGE-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-SKX-LARGE-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovdqu64 (%rsi), %zmm0
+; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %zmm0
 ; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-LARGE-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-LARGE-NEXT:    vextractf64x4 $1, %zmm1, %ymm3
-; X64-SKX-LARGE-NEXT:    kshiftrw $8, %k1, %k2
-; X64-SKX-LARGE-NEXT:    vgatherqps 4(%rdi,%zmm0,8), %ymm3 {%k2}
-; X64-SKX-LARGE-NEXT:    vgatherqps 4(%rdi,%zmm2,8), %ymm1 {%k1}
-; X64-SKX-LARGE-NEXT:    vinsertf64x4 $1, %ymm3, %zmm1, %zmm0
+; X64-SKX-LARGE-NEXT:    vandps (%rax){1to16}, %zmm0, %zmm0
+; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
+; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
 ; X64-SKX-LARGE-NEXT:    retq
 ;
 ; X86-SKX-LABEL: test_gather_structpt2_16f32_mask_index_offset:
@@ -5809,22 +5769,11 @@ define {<16 x float>, <16 x float>} @test_gather_structpt2_16f32_mask_index_pair
 ; X64-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
 ; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-KNL-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; X64-KNL-NEXT:    vpmovzxdq {{.*#+}} zmm3 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-KNL-NEXT:    vextractf64x4 $1, %zmm1, %ymm4
-; X64-KNL-NEXT:    kshiftrw $8, %k1, %k2
-; X64-KNL-NEXT:    vmovaps %ymm4, %ymm0
-; X64-KNL-NEXT:    kmovw %k2, %k3
-; X64-KNL-NEXT:    vgatherqps (%rdi,%zmm3,8), %ymm0 {%k3}
-; X64-KNL-NEXT:    vmovaps %ymm1, %ymm5
-; X64-KNL-NEXT:    kmovw %k1, %k3
-; X64-KNL-NEXT:    vgatherqps (%rdi,%zmm2,8), %ymm5 {%k3}
-; X64-KNL-NEXT:    vinsertf64x4 $1, %ymm0, %zmm5, %zmm0
-; X64-KNL-NEXT:    vgatherqps 4(%rdi,%zmm3,8), %ymm4 {%k2}
-; X64-KNL-NEXT:    vgatherqps 4(%rdi,%zmm2,8), %ymm1 {%k1}
-; X64-KNL-NEXT:    vinsertf64x4 $1, %ymm4, %zmm1, %zmm1
+; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2
+; X64-KNL-NEXT:    kmovw %k1, %k2
+; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
+; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
+; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
 ; X64-KNL-NEXT:    retq
 ;
 ; X86-KNL-LABEL: test_gather_structpt2_16f32_mask_index_pair:
@@ -5846,23 +5795,12 @@ define {<16 x float>, <16 x float>} @test_gather_structpt2_16f32_mask_index_pair
 ; X64-SKX-SMALL-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X64-SKX-SMALL-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-SKX-SMALL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-SMALL-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; X64-SKX-SMALL-NEXT:    vpmovzxdq {{.*#+}} zmm3 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-SMALL-NEXT:    vextractf64x4 $1, %zmm1, %ymm4
-; X64-SKX-SMALL-NEXT:    kshiftrw $8, %k1, %k2
-; X64-SKX-SMALL-NEXT:    vmovaps %ymm4, %ymm0
-; X64-SKX-SMALL-NEXT:    kmovw %k2, %k3
-; X64-SKX-SMALL-NEXT:    vgatherqps (%rdi,%zmm3,8), %ymm0 {%k3}
-; X64-SKX-SMALL-NEXT:    vmovaps %ymm1, %ymm5
-; X64-SKX-SMALL-NEXT:    kmovw %k1, %k3
-; X64-SKX-SMALL-NEXT:    vgatherqps (%rdi,%zmm2,8), %ymm5 {%k3}
-; X64-SKX-SMALL-NEXT:    vinsertf64x4 $1, %ymm0, %zmm5, %zmm0
-; X64-SKX-SMALL-NEXT:    vgatherqps 4(%rdi,%zmm3,8), %ymm4 {%k2}
-; X64-SKX-SMALL-NEXT:    vgatherqps 4(%rdi,%zmm2,8), %ymm1 {%k1}
-; X64-SKX-SMALL-NEXT:    vinsertf64x4 $1, %ymm4, %zmm1, %zmm1
+; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %zmm0
+; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2
+; X64-SKX-SMALL-NEXT:    kmovw %k1, %k2
+; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
+; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
+; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
 ; X64-SKX-SMALL-NEXT:    retq
 ;
 ; X64-SKX-LARGE-LABEL: test_gather_structpt2_16f32_mask_index_pair:
@@ -5870,24 +5808,13 @@ define {<16 x float>, <16 x float>} @test_gather_structpt2_16f32_mask_index_pair
 ; X64-SKX-LARGE-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X64-SKX-LARGE-NEXT:    vpslld $31, %zmm0, %zmm0
 ; X64-SKX-LARGE-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovdqu64 (%rsi), %zmm0
+; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %zmm0
 ; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm2 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-LARGE-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; X64-SKX-LARGE-NEXT:    vpmovzxdq {{.*#+}} zmm3 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
-; X64-SKX-LARGE-NEXT:    vextractf64x4 $1, %zmm1, %ymm4
-; X64-SKX-LARGE-NEXT:    kshiftrw $8, %k1, %k2
-; X64-SKX-LARGE-NEXT:    kmovw %k2, %k3
-; X64-SKX-LARGE-NEXT:    vmovaps %ymm4, %ymm0
-; X64-SKX-LARGE-NEXT:    vgatherqps (%rdi,%zmm3,8), %ymm0 {%k3}
-; X64-SKX-LARGE-NEXT:    kmovw %k1, %k3
-; X64-SKX-LARGE-NEXT:    vmovaps %ymm1, %ymm5
-; X64-SKX-LARGE-NEXT:    vgatherqps (%rdi,%zmm2,8), %ymm5 {%k3}
-; X64-SKX-LARGE-NEXT:    vinsertf64x4 $1, %ymm0, %zmm5, %zmm0
-; X64-SKX-LARGE-NEXT:    vgatherqps 4(%rdi,%zmm3,8), %ymm4 {%k2}
-; X64-SKX-LARGE-NEXT:    vgatherqps 4(%rdi,%zmm2,8), %ymm1 {%k1}
-; X64-SKX-LARGE-NEXT:    vinsertf64x4 $1, %ymm4, %zmm1, %zmm1
+; X64-SKX-LARGE-NEXT:    vandps (%rax){1to16}, %zmm0, %zmm2
+; X64-SKX-LARGE-NEXT:    kmovw %k1, %k2
+; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
+; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
+; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
 ; X64-SKX-LARGE-NEXT:    retq
 ;
 ; X86-SKX-LABEL: test_gather_structpt2_16f32_mask_index_pair:

>From d2f1352e5caab450a9b183e4d776f06d2f58c5c2 Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Tue, 29 Apr 2025 19:24:57 +0530
Subject: [PATCH 14/16] Remove the unwanted tests

---
 .../test/CodeGen/X86/masked_gather_scatter.ll | 447 +-----------------
 1 file changed, 5 insertions(+), 442 deletions(-)

diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index 553ac56d360ae..4aa906b1ae557 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -4904,8 +4904,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
   ret <16 x float> %res
 }
 
-define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0) {
-; X64-KNL-LABEL: test_gather_16f32_mask_index_pair:
+define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0) {
+; X64-KNL-LABEL: test_gather_structpt_16f32_mask_index_pair:
 ; X64-KNL:       # %bb.0:
 ; X64-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X64-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
@@ -4917,7 +4917,7 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm2), %zmm1 {%k1}
 ; X64-KNL-NEXT:    retq
 ;
-; X86-KNL-LABEL: test_gather_16f32_mask_index_pair:
+; X86-KNL-LABEL: test_gather_structpt_16f32_mask_index_pair:
 ; X86-KNL:       # %bb.0:
 ; X86-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X86-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
@@ -4931,7 +4931,7 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm2), %zmm1 {%k1}
 ; X86-KNL-NEXT:    retl
 ;
-; X64-SKX-LABEL: test_gather_16f32_mask_index_pair:
+; X64-SKX-LABEL: test_gather_structpt_16f32_mask_index_pair:
 ; X64-SKX:       # %bb.0:
 ; X64-SKX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X64-SKX-NEXT:    vpslld $31, %zmm0, %zmm0
@@ -4943,7 +4943,7 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
 ; X64-SKX-NEXT:    vgatherdps 4(%rdi,%zmm2), %zmm1 {%k1}
 ; X64-SKX-NEXT:    retq
 ;
-; X86-SKX-LABEL: test_gather_16f32_mask_index_pair:
+; X86-SKX-LABEL: test_gather_structpt_16f32_mask_index_pair:
 ; X86-SKX:       # %bb.0:
 ; X86-SKX-NEXT:    vpmovsxbd %xmm0, %zmm0
 ; X86-SKX-NEXT:    vpslld $31, %zmm0, %zmm0
@@ -4968,443 +4968,6 @@ define {<16 x float>, <16 x float>} @test_gather_16f32_mask_index_pair(ptr %x, p
   ret {<16 x float>, <16 x float>} %pair2
 }
 
-define <8 x float> @test_gather_structpt_8f32_mask_index(ptr %x, ptr %arr, <8 x i1> %mask, <8 x float> %src0) {
-; X64-KNL-LABEL: test_gather_structpt_8f32_mask_index:
-; X64-KNL:       # %bb.0:
-; X64-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
-; X64-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
-; X64-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
-; X64-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
-; X64-KNL-NEXT:    vbroadcastss {{.*#+}} ymm0 = [536870911,536870911,536870911,536870911,536870911,536870911,536870911,536870911]
-; X64-KNL-NEXT:    vandps (%rsi), %ymm0, %ymm0
-; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
-; X64-KNL-NEXT:    vmovaps %ymm1, %ymm0
-; X64-KNL-NEXT:    retq
-;
-; X86-KNL-LABEL: test_gather_structpt_8f32_mask_index:
-; X86-KNL:       # %bb.0:
-; X86-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
-; X86-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
-; X86-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
-; X86-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
-; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-KNL-NEXT:    vmovdqu (%ecx), %ymm0
-; X86-KNL-NEXT:    vpslld $3, %ymm0, %ymm0
-; X86-KNL-NEXT:    vgatherdps (%eax,%zmm0), %zmm1 {%k1}
-; X86-KNL-NEXT:    vmovaps %ymm1, %ymm0
-; X86-KNL-NEXT:    retl
-;
-; X64-SKX-SMALL-LABEL: test_gather_structpt_8f32_mask_index:
-; X64-SKX-SMALL:       # %bb.0:
-; X64-SKX-SMALL-NEXT:    vpmovsxwd %xmm0, %ymm0
-; X64-SKX-SMALL-NEXT:    vpslld $31, %ymm0, %ymm0
-; X64-SKX-SMALL-NEXT:    vpmovd2m %ymm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %ymm0
-; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
-; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%ymm0,8), %ymm1 {%k1}
-; X64-SKX-SMALL-NEXT:    vmovaps %ymm1, %ymm0
-; X64-SKX-SMALL-NEXT:    retq
-;
-; X64-SKX-LARGE-LABEL: test_gather_structpt_8f32_mask_index:
-; X64-SKX-LARGE:       # %bb.0:
-; X64-SKX-LARGE-NEXT:    vpmovsxwd %xmm0, %ymm0
-; X64-SKX-LARGE-NEXT:    vpslld $31, %ymm0, %ymm0
-; X64-SKX-LARGE-NEXT:    vpmovd2m %ymm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %ymm0
-; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vandps (%rax){1to8}, %ymm0, %ymm0
-; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%ymm0,8), %ymm1 {%k1}
-; X64-SKX-LARGE-NEXT:    vmovaps %ymm1, %ymm0
-; X64-SKX-LARGE-NEXT:    retq
-;
-; X86-SKX-LABEL: test_gather_structpt_8f32_mask_index:
-; X86-SKX:       # %bb.0:
-; X86-SKX-NEXT:    vpmovsxwd %xmm0, %ymm0
-; X86-SKX-NEXT:    vpslld $31, %ymm0, %ymm0
-; X86-SKX-NEXT:    vpmovd2m %ymm0, %k1
-; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SKX-NEXT:    vmovups (%ecx), %ymm0
-; X86-SKX-NEXT:    vgatherdps (%eax,%ymm0,8), %ymm1 {%k1}
-; X86-SKX-NEXT:    vmovaps %ymm1, %ymm0
-; X86-SKX-NEXT:    retl
-  %wide.load = load <8 x i32>, ptr %arr, align 4
-  %and = and <8 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
-  %zext = zext <8 x i32> %and to <8 x i64>
-  %ptrs = getelementptr inbounds %struct.pt2, ptr %x, <8 x i64> %zext
-  %res = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %mask, <8 x float> %src0)
-  ret <8 x float> %res
-}
-
-define <8 x float> @test_gather_structpt_8f32_mask_index_offset(ptr %x, ptr %arr, <8 x i1> %mask, <8 x float> %src0) {
-; X64-KNL-LABEL: test_gather_structpt_8f32_mask_index_offset:
-; X64-KNL:       # %bb.0:
-; X64-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
-; X64-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
-; X64-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
-; X64-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
-; X64-KNL-NEXT:    vbroadcastss {{.*#+}} ymm0 = [536870911,536870911,536870911,536870911,536870911,536870911,536870911,536870911]
-; X64-KNL-NEXT:    vandps (%rsi), %ymm0, %ymm0
-; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
-; X64-KNL-NEXT:    vmovaps %ymm1, %ymm0
-; X64-KNL-NEXT:    retq
-;
-; X86-KNL-LABEL: test_gather_structpt_8f32_mask_index_offset:
-; X86-KNL:       # %bb.0:
-; X86-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
-; X86-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
-; X86-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
-; X86-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
-; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-KNL-NEXT:    vmovdqu (%ecx), %ymm0
-; X86-KNL-NEXT:    vpslld $3, %ymm0, %ymm0
-; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm0), %zmm1 {%k1}
-; X86-KNL-NEXT:    vmovaps %ymm1, %ymm0
-; X86-KNL-NEXT:    retl
-;
-; X64-SKX-SMALL-LABEL: test_gather_structpt_8f32_mask_index_offset:
-; X64-SKX-SMALL:       # %bb.0:
-; X64-SKX-SMALL-NEXT:    vpmovsxwd %xmm0, %ymm0
-; X64-SKX-SMALL-NEXT:    vpslld $31, %ymm0, %ymm0
-; X64-SKX-SMALL-NEXT:    vpmovd2m %ymm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %ymm0
-; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm0
-; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%ymm0,8), %ymm1 {%k1}
-; X64-SKX-SMALL-NEXT:    vmovaps %ymm1, %ymm0
-; X64-SKX-SMALL-NEXT:    retq
-;
-; X64-SKX-LARGE-LABEL: test_gather_structpt_8f32_mask_index_offset:
-; X64-SKX-LARGE:       # %bb.0:
-; X64-SKX-LARGE-NEXT:    vpmovsxwd %xmm0, %ymm0
-; X64-SKX-LARGE-NEXT:    vpslld $31, %ymm0, %ymm0
-; X64-SKX-LARGE-NEXT:    vpmovd2m %ymm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %ymm0
-; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vandps (%rax){1to8}, %ymm0, %ymm0
-; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%ymm0,8), %ymm1 {%k1}
-; X64-SKX-LARGE-NEXT:    vmovaps %ymm1, %ymm0
-; X64-SKX-LARGE-NEXT:    retq
-;
-; X86-SKX-LABEL: test_gather_structpt_8f32_mask_index_offset:
-; X86-SKX:       # %bb.0:
-; X86-SKX-NEXT:    vpmovsxwd %xmm0, %ymm0
-; X86-SKX-NEXT:    vpslld $31, %ymm0, %ymm0
-; X86-SKX-NEXT:    vpmovd2m %ymm0, %k1
-; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SKX-NEXT:    vmovups (%ecx), %ymm0
-; X86-SKX-NEXT:    vgatherdps 4(%eax,%ymm0,8), %ymm1 {%k1}
-; X86-SKX-NEXT:    vmovaps %ymm1, %ymm0
-; X86-SKX-NEXT:    retl
-  %wide.load = load <8 x i32>, ptr %arr, align 4
-  %and = and <8 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
-  %zext = zext <8 x i32> %and to <8 x i64>
-  %ptrs = getelementptr inbounds %struct.pt2, ptr %x, <8 x i64> %zext, i32 1
-  %res = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %mask, <8 x float> %src0)
-  ret <8 x float> %res
-}
-
-define {<8 x float>, <8 x float>} @test_gather_8f32_mask_index_pair(ptr %x, ptr %arr, <8 x i1> %mask, <8 x float> %src0) {
-; X64-KNL-LABEL: test_gather_8f32_mask_index_pair:
-; X64-KNL:       # %bb.0:
-; X64-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
-; X64-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
-; X64-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
-; X64-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
-; X64-KNL-NEXT:    vbroadcastss {{.*#+}} ymm0 = [536870911,536870911,536870911,536870911,536870911,536870911,536870911,536870911]
-; X64-KNL-NEXT:    vandps (%rsi), %ymm0, %ymm2
-; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
-; X64-KNL-NEXT:    kmovw %k1, %k2
-; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
-; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
-; X64-KNL-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; X64-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 killed $zmm1
-; X64-KNL-NEXT:    retq
-;
-; X86-KNL-LABEL: test_gather_8f32_mask_index_pair:
-; X86-KNL:       # %bb.0:
-; X86-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
-; X86-KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
-; X86-KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
-; X86-KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1
-; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-KNL-NEXT:    vmovdqu (%ecx), %ymm0
-; X86-KNL-NEXT:    vpslld $3, %ymm0, %ymm2
-; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
-; X86-KNL-NEXT:    kmovw %k1, %k2
-; X86-KNL-NEXT:    vgatherdps (%eax,%zmm2), %zmm0 {%k2}
-; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm2), %zmm1 {%k1}
-; X86-KNL-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
-; X86-KNL-NEXT:    # kill: def $ymm1 killed $ymm1 killed $zmm1
-; X86-KNL-NEXT:    retl
-;
-; X64-SKX-SMALL-LABEL: test_gather_8f32_mask_index_pair:
-; X64-SKX-SMALL:       # %bb.0:
-; X64-SKX-SMALL-NEXT:    vpmovsxwd %xmm0, %ymm0
-; X64-SKX-SMALL-NEXT:    vpslld $31, %ymm0, %ymm0
-; X64-SKX-SMALL-NEXT:    vpmovd2m %ymm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %ymm0
-; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm0, %ymm2
-; X64-SKX-SMALL-NEXT:    kmovw %k1, %k2
-; X64-SKX-SMALL-NEXT:    vmovaps %ymm1, %ymm0
-; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%ymm2,8), %ymm0 {%k2}
-; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%ymm2,8), %ymm1 {%k1}
-; X64-SKX-SMALL-NEXT:    retq
-;
-; X64-SKX-LARGE-LABEL: test_gather_8f32_mask_index_pair:
-; X64-SKX-LARGE:       # %bb.0:
-; X64-SKX-LARGE-NEXT:    vpmovsxwd %xmm0, %ymm0
-; X64-SKX-LARGE-NEXT:    vpslld $31, %ymm0, %ymm0
-; X64-SKX-LARGE-NEXT:    vpmovd2m %ymm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %ymm0
-; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vandps (%rax){1to8}, %ymm0, %ymm2
-; X64-SKX-LARGE-NEXT:    kmovw %k1, %k2
-; X64-SKX-LARGE-NEXT:    vmovaps %ymm1, %ymm0
-; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%ymm2,8), %ymm0 {%k2}
-; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%ymm2,8), %ymm1 {%k1}
-; X64-SKX-LARGE-NEXT:    retq
-;
-; X86-SKX-LABEL: test_gather_8f32_mask_index_pair:
-; X86-SKX:       # %bb.0:
-; X86-SKX-NEXT:    vpmovsxwd %xmm0, %ymm0
-; X86-SKX-NEXT:    vpslld $31, %ymm0, %ymm0
-; X86-SKX-NEXT:    vpmovd2m %ymm0, %k1
-; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SKX-NEXT:    vmovups (%ecx), %ymm2
-; X86-SKX-NEXT:    kmovw %k1, %k2
-; X86-SKX-NEXT:    vmovaps %ymm1, %ymm0
-; X86-SKX-NEXT:    vgatherdps (%eax,%ymm2,8), %ymm0 {%k2}
-; X86-SKX-NEXT:    vgatherdps 4(%eax,%ymm2,8), %ymm1 {%k1}
-; X86-SKX-NEXT:    retl
-  %wide.load = load <8 x i32>, ptr %arr, align 4
-  %and = and <8 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
-  %zext = zext <8 x i32> %and to <8 x i64>
-  %ptrs1 = getelementptr inbounds %struct.pt2, ptr %x , <8 x i64> %zext
-  %res1 = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs1, i32 4, <8 x i1> %mask, <8 x float> %src0)
-  %ptrs = getelementptr inbounds %struct.pt2, ptr %x, <8 x i64> %zext, i32 1
-  %res = call <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr> %ptrs, i32 4, <8 x i1> %mask, <8 x float> %src0)
-  %pair1 = insertvalue {<8 x float>, <8 x float>} poison, <8 x float> %res1, 0
-  %pair2 = insertvalue {<8 x float>, <8 x float>} %pair1, <8 x float> %res, 1
-  ret {<8 x float>, <8 x float>} %pair2
-}
-
-define <16 x float> @test_gather_structpt_16f32_mask_index1(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0) {
-; X64-KNL-LABEL: test_gather_structpt_16f32_mask_index1:
-; X64-KNL:       # %bb.0:
-; X64-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
-; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
-; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
-; X64-KNL-NEXT:    retq
-;
-; X86-KNL-LABEL: test_gather_structpt_16f32_mask_index1:
-; X86-KNL:       # %bb.0:
-; X86-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X86-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
-; X86-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
-; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-KNL-NEXT:    vmovups (%ecx), %zmm0
-; X86-KNL-NEXT:    vgatherdps (%eax,%zmm0,8), %zmm1 {%k1}
-; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
-; X86-KNL-NEXT:    retl
-;
-; X64-SKX-SMALL-LABEL: test_gather_structpt_16f32_mask_index1:
-; X64-SKX-SMALL:       # %bb.0:
-; X64-SKX-SMALL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %zmm0
-; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
-; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
-; X64-SKX-SMALL-NEXT:    retq
-;
-; X64-SKX-LARGE-LABEL: test_gather_structpt_16f32_mask_index1:
-; X64-SKX-LARGE:       # %bb.0:
-; X64-SKX-LARGE-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %zmm0
-; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vandps (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
-; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
-; X64-SKX-LARGE-NEXT:    retq
-;
-; X86-SKX-LABEL: test_gather_structpt_16f32_mask_index1:
-; X86-SKX:       # %bb.0:
-; X86-SKX-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X86-SKX-NEXT:    vpslld $31, %zmm0, %zmm0
-; X86-SKX-NEXT:    vpmovd2m %zmm0, %k1
-; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SKX-NEXT:    vmovups (%ecx), %zmm0
-; X86-SKX-NEXT:    vgatherdps (%eax,%zmm0,8), %zmm1 {%k1}
-; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0
-; X86-SKX-NEXT:    retl
-  %wide.load = load <16 x i32>, ptr %arr, align 4
-  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
-  %zext = zext <16 x i32> %and to <16 x i64>
-  %ptrs = getelementptr inbounds %struct.pt2, ptr %x, <16 x i64> %zext
-  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
-  ret <16 x float> %res
-}
-
-define <16 x float> @test_gather_structpt_16f32_mask_index_offset1(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0) {
-; X64-KNL-LABEL: test_gather_structpt_16f32_mask_index_offset1:
-; X64-KNL:       # %bb.0:
-; X64-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
-; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
-; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
-; X64-KNL-NEXT:    retq
-;
-; X86-KNL-LABEL: test_gather_structpt_16f32_mask_index_offset1:
-; X86-KNL:       # %bb.0:
-; X86-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X86-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
-; X86-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
-; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-KNL-NEXT:    vmovups (%ecx), %zmm0
-; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm0,8), %zmm1 {%k1}
-; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
-; X86-KNL-NEXT:    retl
-;
-; X64-SKX-SMALL-LABEL: test_gather_structpt_16f32_mask_index_offset1:
-; X64-SKX-SMALL:       # %bb.0:
-; X64-SKX-SMALL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %zmm0
-; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
-; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
-; X64-SKX-SMALL-NEXT:    retq
-;
-; X64-SKX-LARGE-LABEL: test_gather_structpt_16f32_mask_index_offset1:
-; X64-SKX-LARGE:       # %bb.0:
-; X64-SKX-LARGE-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %zmm0
-; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vandps (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
-; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
-; X64-SKX-LARGE-NEXT:    retq
-;
-; X86-SKX-LABEL: test_gather_structpt_16f32_mask_index_offset1:
-; X86-SKX:       # %bb.0:
-; X86-SKX-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X86-SKX-NEXT:    vpslld $31, %zmm0, %zmm0
-; X86-SKX-NEXT:    vpmovd2m %zmm0, %k1
-; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SKX-NEXT:    vmovups (%ecx), %zmm0
-; X86-SKX-NEXT:    vgatherdps 4(%eax,%zmm0,8), %zmm1 {%k1}
-; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0
-; X86-SKX-NEXT:    retl
-  %wide.load = load <16 x i32>, ptr %arr, align 4
-  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
-  %zext = zext <16 x i32> %and to <16 x i64>
-  %ptrs = getelementptr inbounds %struct.pt2, ptr %x, <16 x i64> %zext, i32 1
-  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
-  ret <16 x float> %res
-}
-
-define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair(ptr %x, ptr %arr, <16 x i1> %mask, <16 x float> %src0) {
-; X64-KNL-LABEL: test_gather_structpt_16f32_mask_index_pair:
-; X64-KNL:       # %bb.0:
-; X64-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
-; X64-KNL-NEXT:    vmovdqu64 (%rsi), %zmm0
-; X64-KNL-NEXT:    vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2
-; X64-KNL-NEXT:    kmovw %k1, %k2
-; X64-KNL-NEXT:    vmovaps %zmm1, %zmm0
-; X64-KNL-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
-; X64-KNL-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
-; X64-KNL-NEXT:    retq
-;
-; X86-KNL-LABEL: test_gather_structpt_16f32_mask_index_pair:
-; X86-KNL:       # %bb.0:
-; X86-KNL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X86-KNL-NEXT:    vpslld $31, %zmm0, %zmm0
-; X86-KNL-NEXT:    vptestmd %zmm0, %zmm0, %k1
-; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-KNL-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-KNL-NEXT:    vmovups (%ecx), %zmm2
-; X86-KNL-NEXT:    kmovw %k1, %k2
-; X86-KNL-NEXT:    vmovaps %zmm1, %zmm0
-; X86-KNL-NEXT:    vgatherdps (%eax,%zmm2,8), %zmm0 {%k2}
-; X86-KNL-NEXT:    vgatherdps 4(%eax,%zmm2,8), %zmm1 {%k1}
-; X86-KNL-NEXT:    retl
-;
-; X64-SKX-SMALL-LABEL: test_gather_structpt_16f32_mask_index_pair:
-; X64-SKX-SMALL:       # %bb.0:
-; X64-SKX-SMALL-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT:    vmovups (%rsi), %zmm0
-; X64-SKX-SMALL-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2
-; X64-SKX-SMALL-NEXT:    kmovw %k1, %k2
-; X64-SKX-SMALL-NEXT:    vmovaps %zmm1, %zmm0
-; X64-SKX-SMALL-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
-; X64-SKX-SMALL-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
-; X64-SKX-SMALL-NEXT:    retq
-;
-; X64-SKX-LARGE-LABEL: test_gather_structpt_16f32_mask_index_pair:
-; X64-SKX-LARGE:       # %bb.0:
-; X64-SKX-LARGE-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpslld $31, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT:    vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT:    vmovups (%rsi), %zmm0
-; X64-SKX-LARGE-NEXT:    movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT:    vandps (%rax){1to16}, %zmm0, %zmm2
-; X64-SKX-LARGE-NEXT:    kmovw %k1, %k2
-; X64-SKX-LARGE-NEXT:    vmovaps %zmm1, %zmm0
-; X64-SKX-LARGE-NEXT:    vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
-; X64-SKX-LARGE-NEXT:    vgatherdps 4(%rdi,%zmm2,8), %zmm1 {%k1}
-; X64-SKX-LARGE-NEXT:    retq
-;
-; X86-SKX-LABEL: test_gather_structpt_16f32_mask_index_pair:
-; X86-SKX:       # %bb.0:
-; X86-SKX-NEXT:    vpmovsxbd %xmm0, %zmm0
-; X86-SKX-NEXT:    vpslld $31, %zmm0, %zmm0
-; X86-SKX-NEXT:    vpmovd2m %zmm0, %k1
-; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SKX-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SKX-NEXT:    vmovups (%ecx), %zmm2
-; X86-SKX-NEXT:    kmovw %k1, %k2
-; X86-SKX-NEXT:    vmovaps %zmm1, %zmm0
-; X86-SKX-NEXT:    vgatherdps (%eax,%zmm2,8), %zmm0 {%k2}
-; X86-SKX-NEXT:    vgatherdps 4(%eax,%zmm2,8), %zmm1 {%k1}
-; X86-SKX-NEXT:    retl
-  %wide.load = load <16 x i32>, ptr %arr, align 4
-  %and = and <16 x i32> %wide.load, <i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911, i32 536870911>
-  %zext = zext <16 x i32> %and to <16 x i64>
-  %ptrs1 = getelementptr inbounds %struct.pt2, ptr %x, <16 x i64> %zext
-  %res1 = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs1, i32 4, <16 x i1> %mask, <16 x float> %src0)
-  %ptrs = getelementptr inbounds %struct.pt2, ptr %x, <16 x i64> %zext, i32 1
-  %res = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %ptrs, i32 4, <16 x i1> %mask, <16 x float> %src0)
-  %pair1 = insertvalue {<16 x float>, <16 x float>} poison, <16 x float> %res1, 0
-  %pair2 = insertvalue {<16 x float>, <16 x float>} %pair1, <16 x float> %res, 1
-  ret {<16 x float>, <16 x float>} %pair2
-}
-
 define <8 x float> @test_gather_structpt2_8f32_mask_index(ptr %x, ptr %arr, <8 x i1> %mask, <8 x float> %src0) {
 ; X64-KNL-LABEL: test_gather_structpt2_8f32_mask_index:
 ; X64-KNL:       # %bb.0:

>From ed72aa373daf038120e8fa8933426bfaff5c2a49 Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Mon, 5 May 2025 11:58:48 +0530
Subject: [PATCH 15/16] Log2 handling and updating the comments

---
 llvm/lib/Target/X86/X86ISelLowering.cpp | 29 ++++++++++++-------------
 1 file changed, 14 insertions(+), 15 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 21f0fd0995b7f..2c8ce125c184a 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -56582,16 +56582,18 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
     // index truncation below.
     if (Index.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Scale)) {
       unsigned BitWidth = Index.getScalarValueSizeInBits();
-      unsigned MaskBits = BitWidth - Log2_32(Scale->getAsZExtVal());
+      unsigned ScaleAmt = Scale->getAsZExtVal();
+      assert(isPowerOf2_32(ScaleAmt) && "Scale must be a power of 2");
+      unsigned Log2ScaleAmt = Log2_32(ScaleAmt);
+      unsigned MaskBits = BitWidth - Log2ScaleAmt;
       APInt DemandedBits = APInt::getLowBitsSet(BitWidth, MaskBits);
       if (TLI.SimplifyDemandedBits(Index, DemandedBits, DCI)) {
         if (N->getOpcode() != ISD::DELETED_NODE)
           DCI.AddToWorklist(N);
         return SDValue(N, 0);
       }
-      uint64_t ScaleAmt = Scale->getAsZExtVal();
       if (auto MinShAmt = DAG.getValidMinimumShiftAmount(Index)) {
-        if (*MinShAmt >= 1 && (*MinShAmt + Log2_64(ScaleAmt)) < 4 &&
+        if (*MinShAmt >= 1 && (*MinShAmt + Log2ScaleAmt) < 4 &&
             DAG.ComputeNumSignBits(Index.getOperand(0)) > 1) {
           SDValue ShAmt = Index.getOperand(1);
           SDValue NewShAmt =
@@ -56607,10 +56609,10 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
     }
     unsigned IndexWidth = Index.getScalarValueSizeInBits();
 
-    // Shrink indices if they are larger than 32-bits.
-    // Only do this before legalize types since v2i64 could become v2i32.
-    // FIXME: We could check that the type is legal if we're after legalize
-    // types, but then we would need to construct test cases where that happens.
+    // If the index is a left shift, \ComputeNumSignBits we are recomputing the number of sign bits
+    // from the shifted value. We are trying to enable the optimization in which
+    // we can shrink indices if they are larger than 32-bits. Using the existing
+    // fold techniques implemented below.
     unsigned ComputeNumSignBits = DAG.ComputeNumSignBits(Index);
     if (Index.getOpcode() == ISD::SHL) {
       if (auto MinShAmt = DAG.getValidMinimumShiftAmount(Index)) {
@@ -56620,6 +56622,11 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
       }
     }
 
+    // Shrink indices if they are larger than 32-bits.
+    // Only do this before legalize types since v2i64 could become v2i32.
+    // FIXME: We could check that the type is legal if we're after legalize
+    // types, but then we would need to construct test cases where that happens.
+    // Note: ComputeNumSignBits was recomputed above when Index is a shift.
     if (IndexWidth > 32 && ComputeNumSignBits > (IndexWidth - 32)) {
       EVT NewVT = IndexVT.changeVectorElementType(MVT::i32);
 
@@ -56651,14 +56658,6 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
 
   EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
 
-  // if (Index.getOpcode() == ISD::SHL) {
-  //   unsigned BitWidth = Index.getScalarValueSizeInBits();
-  //   unsigned MaskBits = BitWidth - Log2_32(Scale->getAsZExtVal());
-  //   APInt DemandedBits = APInt::getLowBitsSet(BitWidth, MaskBits);
-  //   if (TLI.SimplifyDemandedBits(Index, DemandedBits, DCI)) {
-  //     return SDValue(N, 0);
-  //   }
-  // }
   // Try to move splat adders from the index operand to the base
   // pointer operand. Taking care to multiply by the scale. We can only do
   // this when index element type is the same as the pointer type.
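
For reference, the demanded-bits mask computed above follows from the address
arithmetic Base + Index * Scale: with a power-of-two Scale, the multiply is a
left shift by Log2(Scale), so the top Log2(Scale) bits of each index lane can
never reach the final address and need not be demanded. A minimal standalone
sketch of that arithmetic (plain C++, independent of the LLVM APIs; Scale = 8
matches the gather scale selected by the tests above):

#include <cassert>
#include <cstdint>
#include <iostream>

int main() {
  // Mirrors the patch: MaskBits = BitWidth - Log2(Scale), Scale a power of 2.
  const unsigned BitWidth = 64;   // index element width, e.g. one v8i64 lane
  const uint64_t Scale = 8;       // gather scale used by the tests
  assert((Scale & (Scale - 1)) == 0 && "Scale must be a power of 2");
  unsigned Log2Scale = 0;
  while ((uint64_t{1} << Log2Scale) < Scale)
    ++Log2Scale;                  // Log2Scale == 3 for Scale == 8
  const unsigned MaskBits = BitWidth - Log2Scale; // 61 demanded low bits

  // Two indices that differ only in the undemanded top bits contribute the
  // same value to Base + Index * Scale, because Index * 8 == Index << 3
  // wraps modulo 2^64.
  uint64_t IdxA = 0x1234;
  uint64_t IdxB = IdxA | (uint64_t{7} << MaskBits); // set the 3 ignored bits
  std::cout << (IdxA * Scale == IdxB * Scale) << '\n'; // prints 1
  return 0;
}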

>From 1644defc6bf892134a2cfb073829fbcab84bf9f4 Mon Sep 17 00:00:00 2001
From: Rohit Aggarwal <Rohit.Aggarwal at amd.com>
Date: Mon, 5 May 2025 12:59:31 +0530
Subject: [PATCH 16/16] Fix formatting

---
 llvm/lib/Target/X86/X86ISelLowering.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 2c8ce125c184a..297065f592391 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -56609,10 +56609,10 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
     }
     unsigned IndexWidth = Index.getScalarValueSizeInBits();
 
-    // If the index is a left shift, \ComputeNumSignBits we are recomputing the number of sign bits
-    // from the shifted value. We are trying to enable the optimization in which
-    // we can shrink indices if they are larger than 32-bits. Using the existing
-    // fold techniques implemented below.
+    // If the index is a left shift, recompute ComputeNumSignBits from the
+    // value being shifted (see the SHL handling just below). This enables the
+    // later fold that shrinks indices wider than 32 bits, reusing the
+    // existing truncation techniques.
     unsigned ComputeNumSignBits = DAG.ComputeNumSignBits(Index);
     if (Index.getOpcode() == ISD::SHL) {
       if (auto MinShAmt = DAG.getValidMinimumShiftAmount(Index)) {
