[llvm] [GISel][AMDGPU] Expand ShuffleVector (PR #124527)

Alan Li via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 21 10:08:12 PST 2025


https://github.com/lialan updated https://github.com/llvm/llvm-project/pull/124527

>From 16ad8a6ae79f92c43d935846f2a38a686f3409c5 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Mon, 27 Jan 2025 18:26:19 +0800
Subject: [PATCH 01/20] First commit

---
 .../llvm/CodeGen/GlobalISel/CombinerHelper.h  |  4 ++
 .../include/llvm/Target/GlobalISel/Combine.td |  9 ++-
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 41 +++++++++++++
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 59 +++++++++++++++++++
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h  |  2 +
 5 files changed, 114 insertions(+), 1 deletion(-)
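
For context, the core rewrite this patch introduces, sketched in generic MIR (register names and types are illustrative, not taken from the patch): a shuffle whose mask selects an in-order, contiguous run of elements from a single source is really a subvector extraction.

  %dst:_(<2 x s16>) = G_SHUFFLE_VECTOR %v0(<4 x s16>), %v1, shufflemask(2, 3)
  ; ---> combined into:
  %dst:_(<2 x s16>) = G_EXTRACT_SUBVECTOR %v0(<4 x s16>), 2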

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 9b78342c8fc39..c1c303fd18e6b 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -264,6 +264,10 @@ class CombinerHelper {
   void applyCombineShuffleConcat(MachineInstr &MI,
                                  SmallVector<Register> &Ops) const;
 
+  /// Replace \p MI with a narrow extract_subvector.
+  bool matchCombineShuffleExtract(MachineInstr &MI, int64_t &IsFirst) const;
+  void applyCombineShuffleExtract(MachineInstr &MI, int64_t IsFirst) const;
+
   /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
   /// Returns true if MI changed.
   ///
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 3590ab221ad44..30316305d9e4f 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1560,6 +1560,13 @@ def combine_shuffle_concat : GICombineRule<
         [{ return Helper.matchCombineShuffleConcat(*${root}, ${matchinfo}); }]),
   (apply [{ Helper.applyCombineShuffleConcat(*${root}, ${matchinfo}); }])>;
 
+// Combines a vector shuffle into an extract_subvector.
+def combine_shuffle_vector : GICombineRule<
+  (defs root:$root, int64_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
+    [{ return Helper.matchCombineShuffleExtract(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyCombineShuffleExtract(*${root}, ${matchinfo}); }])>;
+
 def insert_vector_element_idx_undef : GICombineRule<
    (defs root:$root),
    (match (G_IMPLICIT_DEF $idx),
@@ -2026,7 +2033,7 @@ def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
     and_or_disjoint_mask, fma_combines, fold_binop_into_select,
     sub_add_reg, select_to_minmax,
     fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
-    simplify_neg_minmax, combine_concat_vector,
+    simplify_neg_minmax, combine_concat_vector, combine_shuffle_vector,
     sext_trunc, zext_trunc, prefer_sign_combines, shuffle_combines,
     combine_use_vector_truncate, merge_combines, overflow_combines]>;
 
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 0dfbb91f2ac54..2e517304d527a 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -384,6 +384,47 @@ void CombinerHelper::applyCombineConcatVectors(
   MI.eraseFromParent();
 }
 
+bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx) const {
+  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
+         "Invalid instruction");
+  auto &Shuffle = cast<GShuffleVector>(MI);
+  const auto &TLI = getTargetLowering();
+  
+  auto SrcVec1 = Shuffle.getSrc1Reg();
+  auto SrcVec2 = Shuffle.getSrc2Reg();
+  auto Mask = Shuffle.getMask();
+
+  int Width = MRI.getType(SrcVec1).getNumElements();
+
+  // Check that all mask elements refer to a single source
+  // vector.
+  auto MaxValue = *std::max_element(Mask.begin(), Mask.end());
+  auto MinValue = *std::min_element(Mask.begin(), Mask.end());
+  if (MaxValue >= Width && MinValue < Width) {
+    return false;
+  }
+  // Check if the extractee's order is kept:
+  if (!std::is_sorted(Mask.begin(), Mask.end())) {
+    return false;
+  }
+
+  Idx = Mask.front();
+  return true;
+}
+
+void CombinerHelper::applyCombineShuffleExtract(MachineInstr &MI, int64_t Idx) const {
+  auto &Shuffle = cast<GShuffleVector>(MI);
+
+  auto SrcVec1 = Shuffle.getSrc1Reg();
+  auto SrcVec2 = Shuffle.getSrc2Reg();
+  int Width = MRI.getType(SrcVec1).getNumElements();
+
+  auto SrcVec = Idx < Width ? SrcVec1 : SrcVec2;
+
+  Builder.buildExtractSubvector(MI.getOperand(0).getReg(), SrcVec, Idx);
+  MI.eraseFromParent();
+}
+
 bool CombinerHelper::matchCombineShuffleConcat(
     MachineInstr &MI, SmallVector<Register> &Ops) const {
   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 649deee346e90..66ba4c62b04c5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -29,6 +29,7 @@
 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/Register.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
@@ -1832,6 +1833,11 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
       .lower();
   }
 
+  getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
+    //.fewerElementsIf(isWideVec16(0), changeTo(0, V2S16))
+    .customFor({V8S16, V4S16})
+    .lower();
+
   getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
     .unsupportedIf([=](const LegalityQuery &Query) {
         const LLT &EltTy = Query.Types[1].getElementType();
@@ -2127,6 +2133,8 @@ bool AMDGPULegalizerInfo::legalizeCustom(
   case TargetOpcode::G_FMINNUM_IEEE:
   case TargetOpcode::G_FMAXNUM_IEEE:
     return legalizeMinNumMaxNum(Helper, MI);
+  case TargetOpcode::G_EXTRACT_SUBVECTOR:
+    return legalizeExtractSubvector(MI, MRI, B);
   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
     return legalizeExtractVectorElt(MI, MRI, B);
   case TargetOpcode::G_INSERT_VECTOR_ELT:
@@ -2713,6 +2721,57 @@ bool AMDGPULegalizerInfo::legalizeMinNumMaxNum(LegalizerHelper &Helper,
   return Helper.lowerFMinNumMaxNum(MI) == LegalizerHelper::Legalized;
 }
 
+static auto buildExtractSubvector(MachineIRBuilder &B, SrcOp Src,
+                                  LLT DstTy, unsigned Start) {
+  SmallVector<Register, 8> Subvectors;
+  for (unsigned i = Start, e = Start + DstTy.getNumElements(); i != e; ++i) {
+    Subvectors.push_back(
+        B.buildExtractVectorElementConstant(DstTy.getElementType(), Src, i)
+            .getReg(0));
+  }
+  return B.buildBuildVector(DstTy, Subvectors);
+}
+
+bool AMDGPULegalizerInfo::legalizeExtractSubvector(
+  MachineInstr &MI, MachineRegisterInfo &MRI,
+  MachineIRBuilder &B) const {
+  const auto &Instr = llvm::cast<GExtractSubvector>(MI);
+  Register Src = Instr.getSrcVec();
+  Register Dst = MI.getOperand(0).getReg();
+  auto Start = Instr.getIndexImm();
+
+  LLT SrcTy = MRI.getType(Src);
+  LLT DstTy = MRI.getType(Dst);
+
+  LLT EltTy = SrcTy.getElementType();
+  assert(EltTy == DstTy.getElementType());
+  auto Count = DstTy.getNumElements();
+  assert(SrcTy.getNumElements() % 2 == 0 && Count % 2 == 0);
+
+  // Split vector size into legal sub vectors, and use build_vector
+  // to merge the result.
+  if (EltTy.getScalarSizeInBits() == 16 && Start % 2 == 0) {
+    bool UseScalar = Count == 2;
+    // Extract 32-bit registers at a time.
+    LLT NewSrcTy =
+        UseScalar ? S32 : LLT::fixed_vector(SrcTy.getNumElements() / 2, S32);
+    auto Bitcasted = B.buildBitcast(NewSrcTy, Src).getReg(0);
+    LLT NewDstTy = LLT::fixed_vector(DstTy.getNumElements() / 2, S32);
+
+    SmallVector<Register, 8> Subvectors;
+    for (unsigned i = Start / 2, e = (Start + Count) / 2; i != e; ++i) {
+      auto Subvec = B.buildExtractVectorElementConstant(S32, Bitcasted, i);
+      Subvectors.push_back(Subvec.getReg(0));
+    }
+
+    auto BuildVec = B.buildBuildVector(NewDstTy, Subvectors);
+    B.buildBitcast(Dst, BuildVec.getReg(0));
+    MI.eraseFromParent();
+    return true;
+  }
+  return false;
+}
+
 bool AMDGPULegalizerInfo::legalizeExtractVectorElt(
   MachineInstr &MI, MachineRegisterInfo &MRI,
   MachineIRBuilder &B) const {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index 03b7c36fc450f..f50dbd028ce2b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -56,6 +56,8 @@ class AMDGPULegalizerInfo final : public LegalizerInfo {
   bool legalizeFPTOI(MachineInstr &MI, MachineRegisterInfo &MRI,
                      MachineIRBuilder &B, bool Signed) const;
   bool legalizeMinNumMaxNum(LegalizerHelper &Helper, MachineInstr &MI) const;
+  bool legalizeExtractSubvector(MachineInstr &MI, MachineRegisterInfo &MRI,
+                                MachineIRBuilder &B) const;
   bool legalizeExtractVectorElt(MachineInstr &MI, MachineRegisterInfo &MRI,
                                 MachineIRBuilder &B) const;
   bool legalizeInsertVectorElt(MachineInstr &MI, MachineRegisterInfo &MRI,

>From 932c0767bc87bda31807a095e2e303b8db1225e4 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Mon, 27 Jan 2025 19:17:12 +0800
Subject: [PATCH 02/20] update

---
 llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 14 ++++++++++++--
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 12 ++++--------
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 2e517304d527a..fb91d7446319a 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -388,13 +388,16 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
          "Invalid instruction");
   auto &Shuffle = cast<GShuffleVector>(MI);
-  const auto &TLI = getTargetLowering();
   
   auto SrcVec1 = Shuffle.getSrc1Reg();
-  auto SrcVec2 = Shuffle.getSrc2Reg();
+  Register SrcVec2 = Shuffle.getSrc2Reg();
   auto Mask = Shuffle.getMask();
 
   int Width = MRI.getType(SrcVec1).getNumElements();
+  int Width2 = MRI.getType(SrcVec2).getNumElements();
+
+  if (!llvm::isPowerOf2_32(Width))
+    return false;
 
   // Check that all mask elements refer to a single source
   // vector.
@@ -403,6 +406,13 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
   if (MaxValue >= Width && MinValue < Width) {
     return false;
   }
+
+  // Check that the extractee length is power of 2.
+  if ((MaxValue < Width && !llvm::isPowerOf2_32(Width)) ||
+      (MinValue >= Width && !llvm::isPowerOf2_32(Width2))) {
+    return false;
+  }
+
   // Check if the extractee's order is kept:
   if (!std::is_sorted(Mask.begin(), Mask.end())) {
     return false;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 66ba4c62b04c5..0853070eb9c61 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2755,16 +2755,12 @@ bool AMDGPULegalizerInfo::legalizeExtractSubvector(
     // Extract 32-bit registers at a time.
     LLT NewSrcTy =
         UseScalar ? S32 : LLT::fixed_vector(SrcTy.getNumElements() / 2, S32);
-    auto Bitcasted = B.buildBitcast(NewSrcTy, Src).getReg(0);
     LLT NewDstTy = LLT::fixed_vector(DstTy.getNumElements() / 2, S32);
+    auto Bitcasted = B.buildBitcast(NewSrcTy, Src);
 
-    SmallVector<Register, 8> Subvectors;
-    for (unsigned i = Start / 2, e = (Start + Count) / 2; i != e; ++i) {
-      auto Subvec = B.buildExtractVectorElementConstant(S32, Bitcasted, i);
-      Subvectors.push_back(Subvec.getReg(0));
-    }
-
-    auto BuildVec = B.buildBuildVector(NewDstTy, Subvectors);
+    auto BuildVec =
+        UseScalar ? Bitcasted
+                  : buildExtractSubvector(B, Bitcasted, NewDstTy, Start / 2);
     B.buildBitcast(Dst, BuildVec.getReg(0));
     MI.eraseFromParent();
     return true;

>From 05c27373ac4eb149d0ff336fae676dccc8086da6 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Mon, 27 Jan 2025 22:15:15 +0800
Subject: [PATCH 03/20] small fix

---
 llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 12 ++++++++++--
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp |  4 ++--
 2 files changed, 12 insertions(+), 4 deletions(-)
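
A hypothetical sketch of what the tightened mask check accepts and rejects (mask values chosen for illustration):

  ; accepted: contiguous, in-order run within one source -> G_EXTRACT_SUBVECTOR ..., 2
  %d:_(<2 x s16>) = G_SHUFFLE_VECTOR %a(<4 x s16>), %b, shufflemask(2, 3)
  ; rejected: sorted but not contiguous
  %d:_(<2 x s16>) = G_SHUFFLE_VECTOR %a(<4 x s16>), %b, shufflemask(0, 2)
  ; rejected: contiguous values but out of order
  %d:_(<2 x s16>) = G_SHUFFLE_VECTOR %a(<4 x s16>), %b, shufflemask(3, 2)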

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index fb91d7446319a..88b0dfd46652a 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -407,14 +407,22 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
     return false;
   }
 
+  LLT ExtractTy = MaxValue < Width ? MRI.getType(SrcVec1) : MRI.getType(SrcVec2);
+
+  // Check that the extractee length is a power of 2.
   if ((MaxValue < Width && !llvm::isPowerOf2_32(Width)) ||
       (MinValue >= Width && !llvm::isPowerOf2_32(Width2))) {
     return false;
   }
 
-  // Check if the extractee's order is kept:
-  if (!std::is_sorted(Mask.begin(), Mask.end())) {
+  // Check if the extractee's order is kept, and they should be consecutive.
+  for (size_t i = 1; i < Mask.size(); ++i) {
+    if (Mask[i] != Mask[i - 1] + 1 || Mask[i] == -1) {
+      return false; // Not consecutive
+    }
+  }
+
+  if (!LI->isLegalOrCustom({TargetOpcode::G_EXTRACT_SUBVECTOR, {ExtractTy}})) {
     return false;
   }
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 0853070eb9c61..bf91f258a3c24 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1834,8 +1834,8 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
   }
 
   getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
-    //.fewerElementsIf(isWideVec16(0), changeTo(0, V2S16))
-    .customFor({V8S16, V4S16})
+    .widenScalarOrEltToNextPow2(0)
+    .customFor(AllS16Vectors)
     .lower();
 
   getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)

>From 21e74902ba513f99edff53b5d4a8d371ab298650 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 28 Jan 2025 12:44:48 +0800
Subject: [PATCH 04/20] Fix scalar corner issues.

---
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)
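
The corner case being fixed, sketched in generic MIR (names illustrative): when a <2 x s16> result is extracted from <4 x s16>, the 32-bit bitcast trick leaves a scalar rather than a vector destination, so a single element extract is emitted instead of a build_vector:

  ; <2 x s16> = G_EXTRACT_SUBVECTOR <4 x s16>, 2 lowers roughly to:
  %cast:_(<2 x s32>) = G_BITCAST %src(<4 x s16>)
  %one:_(s32) = G_CONSTANT i32 1
  %elt:_(s32) = G_EXTRACT_VECTOR_ELT %cast(<2 x s32>), %one(s32)
  %dst:_(<2 x s16>) = G_BITCAST %elt(s32)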

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index bf91f258a3c24..8652ef63e798d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2724,6 +2724,9 @@ bool AMDGPULegalizerInfo::legalizeMinNumMaxNum(LegalizerHelper &Helper,
 static auto buildExtractSubvector(MachineIRBuilder &B, SrcOp Src,
                                   LLT DstTy, unsigned Start) {
   SmallVector<Register, 8> Subvectors;
+  if (!DstTy.isVector()) {
+    return B.buildExtractVectorElementConstant(DstTy, Src, Start);
+  }
   for (unsigned i = Start, e = Start + DstTy.getNumElements(); i != e; ++i) {
     Subvectors.push_back(
         B.buildExtractVectorElementConstant(DstTy.getElementType(), Src, i)
@@ -2751,16 +2754,16 @@ bool AMDGPULegalizerInfo::legalizeExtractSubvector(
   // Split vector size into legal sub vectors, and use build_vector
   // to merge the result.
   if (EltTy.getScalarSizeInBits() == 16 && Start % 2 == 0) {
-    bool UseScalar = Count == 2;
     // Extract 32-bit registers at a time.
-    LLT NewSrcTy =
-        UseScalar ? S32 : LLT::fixed_vector(SrcTy.getNumElements() / 2, S32);
-    LLT NewDstTy = LLT::fixed_vector(DstTy.getNumElements() / 2, S32);
+    LLT NewSrcTy = SrcTy.getNumElements() == 2
+                       ? S32
+                       : LLT::fixed_vector(SrcTy.getNumElements() / 2, S32);
     auto Bitcasted = B.buildBitcast(NewSrcTy, Src);
 
-    auto BuildVec =
-        UseScalar ? Bitcasted
-                  : buildExtractSubvector(B, Bitcasted, NewDstTy, Start / 2);
+    LLT NewDstTy = DstTy.getNumElements() == 2
+                       ? S32
+                       : LLT::fixed_vector(DstTy.getNumElements() / 2, S32);
+    auto BuildVec = buildExtractSubvector(B, Bitcasted, NewDstTy, Start / 2);
     B.buildBitcast(Dst, BuildVec.getReg(0));
     MI.eraseFromParent();
     return true;

>From 89d71194823aae1f74015f37b21c3df27ce2a82d Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 11 Feb 2025 07:28:28 -0800
Subject: [PATCH 05/20] update

---
 llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 88b0dfd46652a..e28f93711b215 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -37,6 +37,7 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Target/TargetMachine.h"
+#include <algorithm>
 #include <cmath>
 #include <optional>
 #include <tuple>
@@ -415,11 +416,13 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
     return false;
   }
 
-  // Check if the extractee's order is kept, and they should be consecutive.
-  for (size_t i = 1; i < Mask.size(); ++i) {
-    if (Mask[i] != Mask[i - 1] + 1 || Mask[i] == -1) {
-      return false; // Not consecutive
-    }
+  // Check if the extractee's order is kept, and they should be consecutive.
+  bool isConsecutive =
+      std::adjacent_find(Mask.begin(), Mask.end(), [](int a, int b) {
+        return b != a + 1 || b == -1;
+      }) == Mask.end();
+  if (!isConsecutive) {
+    return false;
   }
 
   if (!LI->isLegalOrCustom({TargetOpcode::G_EXTRACT_SUBVECTOR, {ExtractTy}})) {

>From 9f57efe442ee3d6fe0344c0f1a512567fc2096d3 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 11 Feb 2025 13:03:32 -0800
Subject: [PATCH 06/20] Checkup

---
 .../llvm/CodeGen/GlobalISel/LegalizerHelper.h |  3 +
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 14 ++---
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    | 33 ++++++++++
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 63 ++++++++++---------
 4 files changed, 78 insertions(+), 35 deletions(-)
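
The new fewerElements action splits one wide extract into pieces of NarrowTy and merges them back; a hypothetical example with NarrowTy = <2 x s32>:

  ; %dst:_(<4 x s32>) = G_EXTRACT_SUBVECTOR %src(<8 x s32>), 4 is narrowed to:
  %p0:_(<2 x s32>) = G_EXTRACT_SUBVECTOR %src(<8 x s32>), 4
  %p1:_(<2 x s32>) = G_EXTRACT_SUBVECTOR %src(<8 x s32>), 6
  %dst:_(<4 x s32>) = G_CONCAT_VECTORS %p0(<2 x s32>), %p1(<2 x s32>)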

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 4e18f5cc913a7..ac2a763b22402 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -347,6 +347,9 @@ class LegalizerHelper {
   LegalizeResult narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
                                              LLT HalfTy, LLT ShiftAmtTy);
 
+  LegalizeResult fewerElementsExtractSubvector(MachineInstr &MI,
+                                               unsigned TypeIdx, LLT NarrowTy);
+
   LegalizeResult fewerElementsVectorReductions(MachineInstr &MI,
                                                unsigned TypeIdx, LLT NarrowTy);
   LegalizeResult fewerElementsVectorSeqReductions(MachineInstr &MI,
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index e28f93711b215..baf1232e098c8 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -416,18 +416,18 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
     return false;
   }
 
-  // Check if the extractee's order is kept, and they should be consecutive.
-  bool isConsecutive =
-      std::adjacent_find(Mask.begin(), Mask.end(), [](int a, int b) {
-        return b != a + 1 || b == -1;
-      }) == Mask.end();
-  if (!isConsecutive) {
-    return false;
+  // Check if the extractee's order is kept, and they should be consecutive.
+  for (size_t i = 1; i < Mask.size(); ++i) {
+    if (Mask[i] != Mask[i - 1] + 1 || Mask[i] == -1) {
+      return false; // Not consecutive
+    }
   }
 
+  /*
   if (!LI->isLegalOrCustom({TargetOpcode::G_EXTRACT_SUBVECTOR, {ExtractTy}})) {
     return false;
   }
+  */
 
   Idx = Mask.front();
   return true;
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 3fb1347b58e4b..1f12932efc09f 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -5463,6 +5463,8 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
     return fewerElementsVectorSeqReductions(MI, TypeIdx, NarrowTy);
   case G_SHUFFLE_VECTOR:
     return fewerElementsVectorShuffle(MI, TypeIdx, NarrowTy);
+  case G_EXTRACT_SUBVECTOR:
+    return fewerElementsExtractSubvector(MI, TypeIdx, NarrowTy);
   case G_FPOWI:
     return fewerElementsVectorMultiEltType(GMI, NumElts, {2 /*pow*/});
   case G_BITCAST:
@@ -5643,6 +5645,36 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorShuffle(
   return Legalized;
 }
 
+LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsExtractSubvector(
+    MachineInstr &MI, unsigned int TypeIdx, LLT NarrowTy) {
+  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);
+  if (TypeIdx != 0)
+    return UnableToLegalize;
+
+  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
+
+
+  if (!isPowerOf2_32(DstTy.getNumElements()))
+    return UnableToLegalize;
+
+  uint64_t SplitIdx = MI.getOperand(2).getImm();
+  unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
+  unsigned SplitParts = DstTy.getNumElements() / NewElts;
+  
+  // Split the sources into NarrowTy size pieces.
+  SmallVector<Register> SplitDstParts;
+
+  for (unsigned i = 0; i < SplitParts; i++) {
+    auto Part = MIRBuilder.buildExtractSubvector(NarrowTy, SrcReg,
+                                                 i * NewElts + SplitIdx);
+    SplitDstParts.push_back(Part.getReg(0));
+  }
+
+  MIRBuilder.buildMergeLikeInstr(DstReg, SplitDstParts);
+  MI.eraseFromParent();
+  return Legalized;
+}
+
 LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorReductions(
     MachineInstr &MI, unsigned int TypeIdx, LLT NarrowTy) {
   auto &RdxMI = cast<GVecReduce>(MI);
@@ -6049,6 +6081,7 @@ LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
   unsigned Opc = MI.getOpcode();
   switch (Opc) {
   case TargetOpcode::G_IMPLICIT_DEF:
+  case TargetOpcode::G_EXTRACT_SUBVECTOR:
   case TargetOpcode::G_LOAD: {
     if (TypeIdx != 0)
       return UnableToLegalize;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 8652ef63e798d..d0ece8e2b5c00 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -108,6 +108,18 @@ static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
   };
 }
 
+static LegalizeMutation fewerEltsToSize32Vector(unsigned TypeIdx) {
+  return [=](const LegalityQuery &Query) {
+    const LLT Ty = Query.Types[TypeIdx];
+    const LLT EltTy = Ty.getElementType();
+    unsigned Size = Ty.getSizeInBits();
+    unsigned Pieces = llvm::divideCeil(Size, 32);
+    unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
+    return std::pair(TypeIdx, LLT::scalarOrVector(
+                                  ElementCount::getFixed(NewNumElts), EltTy));
+  };
+}
+
 static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
@@ -1834,9 +1846,10 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
   }
 
   getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
-    .widenScalarOrEltToNextPow2(0)
-    .customFor(AllS16Vectors)
-    .lower();
+      .widenScalarOrEltToNextPow2(0, 2)
+      .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
+      .fewerElementsIf(vectorWiderThan(0, 32), fewerEltsToSize32Vector(0))
+      .customIf(sizeIsMultipleOf32(0));
 
   getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
     .unsupportedIf([=](const LegalityQuery &Query) {
@@ -2739,36 +2752,30 @@ bool AMDGPULegalizerInfo::legalizeExtractSubvector(
   MachineInstr &MI, MachineRegisterInfo &MRI,
   MachineIRBuilder &B) const {
   const auto &Instr = llvm::cast<GExtractSubvector>(MI);
-  Register Src = Instr.getSrcVec();
-  Register Dst = MI.getOperand(0).getReg();
-  auto Start = Instr.getIndexImm();
 
-  LLT SrcTy = MRI.getType(Src);
-  LLT DstTy = MRI.getType(Dst);
+  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
+  auto Start = Instr.getIndexImm();
 
   LLT EltTy = SrcTy.getElementType();
   assert(EltTy == DstTy.getElementType());
-  auto Count = DstTy.getNumElements();
-  assert(SrcTy.getNumElements() % 2 == 0 && Count % 2 == 0);
-
-  // Split vector size into legal sub vectors, and use build_vector
-  // to merge the result.
-  if (EltTy.getScalarSizeInBits() == 16 && Start % 2 == 0) {
-    // Extract 32-bit registers at a time.
-    LLT NewSrcTy = SrcTy.getNumElements() == 2
-                       ? S32
-                       : LLT::fixed_vector(SrcTy.getNumElements() / 2, S32);
-    auto Bitcasted = B.buildBitcast(NewSrcTy, Src);
-
-    LLT NewDstTy = DstTy.getNumElements() == 2
-                       ? S32
-                       : LLT::fixed_vector(DstTy.getNumElements() / 2, S32);
-    auto BuildVec = buildExtractSubvector(B, Bitcasted, NewDstTy, Start / 2);
-    B.buildBitcast(Dst, BuildVec.getReg(0));
-    MI.eraseFromParent();
-    return true;
+
+  if (DstTy.getSizeInBits() % 32 != 0) {
+    return false;
   }
-  return false;
+
+  // Extract 32-bit registers at a time.
+  LLT NewSrcTy = SrcTy.getNumElements() == 2
+                     ? S32
+                     : LLT::fixed_vector(SrcTy.getNumElements() / 2, S32);
+  auto Bitcasted = B.buildBitcast(NewSrcTy, SrcReg);
+
+  LLT NewDstTy = DstTy.getNumElements() == 2
+                     ? S32
+                     : LLT::fixed_vector(DstTy.getNumElements() / 2, S32);
+  auto BuildVec = buildExtractSubvector(B, Bitcasted, NewDstTy, Start / 2);
+  B.buildBitcast(DstReg, BuildVec.getReg(0));
+  MI.eraseFromParent();
+  return true;
 }
 
 bool AMDGPULegalizerInfo::legalizeExtractVectorElt(

>From dc7e7c2fcb1a44ed1143319dd8bd285f5e76d5bd Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 11 Feb 2025 14:45:07 -0800
Subject: [PATCH 07/20] update

---
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp |  8 -----
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 32 ++++++++++++++++---
 2 files changed, 28 insertions(+), 12 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index baf1232e098c8..0b06683053fbd 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -408,8 +408,6 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
     return false;
   }
 
-  LLT ExtractTy = MaxValue < Width ? MRI.getType(SrcVec1) : MRI.getType(SrcVec2);
-
   // Check that the extractee length is a power of 2.
   if ((MaxValue < Width && !llvm::isPowerOf2_32(Width)) ||
       (MinValue >= Width && !llvm::isPowerOf2_32(Width2))) {
@@ -423,12 +421,6 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
     }
   }
 
-  /*
-  if (!LI->isLegalOrCustom({TargetOpcode::G_EXTRACT_SUBVECTOR, {ExtractTy}})) {
-    return false;
-  }
-  */
-
   Idx = Mask.front();
   return true;
 }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index d0ece8e2b5c00..e8bf05f437cc3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -84,6 +84,18 @@ static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
   };
 }
 
+static LegalityPredicate isSmallWideVector(unsigned TypeIdx) {
+  return [=](const LegalityQuery &Query) {
+    const LLT Ty = Query.Types[TypeIdx];
+    if (!Ty.isVector())
+      return false;
+
+    const LLT EltTy = Ty.getElementType();
+    const unsigned EltSize = EltTy.getSizeInBits();
+    return Ty.getSizeInBits() > 32 && EltSize < 32;
+  };
+}
+
 static LegalityPredicate sizeIsMultipleOf32(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
@@ -99,6 +111,17 @@ static LegalityPredicate isWideVec16(unsigned TypeIdx) {
   };
 }
 
+static LegalityPredicate isVectorOfEltsNoBiggerThan(unsigned TypeIdx, int Size) {
+  return [=](const LegalityQuery &Query) {
+    const LLT Ty = Query.Types[TypeIdx];
+    if (!Ty.isVector())
+      return false;
+
+    const LLT EltTy = Ty.getElementType();
+    return EltTy.getSizeInBits() <= Size;
+  };
+}
+
 static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
@@ -172,6 +195,7 @@ static LegalizeMutation moreElementsToNextExistingRegClass(unsigned TypeIdx) {
   };
 }
 
+
 static LLT getBufferRsrcScalarType(const LLT Ty) {
   if (!Ty.isVector())
     return LLT::scalar(128);
@@ -1846,10 +1870,10 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
   }
 
   getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
-      .widenScalarOrEltToNextPow2(0, 2)
-      .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
-      .fewerElementsIf(vectorWiderThan(0, 32), fewerEltsToSize32Vector(0))
-      .customIf(sizeIsMultipleOf32(0));
+      //.widenScalarOrEltToNextPow2(0, 2)
+      //.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
+      //.fewerElementsIf(isSmallWideVector(0), fewerEltsToSize32Vector(0))
+      .customIf(all(isVectorOfEltsNoBiggerThan(0, 32), sizeIsMultipleOf32(0)));
 
   getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
     .unsupportedIf([=](const LegalityQuery &Query) {

>From 945950cd410fe62198996c7c9fc971731e7116d0 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 11 Feb 2025 15:09:14 -0800
Subject: [PATCH 08/20] remove unneeded parts

---
 .../llvm/CodeGen/GlobalISel/LegalizerHelper.h |  3 -
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp |  4 +-
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    | 33 -------
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 89 -------------------
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h  |  2 -
 5 files changed, 2 insertions(+), 129 deletions(-)
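
With this change the combine emits a plain G_EXTRACT at a bit offset instead of G_EXTRACT_SUBVECTOR; a sketch under the same illustrative types as above:

  ; mask (2, 3) on <4 x s16> sources, 16-bit elements: offset = 2 * 16 = 32 bits
  %dst:_(<2 x s16>) = G_EXTRACT %v0(<4 x s16>), 32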

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index ac2a763b22402..4e18f5cc913a7 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -347,9 +347,6 @@ class LegalizerHelper {
   LegalizeResult narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
                                              LLT HalfTy, LLT ShiftAmtTy);
 
-  LegalizeResult fewerElementsExtractSubvector(MachineInstr &MI,
-                                               unsigned TypeIdx, LLT NarrowTy);
-
   LegalizeResult fewerElementsVectorReductions(MachineInstr &MI,
                                                unsigned TypeIdx, LLT NarrowTy);
   LegalizeResult fewerElementsVectorSeqReductions(MachineInstr &MI,
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 0b06683053fbd..b8a800ba0da03 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -37,7 +37,6 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Target/TargetMachine.h"
-#include <algorithm>
 #include <cmath>
 #include <optional>
 #include <tuple>
@@ -430,11 +429,12 @@ void CombinerHelper::applyCombineShuffleExtract(MachineInstr &MI, int64_t Idx) c
 
   auto SrcVec1 = Shuffle.getSrc1Reg();
   auto SrcVec2 = Shuffle.getSrc2Reg();
+  auto EltTy = MRI.getType(SrcVec1).getElementType();
   int Width = MRI.getType(SrcVec1).getNumElements();
 
   auto SrcVec = Idx < Width ? SrcVec1 : SrcVec2;
 
-  Builder.buildExtractSubvector(MI.getOperand(0).getReg(), SrcVec, Idx);
+  Builder.buildExtract(MI.getOperand(0).getReg(), SrcVec, Idx * EltTy.getSizeInBits());
   MI.eraseFromParent();
 }
 
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 1f12932efc09f..3fb1347b58e4b 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -5463,8 +5463,6 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
     return fewerElementsVectorSeqReductions(MI, TypeIdx, NarrowTy);
   case G_SHUFFLE_VECTOR:
     return fewerElementsVectorShuffle(MI, TypeIdx, NarrowTy);
-  case G_EXTRACT_SUBVECTOR:
-    return fewerElementsExtractSubvector(MI, TypeIdx, NarrowTy);
   case G_FPOWI:
     return fewerElementsVectorMultiEltType(GMI, NumElts, {2 /*pow*/});
   case G_BITCAST:
@@ -5645,36 +5643,6 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorShuffle(
   return Legalized;
 }
 
-LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsExtractSubvector(
-    MachineInstr &MI, unsigned int TypeIdx, LLT NarrowTy) {
-  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);
-  if (TypeIdx != 0)
-    return UnableToLegalize;
-
-  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
-
-
-  if (!isPowerOf2_32(DstTy.getNumElements()))
-    return UnableToLegalize;
-
-  uint64_t SplitIdx = MI.getOperand(2).getImm();
-  unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
-  unsigned SplitParts = DstTy.getNumElements() / NewElts;
-  
-  // Split the sources into NarrowTy size pieces.
-  SmallVector<Register> SplitDstParts;
-
-  for (unsigned i = 0; i < SplitParts; i++) {
-    auto Part = MIRBuilder.buildExtractSubvector(NarrowTy, SrcReg,
-                                                 i * NewElts + SplitIdx);
-    SplitDstParts.push_back(Part.getReg(0));
-  }
-
-  MIRBuilder.buildMergeLikeInstr(DstReg, SplitDstParts);
-  MI.eraseFromParent();
-  return Legalized;
-}
-
 LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorReductions(
     MachineInstr &MI, unsigned int TypeIdx, LLT NarrowTy) {
   auto &RdxMI = cast<GVecReduce>(MI);
@@ -6081,7 +6049,6 @@ LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
   unsigned Opc = MI.getOpcode();
   switch (Opc) {
   case TargetOpcode::G_IMPLICIT_DEF:
-  case TargetOpcode::G_EXTRACT_SUBVECTOR:
   case TargetOpcode::G_LOAD: {
     if (TypeIdx != 0)
       return UnableToLegalize;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index e8bf05f437cc3..649deee346e90 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -29,7 +29,6 @@
 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/GlobalISel/Utils.h"
-#include "llvm/CodeGen/Register.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
@@ -84,18 +83,6 @@ static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
   };
 }
 
-static LegalityPredicate isSmallWideVector(unsigned TypeIdx) {
-  return [=](const LegalityQuery &Query) {
-    const LLT Ty = Query.Types[TypeIdx];
-    if (!Ty.isVector())
-      return false;
-
-    const LLT EltTy = Ty.getElementType();
-    const unsigned EltSize = EltTy.getSizeInBits();
-    return Ty.getSizeInBits() > 32 && EltSize < 32;
-  };
-}
-
 static LegalityPredicate sizeIsMultipleOf32(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
@@ -111,17 +98,6 @@ static LegalityPredicate isWideVec16(unsigned TypeIdx) {
   };
 }
 
-static LegalityPredicate isVectorOfEltsNoBiggerThan(unsigned TypeIdx, int Size) {
-  return [=](const LegalityQuery &Query) {
-    const LLT Ty = Query.Types[TypeIdx];
-    if (!Ty.isVector())
-      return false;
-
-    const LLT EltTy = Ty.getElementType();
-    return EltTy.getSizeInBits() <= Size;
-  };
-}
-
 static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
@@ -131,18 +107,6 @@ static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
   };
 }
 
-static LegalizeMutation fewerEltsToSize32Vector(unsigned TypeIdx) {
-  return [=](const LegalityQuery &Query) {
-    const LLT Ty = Query.Types[TypeIdx];
-    const LLT EltTy = Ty.getElementType();
-    unsigned Size = Ty.getSizeInBits();
-    unsigned Pieces = llvm::divideCeil(Size, 32);
-    unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
-    return std::pair(TypeIdx, LLT::scalarOrVector(
-                                  ElementCount::getFixed(NewNumElts), EltTy));
-  };
-}
-
 static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
@@ -195,7 +159,6 @@ static LegalizeMutation moreElementsToNextExistingRegClass(unsigned TypeIdx) {
   };
 }
 
-
 static LLT getBufferRsrcScalarType(const LLT Ty) {
   if (!Ty.isVector())
     return LLT::scalar(128);
@@ -1869,12 +1832,6 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
       .lower();
   }
 
-  getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
-      //.widenScalarOrEltToNextPow2(0, 2)
-      //.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
-      //.fewerElementsIf(isSmallWideVector(0), fewerEltsToSize32Vector(0))
-      .customIf(all(isVectorOfEltsNoBiggerThan(0, 32), sizeIsMultipleOf32(0)));
-
   getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
     .unsupportedIf([=](const LegalityQuery &Query) {
         const LLT &EltTy = Query.Types[1].getElementType();
@@ -2170,8 +2127,6 @@ bool AMDGPULegalizerInfo::legalizeCustom(
   case TargetOpcode::G_FMINNUM_IEEE:
   case TargetOpcode::G_FMAXNUM_IEEE:
     return legalizeMinNumMaxNum(Helper, MI);
-  case TargetOpcode::G_EXTRACT_SUBVECTOR:
-    return legalizeExtractSubvector(MI, MRI, B);
   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
     return legalizeExtractVectorElt(MI, MRI, B);
   case TargetOpcode::G_INSERT_VECTOR_ELT:
@@ -2758,50 +2713,6 @@ bool AMDGPULegalizerInfo::legalizeMinNumMaxNum(LegalizerHelper &Helper,
   return Helper.lowerFMinNumMaxNum(MI) == LegalizerHelper::Legalized;
 }
 
-static auto buildExtractSubvector(MachineIRBuilder &B, SrcOp Src,
-                                  LLT DstTy, unsigned Start) {
-  SmallVector<Register, 8> Subvectors;
-  if (!DstTy.isVector()) {
-    return B.buildExtractVectorElementConstant(DstTy, Src, Start);
-  }
-  for (unsigned i = Start, e = Start + DstTy.getNumElements(); i != e; ++i) {
-    Subvectors.push_back(
-        B.buildExtractVectorElementConstant(DstTy.getElementType(), Src, i)
-            .getReg(0));
-  }
-  return B.buildBuildVector(DstTy, Subvectors);
-}
-
-bool AMDGPULegalizerInfo::legalizeExtractSubvector(
-  MachineInstr &MI, MachineRegisterInfo &MRI,
-  MachineIRBuilder &B) const {
-  const auto &Instr = llvm::cast<GExtractSubvector>(MI);
-
-  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
-  auto Start = Instr.getIndexImm();
-
-  LLT EltTy = SrcTy.getElementType();
-  assert(EltTy == DstTy.getElementType());
-
-  if (DstTy.getSizeInBits() % 32 != 0) {
-    return false;
-  }
-
-  // Extract 32-bit registers at a time.
-  LLT NewSrcTy = SrcTy.getNumElements() == 2
-                     ? S32
-                     : LLT::fixed_vector(SrcTy.getNumElements() / 2, S32);
-  auto Bitcasted = B.buildBitcast(NewSrcTy, SrcReg);
-
-  LLT NewDstTy = DstTy.getNumElements() == 2
-                     ? S32
-                     : LLT::fixed_vector(DstTy.getNumElements() / 2, S32);
-  auto BuildVec = buildExtractSubvector(B, Bitcasted, NewDstTy, Start / 2);
-  B.buildBitcast(DstReg, BuildVec.getReg(0));
-  MI.eraseFromParent();
-  return true;
-}
-
 bool AMDGPULegalizerInfo::legalizeExtractVectorElt(
   MachineInstr &MI, MachineRegisterInfo &MRI,
   MachineIRBuilder &B) const {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index f50dbd028ce2b..03b7c36fc450f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -56,8 +56,6 @@ class AMDGPULegalizerInfo final : public LegalizerInfo {
   bool legalizeFPTOI(MachineInstr &MI, MachineRegisterInfo &MRI,
                      MachineIRBuilder &B, bool Signed) const;
   bool legalizeMinNumMaxNum(LegalizerHelper &Helper, MachineInstr &MI) const;
-  bool legalizeExtractSubvector(MachineInstr &MI, MachineRegisterInfo &MRI,
-                                MachineIRBuilder &B) const;
   bool legalizeExtractVectorElt(MachineInstr &MI, MachineRegisterInfo &MRI,
                                 MachineIRBuilder &B) const;
   bool legalizeInsertVectorElt(MachineInstr &MI, MachineRegisterInfo &MRI,

>From 1b49d5f90b470721da9c97171040c220b060b96c Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 11 Feb 2025 17:36:57 -0800
Subject: [PATCH 09/20] Update AMDGPU GISel tests

---
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.tbuffer.store.d16.ll  | 2 --
 .../CodeGen/AMDGPU/llvm.amdgcn.struct.tbuffer.store.d16.ll     | 3 +--
 2 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.tbuffer.store.d16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.tbuffer.store.d16.ll
index 63b139bb25e77..0c46ccda17640 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.tbuffer.store.d16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.tbuffer.store.d16.ll
@@ -184,8 +184,6 @@ define amdgpu_kernel void @tbuffer_store_d16_xyz(<4 x i32> %rsrc, <4 x half> %da
 ; GFX12-PACKED-GISEL-NEXT:    s_load_b64 s[6:7], s[4:5], 0x34
 ; GFX12-PACKED-GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
 ; GFX12-PACKED-GISEL-NEXT:    s_wait_kmcnt 0x0
-; GFX12-PACKED-GISEL-NEXT:    s_pack_lh_b32_b16 s6, s6, s6
-; GFX12-PACKED-GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX12-PACKED-GISEL-NEXT:    v_mov_b32_e32 v0, s6
 ; GFX12-PACKED-GISEL-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX12-PACKED-GISEL-NEXT:    tbuffer_store_d16_format_xyzw v[0:1], off, s[0:3], null format:[BUF_FMT_10_10_10_2_SNORM]
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.tbuffer.store.d16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.tbuffer.store.d16.ll
index 17ebb1a835462..2d5c95156c6f2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.tbuffer.store.d16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.tbuffer.store.d16.ll
@@ -204,10 +204,9 @@ define amdgpu_kernel void @tbuffer_store_d16_xyz(<4 x i32> %rsrc, <4 x half> %da
 ; GFX12-PACKED-GISEL-NEXT:    s_load_b96 s[8:10], s[4:5], 0x10
 ; GFX12-PACKED-GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x0
 ; GFX12-PACKED-GISEL-NEXT:    s_wait_kmcnt 0x0
-; GFX12-PACKED-GISEL-NEXT:    s_pack_lh_b32_b16 s8, s8, s8
-; GFX12-PACKED-GISEL-NEXT:    v_mov_b32_e32 v2, s10
 ; GFX12-PACKED-GISEL-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX12-PACKED-GISEL-NEXT:    v_mov_b32_e32 v1, s9
+; GFX12-PACKED-GISEL-NEXT:    v_mov_b32_e32 v2, s10
 ; GFX12-PACKED-GISEL-NEXT:    tbuffer_store_d16_format_xyzw v[0:1], v2, s[0:3], null format:[BUF_FMT_10_10_10_2_SNORM] idxen
 ; GFX12-PACKED-GISEL-NEXT:    s_endpgm
 main_body:

>From b8369e34f2178b5379e6c0bd60c2548809460c41 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 11 Feb 2025 18:36:36 -0800
Subject: [PATCH 10/20] Fix some AArch64 tests

---
 llvm/test/CodeGen/AArch64/aarch64-smull.ll |  86 ++++++++---------
 llvm/test/CodeGen/AArch64/arm64-vabs.ll    | 105 ++++++++++++---------
 llvm/test/CodeGen/AArch64/arm64-vadd.ll    |  67 ++++++++-----
 llvm/test/CodeGen/AArch64/arm64-vshift.ll  |  30 +++---
 4 files changed, 156 insertions(+), 132 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
index 3b589d3480179..390953490e87d 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
@@ -4,7 +4,16 @@
 ; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 ; CHECK-GI:       warning: Instruction selection used fallback path for pmlsl2_v8i16_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl2_v8i16_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl2_v8i16_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl2_v4i32_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl2_v4i32_uzp1
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for pmlsl_pmlsl2_v8i16_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl_smlsl2_v8i16_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl_umlsl2_v8i16_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl_smlsl2_v4i32_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl_umlsl2_v4i32_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for do_stuff
 
 define <8 x i16> @smull_v8i8_v8i16(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_v8i8_v8i16:
@@ -2030,9 +2039,8 @@ define void @smlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: smlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
-; CHECK-GI-NEXT:    smlsl v1.8h, v0.8b, v2.8b
+; CHECK-GI-NEXT:    uzp1 v2.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT:    smlsl2 v1.8h, v0.16b, v2.16b
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2065,9 +2073,8 @@ define void @umlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: umlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
-; CHECK-GI-NEXT:    umlsl v1.8h, v0.8b, v2.8b
+; CHECK-GI-NEXT:    uzp1 v2.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT:    umlsl2 v1.8h, v0.16b, v2.16b
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2100,9 +2107,8 @@ define void @smlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: smlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
-; CHECK-GI-NEXT:    smlsl v1.4s, v0.4h, v2.4h
+; CHECK-GI-NEXT:    uzp1 v2.8h, v0.8h, v2.8h
+; CHECK-GI-NEXT:    smlsl2 v1.4s, v0.8h, v2.8h
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2135,9 +2141,8 @@ define void @umlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: umlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
-; CHECK-GI-NEXT:    umlsl v1.4s, v0.4h, v2.4h
+; CHECK-GI-NEXT:    uzp1 v2.8h, v0.8h, v2.8h
+; CHECK-GI-NEXT:    umlsl2 v1.4s, v0.8h, v2.8h
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2198,14 +2203,11 @@ define void @smlsl_smlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: smlsl_smlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q4, q2, [x1]
-; CHECK-GI-NEXT:    mov d3, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
-; CHECK-GI-NEXT:    xtn v4.8b, v4.8h
-; CHECK-GI-NEXT:    smull v2.8h, v3.8b, v2.8b
-; CHECK-GI-NEXT:    smlal v2.8h, v0.8b, v4.8b
-; CHECK-GI-NEXT:    sub v0.8h, v1.8h, v2.8h
-; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ldp q2, q3, [x1]
+; CHECK-GI-NEXT:    uzp1 v2.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT:    smlsl v1.8h, v0.8b, v2.8b
+; CHECK-GI-NEXT:    smlsl2 v1.8h, v0.16b, v2.16b
+; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <8 x i16>, ptr %3, align 4
@@ -2244,14 +2246,11 @@ define void @umlsl_umlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: umlsl_umlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q4, q2, [x1]
-; CHECK-GI-NEXT:    mov d3, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
-; CHECK-GI-NEXT:    xtn v4.8b, v4.8h
-; CHECK-GI-NEXT:    umull v2.8h, v3.8b, v2.8b
-; CHECK-GI-NEXT:    umlal v2.8h, v0.8b, v4.8b
-; CHECK-GI-NEXT:    sub v0.8h, v1.8h, v2.8h
-; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ldp q2, q3, [x1]
+; CHECK-GI-NEXT:    uzp1 v2.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT:    umlsl v1.8h, v0.8b, v2.8b
+; CHECK-GI-NEXT:    umlsl2 v1.8h, v0.16b, v2.16b
+; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <8 x i16>, ptr %3, align 4
@@ -2290,14 +2289,11 @@ define void @smlsl_smlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: smlsl_smlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q4, q2, [x1]
-; CHECK-GI-NEXT:    mov d3, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
-; CHECK-GI-NEXT:    xtn v4.4h, v4.4s
-; CHECK-GI-NEXT:    smull v2.4s, v3.4h, v2.4h
-; CHECK-GI-NEXT:    smlal v2.4s, v0.4h, v4.4h
-; CHECK-GI-NEXT:    sub v0.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ldp q2, q3, [x1]
+; CHECK-GI-NEXT:    uzp1 v2.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT:    smlsl v1.4s, v0.4h, v2.4h
+; CHECK-GI-NEXT:    smlsl2 v1.4s, v0.8h, v2.8h
+; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <4 x i32>, ptr %3, align 4
@@ -2336,14 +2332,11 @@ define void @umlsl_umlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: umlsl_umlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q4, q2, [x1]
-; CHECK-GI-NEXT:    mov d3, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
-; CHECK-GI-NEXT:    xtn v4.4h, v4.4s
-; CHECK-GI-NEXT:    umull v2.4s, v3.4h, v2.4h
-; CHECK-GI-NEXT:    umlal v2.4s, v0.4h, v4.4h
-; CHECK-GI-NEXT:    sub v0.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ldp q2, q3, [x1]
+; CHECK-GI-NEXT:    uzp1 v2.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT:    umlsl v1.4s, v0.4h, v2.4h
+; CHECK-GI-NEXT:    umlsl2 v1.4s, v0.8h, v2.8h
+; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <4 x i32>, ptr %3, align 4
@@ -2380,9 +2373,8 @@ define <2 x i32> @do_stuff(<2 x i64> %0, <2 x i64> %1) {
 ;
 ; CHECK-GI-LABEL: do_stuff:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    xtn v0.2s, v0.2d
-; CHECK-GI-NEXT:    mov d2, v1.d[1]
-; CHECK-GI-NEXT:    smull v0.2d, v2.2s, v0.2s
+; CHECK-GI-NEXT:    uzp1 v0.4s, v0.4s, v0.4s
+; CHECK-GI-NEXT:    smull2 v0.2d, v1.4s, v0.4s
 ; CHECK-GI-NEXT:    xtn v0.2s, v0.2d
 ; CHECK-GI-NEXT:    add v0.2s, v0.2s, v1.2s
 ; CHECK-GI-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index fe4657186cd2a..bb598bef8ad54 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -2,9 +2,26 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck -check-prefixes=CHECK,CHECK-SD %s
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:  warning: Instruction selection used fallback path for fabds
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fabdd
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabd_i64
+; CHECK-GI:  warning: Instruction selection used fallback path for sabdl2_8h
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabdl2_4s
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabdl2_2d
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl2_8h
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl2_4s
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl2_2d
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabal2_8h
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabal2_4s
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabal2_2d
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabal2_8h
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabal2_4s
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabal2_2d
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fabds
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fabdd
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl_from_extract_dup
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl2_from_extract_dup
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabdl_from_extract_dup
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabdl2_from_extract_dup
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabd_i64
+
 
 define <8 x i16> @sabdl8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabdl8h:
@@ -58,9 +75,9 @@ define <8 x i16> @sabdl2_8h(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: sabdl2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    sabdl2.8h v0, v0, v1
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    sabdl.8h v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -81,9 +98,9 @@ define <4 x i32> @sabdl2_4s(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: sabdl2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    sabdl2.4s v0, v0, v1
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    sabdl.4s v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -104,9 +121,9 @@ define <2 x i64> @sabdl2_2d(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: sabdl2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    sabdl2.2d v0, v0, v1
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    sabdl.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -169,9 +186,9 @@ define <8 x i16> @uabdl2_8h(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: uabdl2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    uabdl2.8h v0, v0, v1
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    uabdl.8h v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -193,9 +210,9 @@ define <4 x i32> @uabdl2_4s(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: uabdl2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    uabdl2.4s v0, v0, v1
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    uabdl.4s v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -216,9 +233,9 @@ define <2 x i64> @uabdl2_2d(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: uabdl2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    uabdl2.2d v0, v0, v1
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    uabdl.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -1146,10 +1163,10 @@ define <8 x i16> @sabal2_8h(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: sabal2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    sabal2.8h v0, v1, v2
+; CHECK-GI-NEXT:    ldr d1, [x0, #8]
+; CHECK-GI-NEXT:    ldr d2, [x1, #8]
+; CHECK-GI-NEXT:    sabal.8h v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -1173,10 +1190,10 @@ define <4 x i32> @sabal2_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: sabal2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    sabal2.4s v0, v1, v2
+; CHECK-GI-NEXT:    ldr d1, [x0, #8]
+; CHECK-GI-NEXT:    ldr d2, [x1, #8]
+; CHECK-GI-NEXT:    sabal.4s v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -1200,10 +1217,10 @@ define <2 x i64> @sabal2_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: sabal2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    sabal2.2d v0, v1, v2
+; CHECK-GI-NEXT:    ldr d1, [x0, #8]
+; CHECK-GI-NEXT:    ldr d2, [x1, #8]
+; CHECK-GI-NEXT:    sabal.2d v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -1278,10 +1295,10 @@ define <8 x i16> @uabal2_8h(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: uabal2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    uabal2.8h v0, v1, v2
+; CHECK-GI-NEXT:    ldr d1, [x0, #8]
+; CHECK-GI-NEXT:    ldr d2, [x1, #8]
+; CHECK-GI-NEXT:    uabal.8h v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -1305,10 +1322,10 @@ define <4 x i32> @uabal2_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: uabal2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    uabal2.4s v0, v1, v2
+; CHECK-GI-NEXT:    ldr d1, [x0, #8]
+; CHECK-GI-NEXT:    ldr d2, [x1, #8]
+; CHECK-GI-NEXT:    uabal.4s v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -1332,10 +1349,10 @@ define <2 x i64> @uabal2_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: uabal2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    uabal2.2d v0, v1, v2
+; CHECK-GI-NEXT:    ldr d1, [x0, #8]
+; CHECK-GI-NEXT:    ldr d2, [x1, #8]
+; CHECK-GI-NEXT:    uabal.2d v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -1608,9 +1625,8 @@ define <2 x i64> @uabdl2_from_extract_dup(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: uabdl2_from_extract_dup:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup.2s v1, w0
-; CHECK-GI-NEXT:    mov d0, v0[1]
-; CHECK-GI-NEXT:    uabdl.2d v0, v0, v1
+; CHECK-GI-NEXT:    dup.4s v1, w0
+; CHECK-GI-NEXT:    uabdl2.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
@@ -1643,9 +1659,8 @@ define <2 x i64> @sabdl2_from_extract_dup(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: sabdl2_from_extract_dup:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup.2s v1, w0
-; CHECK-GI-NEXT:    mov d0, v0[1]
-; CHECK-GI-NEXT:    sabdl.2d v0, v0, v1
+; CHECK-GI-NEXT:    dup.4s v1, w0
+; CHECK-GI-NEXT:    sabdl2.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
diff --git a/llvm/test/CodeGen/AArch64/arm64-vadd.ll b/llvm/test/CodeGen/AArch64/arm64-vadd.ll
index d982dbbb1f69b..017873158e562 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vadd.ll
@@ -2,8 +2,29 @@
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:         warning: Instruction selection used fallback path for saddlp1d
+; CHECK-GI:    warning: Instruction selection used fallback path for saddl2_8h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl2_4s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl2_2d
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_8h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_4s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_2d
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddw2_8h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddw2_4s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddw2_2d
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddw2_8h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddw2_4s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddw2_2d
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddlp1d
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddlp1d
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl_duprhs
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_duprhs
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl_duplhs
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl2_duplhs
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for usubl_duprhs
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for usubl2_duprhs
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for ssubl_duplhs
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for ssubl2_duplhs
+
 
 define <8 x i8> @addhn8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: addhn8b:
@@ -416,8 +437,8 @@ define <8 x i16> @uaddw2_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: uaddw2_8h:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    uaddw2 v0.8h, v0.8h, v1.16b
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    uaddw v0.8h, v0.8h, v1.8b
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <8 x i16>, ptr %A
 
@@ -440,8 +461,8 @@ define <4 x i32> @uaddw2_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: uaddw2_4s:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    uaddw2 v0.4s, v0.4s, v1.8h
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    uaddw v0.4s, v0.4s, v1.4h
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <4 x i32>, ptr %A
 
@@ -464,8 +485,8 @@ define <2 x i64> @uaddw2_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: uaddw2_2d:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    uaddw2 v0.2d, v0.2d, v1.4s
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    uaddw v0.2d, v0.2d, v1.2s
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <2 x i64>, ptr %A
 
@@ -530,8 +551,8 @@ define <8 x i16> @saddw2_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: saddw2_8h:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    saddw2 v0.8h, v0.8h, v1.16b
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    saddw v0.8h, v0.8h, v1.8b
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <8 x i16>, ptr %A
 
@@ -554,8 +575,8 @@ define <4 x i32> @saddw2_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: saddw2_4s:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    saddw2 v0.4s, v0.4s, v1.8h
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    saddw v0.4s, v0.4s, v1.4h
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <4 x i32>, ptr %A
 
@@ -578,8 +599,8 @@ define <2 x i64> @saddw2_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: saddw2_2d:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    saddw2 v0.2d, v0.2d, v1.4s
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    saddw v0.2d, v0.2d, v1.2s
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <2 x i64>, ptr %A
 
@@ -1048,9 +1069,8 @@ define <2 x i64> @uaddl2_duprhs(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: uaddl2_duprhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.2s, w0
-; CHECK-GI-NEXT:    ushll v1.2d, v1.2s, #0
-; CHECK-GI-NEXT:    uaddw2 v0.2d, v1.2d, v0.4s
+; CHECK-GI-NEXT:    dup v1.4s, w0
+; CHECK-GI-NEXT:    uaddl2 v0.2d, v0.4s, v1.4s
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
@@ -1091,9 +1111,8 @@ define <2 x i64> @saddl2_duplhs(i32 %lhs, <4 x i32> %rhs) {
 ;
 ; CHECK-GI-LABEL: saddl2_duplhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.2s, w0
-; CHECK-GI-NEXT:    sshll v1.2d, v1.2s, #0
-; CHECK-GI-NEXT:    saddw2 v0.2d, v1.2d, v0.4s
+; CHECK-GI-NEXT:    dup v1.4s, w0
+; CHECK-GI-NEXT:    saddl2 v0.2d, v1.4s, v0.4s
 ; CHECK-GI-NEXT:    ret
   %lhsvec.tmp = insertelement <2 x i32> undef, i32 %lhs, i32 0
   %lhsvec = insertelement <2 x i32> %lhsvec.tmp, i32 %lhs, i32 1
@@ -1134,9 +1153,8 @@ define <2 x i64> @usubl2_duprhs(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: usubl2_duprhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.2s, w0
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
-; CHECK-GI-NEXT:    usubl v0.2d, v0.2s, v1.2s
+; CHECK-GI-NEXT:    dup v1.4s, w0
+; CHECK-GI-NEXT:    usubl2 v0.2d, v0.4s, v1.4s
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
@@ -1177,9 +1195,8 @@ define <2 x i64> @ssubl2_duplhs(i32 %lhs, <4 x i32> %rhs) {
 ;
 ; CHECK-GI-LABEL: ssubl2_duplhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.2s, w0
-; CHECK-GI-NEXT:    sshll v1.2d, v1.2s, #0
-; CHECK-GI-NEXT:    ssubw2 v0.2d, v1.2d, v0.4s
+; CHECK-GI-NEXT:    dup v1.4s, w0
+; CHECK-GI-NEXT:    ssubl2 v0.2d, v1.4s, v0.4s
 ; CHECK-GI-NEXT:    ret
   %lhsvec.tmp = insertelement <2 x i32> undef, i32 %lhs, i32 0
   %lhsvec = insertelement <2 x i32> %lhsvec.tmp, i32 %lhs, i32 1
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 2f543cc324bc2..343b9c98fc205 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=0 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=1 -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:       warning: Instruction selection used fallback path for sqshl1d
+; CHECK-GI:  warning: Instruction selection used fallback path for sqshl1d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqshl1d_constant
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqshl_scalar
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqshl_scalar_constant
@@ -82,15 +82,22 @@
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uqshrn16b
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uqshrn8h
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uqshrn4s
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ushll2_8h
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ushll2_4s
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ushll2_2d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_ushl_vscalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_ushl_scalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_sshll_vscalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_sshll_scalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_sshll_scalar_constant_shift_m1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sshll2_8h
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sshll2_4s
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sshll2_2d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ursra1d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ursra_scalar
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for srsra1d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for srsra_scalar
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for shll_high
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sli8b
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sli4h
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sli2s
@@ -2227,8 +2234,7 @@ define <8 x i16> @ushll2_8h(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: ushll2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
 ; CHECK-GI-NEXT:    ushll v0.8h, v0.8b, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
@@ -2247,8 +2253,7 @@ define <4 x i32> @ushll2_4s(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: ushll2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
 ; CHECK-GI-NEXT:    ushll v0.4s, v0.4h, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
@@ -2267,8 +2272,7 @@ define <2 x i64> @ushll2_2d(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: ushll2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
 ; CHECK-GI-NEXT:    ushll v0.2d, v0.2s, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
@@ -2822,8 +2826,7 @@ define <8 x i16> @sshll2_8h(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: sshll2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
 ; CHECK-GI-NEXT:    sshll v0.8h, v0.8b, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
@@ -2842,8 +2845,7 @@ define <4 x i32> @sshll2_4s(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: sshll2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
 ; CHECK-GI-NEXT:    sshll v0.4s, v0.4h, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
@@ -2862,8 +2864,7 @@ define <2 x i64> @sshll2_2d(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: sshll2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
 ; CHECK-GI-NEXT:    sshll v0.2d, v0.2s, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
@@ -4027,8 +4028,7 @@ define <4 x i32> @shll_high(<8 x i16> %in) {
 ;
 ; CHECK-GI-LABEL: shll_high:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ushll2 v0.4s, v0.8h, #0
-; CHECK-GI-NEXT:    shl v0.4s, v0.4s, #16
+; CHECK-GI-NEXT:    shll2 v0.4s, v0.8h, #16
 ; CHECK-GI-NEXT:    ret
   %extract = shufflevector <8 x i16> %in, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %ext = zext <4 x i16> %extract to <4 x i32>

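The codegen churn in the three test files above follows one mechanical rule: once the combiner proves a shuffle mask selects the high half of a source vector, the shuffle becomes a subvector extract, and an extract whose source was just loaded folds into a narrower load at a byte offset. A minimal standalone C++ sketch of that offset arithmetic (illustrative only, not the LLVM API):

#include <cstdio>

// The load offset is the first extracted lane times the element size in bytes.
static int extractByteOffset(int StartLane, int EltBytes) {
  return StartLane * EltBytes;
}

int main() {
  std::printf("%d\n", extractByteOffset(8, 1)); // <16 x i8> high half
  std::printf("%d\n", extractByteOffset(4, 2)); // <8 x i16> high half
  std::printf("%d\n", extractByteOffset(2, 4)); // <4 x i32> high half
}

All three cases print 8, which is why the widened high-half tests now load d0/d1 from [x0, #8] and use the non-high-half instruction forms instead of loading a full q register and moving its top half.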
>From c23bf4bb80b6d27d3447007980d520a8df49c667 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 11 Feb 2025 18:36:48 -0800
Subject: [PATCH 11/20] Fix crash

---
 llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h | 2 +-
 llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp        | 8 +++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index c1c303fd18e6b..66a607fe1f231 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -264,7 +264,7 @@ class CombinerHelper {
   void applyCombineShuffleConcat(MachineInstr &MI,
                                  SmallVector<Register> &Ops) const;
 
-  /// Replace \p MI with a narrow extract_subvector.
+  /// Replace \p MI with a narrowed vector extract.
   bool matchCombineShuffleExtract(MachineInstr &MI, int64_t &IsFirst) const;
   void applyCombineShuffleExtract(MachineInstr &MI, int64_t IsFirst) const;
 
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index b8a800ba0da03..24945d4591b4b 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -391,8 +391,14 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
   
   auto SrcVec1 = Shuffle.getSrc1Reg();
   int SrcVec2 = Shuffle.getSrc2Reg();
-  auto Mask = Shuffle.getMask();
 
+  LLT SrcVec1Type = MRI.getType(SrcVec1);
+  LLT SrcVec2Type = MRI.getType(SrcVec2);
+  if (!SrcVec1Type.isVector() || !SrcVec2Type.isVector()) {
+    return false;
+  }
+
+  auto Mask = Shuffle.getMask();
   int Width = MRI.getType(SrcVec1).getNumElements();
   int Width2 = MRI.getType(SrcVec2).getNumElements();
 

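The guard above matters because G_SHUFFLE_VECTOR sources are not guaranteed to be vector-typed; for example, the IRTranslator gives <1 x Ty> IR vectors scalar LLTs, so a shufflevector of single-element vectors can reach the combiner with scalar source registers, and LLT::getNumElements() asserts on scalars. A standalone model of the failure mode, using a simplified stand-in for LLT (not the real class):

#include <cassert>
#include <cstdio>

struct Ty {
  bool IsVector;
  int NumElts; // meaningful only when IsVector is true
  bool isVector() const { return IsVector; }
  int getNumElements() const {
    assert(IsVector && "not a vector"); // LLT asserts in the same situation
    return NumElts;
  }
};

// Mirrors the early-out added above: bail before querying element counts.
static bool canQueryWidths(Ty Src1, Ty Src2) {
  if (!Src1.isVector() || !Src2.isVector())
    return false;
  return Src1.getNumElements() > 0 && Src2.getNumElements() > 0;
}

int main() {
  Ty Vec{true, 8}, Scalar{false, 0};
  std::printf("%d\n", canQueryWidths(Vec, Vec));    // 1
  std::printf("%d\n", canQueryWidths(Vec, Scalar)); // 0: previously asserted
}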
>From f8d1c160e0119a4d35850071604b53b8af8904f7 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Wed, 12 Feb 2025 07:56:44 -0800
Subject: [PATCH 12/20] Only use the pattern in AMDGPU

---
 llvm/include/llvm/Target/GlobalISel/Combine.td | 2 +-
 llvm/lib/Target/AMDGPU/AMDGPUCombine.td        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 30316305d9e4f..17f1825cea7e4 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -2033,7 +2033,7 @@ def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
     and_or_disjoint_mask, fma_combines, fold_binop_into_select,
     sub_add_reg, select_to_minmax,
     fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
-    simplify_neg_minmax, combine_concat_vector, combine_shuffle_vector,
+    simplify_neg_minmax, combine_concat_vector,
     sext_trunc, zext_trunc, prefer_sign_combines, shuffle_combines,
     combine_use_vector_truncate, merge_combines, overflow_combines]>;
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
index da47aaf8a3b5c..f06281af34968 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
@@ -163,7 +163,7 @@ def gfx8_combines : GICombineGroup<[expand_promoted_fmed3]>;
 
 def AMDGPUPreLegalizerCombiner: GICombiner<
   "AMDGPUPreLegalizerCombinerImpl",
-  [all_combines, combine_fmul_with_select_to_fldexp, clamp_i64_to_i16, foldable_fneg]> {
+  [all_combines, combine_fmul_with_select_to_fldexp, clamp_i64_to_i16, foldable_fneg, combine_shuffle_vector]> {
   let CombineAllMethodName = "tryCombineAllImpl";
 }
 

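Dropping the rule from all_combines and appending it to the AMDGPU pre-legalizer combiner changes where the rule runs, not what it does: every other target that uses all_combines gets its previous shuffle lowering back, which is what allows the AArch64 test updates to be reverted wholesale in the next patch. A toy standalone model of this kind of rule gating (illustrative; the real combiner is generated from the TableGen definitions):

#include <cstdio>
#include <functional>
#include <string>
#include <vector>

using Rule = std::function<bool(std::string &)>;

static void runPipeline(const std::vector<Rule> &Rules, std::string &MIR) {
  for (const Rule &R : Rules)
    R(MIR);
}

int main() {
  Rule ShuffleToExtract = [](std::string &MIR) {
    if (MIR != "G_SHUFFLE_VECTOR")
      return false;
    MIR = "G_EXTRACT";
    return true;
  };
  std::vector<Rule> Generic;                      // rule removed from all_combines
  std::vector<Rule> AMDGPUPreLegalizer = Generic; // starts from the generic set
  AMDGPUPreLegalizer.push_back(ShuffleToExtract); // appended for AMDGPU only

  std::string A = "G_SHUFFLE_VECTOR", B = "G_SHUFFLE_VECTOR";
  runPipeline(Generic, A);            // A stays G_SHUFFLE_VECTOR
  runPipeline(AMDGPUPreLegalizer, B); // B becomes G_EXTRACT
  std::printf("generic: %s, amdgpu: %s\n", A.c_str(), B.c_str());
}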
>From 15fd83ef87b8168f968dfb30b5574af1e58bd121 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Wed, 12 Feb 2025 07:57:12 -0800
Subject: [PATCH 13/20] Revert "Fix some AArch64 tests"

This reverts commit 8af99ab174450417b4330e902df688cbda3b3d4c.
---
 llvm/test/CodeGen/AArch64/aarch64-smull.ll |  86 +++++++++--------
 llvm/test/CodeGen/AArch64/arm64-vabs.ll    | 105 +++++++++------------
 llvm/test/CodeGen/AArch64/arm64-vadd.ll    |  67 +++++--------
 llvm/test/CodeGen/AArch64/arm64-vshift.ll  |  30 +++---
 4 files changed, 132 insertions(+), 156 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
index 390953490e87d..3b589d3480179 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
@@ -4,16 +4,7 @@
 ; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 ; CHECK-GI:       warning: Instruction selection used fallback path for pmlsl2_v8i16_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl2_v8i16_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl2_v8i16_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl2_v4i32_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl2_v4i32_uzp1
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for pmlsl_pmlsl2_v8i16_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl_smlsl2_v8i16_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl_umlsl2_v8i16_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl_smlsl2_v4i32_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl_umlsl2_v4i32_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for do_stuff
 
 define <8 x i16> @smull_v8i8_v8i16(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_v8i8_v8i16:
@@ -2039,8 +2030,9 @@ define void @smlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: smlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    uzp1 v2.16b, v0.16b, v2.16b
-; CHECK-GI-NEXT:    smlsl2 v1.8h, v0.16b, v2.16b
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
+; CHECK-GI-NEXT:    smlsl v1.8h, v0.8b, v2.8b
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2073,8 +2065,9 @@ define void @umlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: umlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    uzp1 v2.16b, v0.16b, v2.16b
-; CHECK-GI-NEXT:    umlsl2 v1.8h, v0.16b, v2.16b
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
+; CHECK-GI-NEXT:    umlsl v1.8h, v0.8b, v2.8b
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2107,8 +2100,9 @@ define void @smlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: smlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    uzp1 v2.8h, v0.8h, v2.8h
-; CHECK-GI-NEXT:    smlsl2 v1.4s, v0.8h, v2.8h
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
+; CHECK-GI-NEXT:    smlsl v1.4s, v0.4h, v2.4h
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2141,8 +2135,9 @@ define void @umlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: umlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    uzp1 v2.8h, v0.8h, v2.8h
-; CHECK-GI-NEXT:    umlsl2 v1.4s, v0.8h, v2.8h
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
+; CHECK-GI-NEXT:    umlsl v1.4s, v0.4h, v2.4h
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2203,11 +2198,14 @@ define void @smlsl_smlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: smlsl_smlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q2, q3, [x1]
-; CHECK-GI-NEXT:    uzp1 v2.16b, v2.16b, v3.16b
-; CHECK-GI-NEXT:    smlsl v1.8h, v0.8b, v2.8b
-; CHECK-GI-NEXT:    smlsl2 v1.8h, v0.16b, v2.16b
-; CHECK-GI-NEXT:    str q1, [x0]
+; CHECK-GI-NEXT:    ldp q4, q2, [x1]
+; CHECK-GI-NEXT:    mov d3, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
+; CHECK-GI-NEXT:    xtn v4.8b, v4.8h
+; CHECK-GI-NEXT:    smull v2.8h, v3.8b, v2.8b
+; CHECK-GI-NEXT:    smlal v2.8h, v0.8b, v4.8b
+; CHECK-GI-NEXT:    sub v0.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT:    str q0, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <8 x i16>, ptr %3, align 4
@@ -2246,11 +2244,14 @@ define void @umlsl_umlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: umlsl_umlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q2, q3, [x1]
-; CHECK-GI-NEXT:    uzp1 v2.16b, v2.16b, v3.16b
-; CHECK-GI-NEXT:    umlsl v1.8h, v0.8b, v2.8b
-; CHECK-GI-NEXT:    umlsl2 v1.8h, v0.16b, v2.16b
-; CHECK-GI-NEXT:    str q1, [x0]
+; CHECK-GI-NEXT:    ldp q4, q2, [x1]
+; CHECK-GI-NEXT:    mov d3, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
+; CHECK-GI-NEXT:    xtn v4.8b, v4.8h
+; CHECK-GI-NEXT:    umull v2.8h, v3.8b, v2.8b
+; CHECK-GI-NEXT:    umlal v2.8h, v0.8b, v4.8b
+; CHECK-GI-NEXT:    sub v0.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT:    str q0, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <8 x i16>, ptr %3, align 4
@@ -2289,11 +2290,14 @@ define void @smlsl_smlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: smlsl_smlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q2, q3, [x1]
-; CHECK-GI-NEXT:    uzp1 v2.8h, v2.8h, v3.8h
-; CHECK-GI-NEXT:    smlsl v1.4s, v0.4h, v2.4h
-; CHECK-GI-NEXT:    smlsl2 v1.4s, v0.8h, v2.8h
-; CHECK-GI-NEXT:    str q1, [x0]
+; CHECK-GI-NEXT:    ldp q4, q2, [x1]
+; CHECK-GI-NEXT:    mov d3, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
+; CHECK-GI-NEXT:    xtn v4.4h, v4.4s
+; CHECK-GI-NEXT:    smull v2.4s, v3.4h, v2.4h
+; CHECK-GI-NEXT:    smlal v2.4s, v0.4h, v4.4h
+; CHECK-GI-NEXT:    sub v0.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT:    str q0, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <4 x i32>, ptr %3, align 4
@@ -2332,11 +2336,14 @@ define void @umlsl_umlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: umlsl_umlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q2, q3, [x1]
-; CHECK-GI-NEXT:    uzp1 v2.8h, v2.8h, v3.8h
-; CHECK-GI-NEXT:    umlsl v1.4s, v0.4h, v2.4h
-; CHECK-GI-NEXT:    umlsl2 v1.4s, v0.8h, v2.8h
-; CHECK-GI-NEXT:    str q1, [x0]
+; CHECK-GI-NEXT:    ldp q4, q2, [x1]
+; CHECK-GI-NEXT:    mov d3, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
+; CHECK-GI-NEXT:    xtn v4.4h, v4.4s
+; CHECK-GI-NEXT:    umull v2.4s, v3.4h, v2.4h
+; CHECK-GI-NEXT:    umlal v2.4s, v0.4h, v4.4h
+; CHECK-GI-NEXT:    sub v0.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT:    str q0, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <4 x i32>, ptr %3, align 4
@@ -2373,8 +2380,9 @@ define <2 x i32> @do_stuff(<2 x i64> %0, <2 x i64> %1) {
 ;
 ; CHECK-GI-LABEL: do_stuff:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    uzp1 v0.4s, v0.4s, v0.4s
-; CHECK-GI-NEXT:    smull2 v0.2d, v1.4s, v0.4s
+; CHECK-GI-NEXT:    xtn v0.2s, v0.2d
+; CHECK-GI-NEXT:    mov d2, v1.d[1]
+; CHECK-GI-NEXT:    smull v0.2d, v2.2s, v0.2s
 ; CHECK-GI-NEXT:    xtn v0.2s, v0.2d
 ; CHECK-GI-NEXT:    add v0.2s, v0.2s, v1.2s
 ; CHECK-GI-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index bb598bef8ad54..fe4657186cd2a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -2,26 +2,9 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck -check-prefixes=CHECK,CHECK-SD %s
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:  warning: Instruction selection used fallback path for sabdl2_8h
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabdl2_4s
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabdl2_2d
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl2_8h
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl2_4s
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl2_2d
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabal2_8h
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabal2_4s
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabal2_2d
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabal2_8h
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabal2_4s
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabal2_2d
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fabds
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fabdd
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl_from_extract_dup
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl2_from_extract_dup
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabdl_from_extract_dup
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabdl2_from_extract_dup
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabd_i64
-
+; CHECK-GI:  warning: Instruction selection used fallback path for fabds
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fabdd
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabd_i64
 
 define <8 x i16> @sabdl8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabdl8h:
@@ -75,9 +58,9 @@ define <8 x i16> @sabdl2_8h(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: sabdl2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    sabdl.8h v0, v0, v1
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    sabdl2.8h v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -98,9 +81,9 @@ define <4 x i32> @sabdl2_4s(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: sabdl2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    sabdl.4s v0, v0, v1
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    sabdl2.4s v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -121,9 +104,9 @@ define <2 x i64> @sabdl2_2d(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: sabdl2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    sabdl.2d v0, v0, v1
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    sabdl2.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -186,9 +169,9 @@ define <8 x i16> @uabdl2_8h(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: uabdl2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    uabdl.8h v0, v0, v1
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    uabdl2.8h v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -210,9 +193,9 @@ define <4 x i32> @uabdl2_4s(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: uabdl2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    uabdl.4s v0, v0, v1
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    uabdl2.4s v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -233,9 +216,9 @@ define <2 x i64> @uabdl2_2d(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: uabdl2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    uabdl.2d v0, v0, v1
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    uabdl2.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -1163,10 +1146,10 @@ define <8 x i16> @sabal2_8h(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: sabal2_8h:
 ; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldr q1, [x0]
+; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    ldr d1, [x0, #8]
-; CHECK-GI-NEXT:    ldr d2, [x1, #8]
-; CHECK-GI-NEXT:    sabal.8h v0, v1, v2
+; CHECK-GI-NEXT:    sabal2.8h v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -1190,10 +1173,10 @@ define <4 x i32> @sabal2_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: sabal2_4s:
 ; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldr q1, [x0]
+; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    ldr d1, [x0, #8]
-; CHECK-GI-NEXT:    ldr d2, [x1, #8]
-; CHECK-GI-NEXT:    sabal.4s v0, v1, v2
+; CHECK-GI-NEXT:    sabal2.4s v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -1217,10 +1200,10 @@ define <2 x i64> @sabal2_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: sabal2_2d:
 ; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldr q1, [x0]
+; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    ldr d1, [x0, #8]
-; CHECK-GI-NEXT:    ldr d2, [x1, #8]
-; CHECK-GI-NEXT:    sabal.2d v0, v1, v2
+; CHECK-GI-NEXT:    sabal2.2d v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -1295,10 +1278,10 @@ define <8 x i16> @uabal2_8h(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: uabal2_8h:
 ; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldr q1, [x0]
+; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    ldr d1, [x0, #8]
-; CHECK-GI-NEXT:    ldr d2, [x1, #8]
-; CHECK-GI-NEXT:    uabal.8h v0, v1, v2
+; CHECK-GI-NEXT:    uabal2.8h v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -1322,10 +1305,10 @@ define <4 x i32> @uabal2_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: uabal2_4s:
 ; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldr q1, [x0]
+; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    ldr d1, [x0, #8]
-; CHECK-GI-NEXT:    ldr d2, [x1, #8]
-; CHECK-GI-NEXT:    uabal.4s v0, v1, v2
+; CHECK-GI-NEXT:    uabal2.4s v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -1349,10 +1332,10 @@ define <2 x i64> @uabal2_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: uabal2_2d:
 ; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldr q1, [x0]
+; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    ldr d1, [x0, #8]
-; CHECK-GI-NEXT:    ldr d2, [x1, #8]
-; CHECK-GI-NEXT:    uabal.2d v0, v1, v2
+; CHECK-GI-NEXT:    uabal2.2d v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -1625,8 +1608,9 @@ define <2 x i64> @uabdl2_from_extract_dup(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: uabdl2_from_extract_dup:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup.4s v1, w0
-; CHECK-GI-NEXT:    uabdl2.2d v0, v0, v1
+; CHECK-GI-NEXT:    dup.2s v1, w0
+; CHECK-GI-NEXT:    mov d0, v0[1]
+; CHECK-GI-NEXT:    uabdl.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
@@ -1659,8 +1643,9 @@ define <2 x i64> @sabdl2_from_extract_dup(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: sabdl2_from_extract_dup:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup.4s v1, w0
-; CHECK-GI-NEXT:    sabdl2.2d v0, v0, v1
+; CHECK-GI-NEXT:    dup.2s v1, w0
+; CHECK-GI-NEXT:    mov d0, v0[1]
+; CHECK-GI-NEXT:    sabdl.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
diff --git a/llvm/test/CodeGen/AArch64/arm64-vadd.ll b/llvm/test/CodeGen/AArch64/arm64-vadd.ll
index 017873158e562..d982dbbb1f69b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vadd.ll
@@ -2,29 +2,8 @@
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:    warning: Instruction selection used fallback path for saddl2_8h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl2_4s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl2_2d
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_8h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_4s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_2d
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddw2_8h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddw2_4s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddw2_2d
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddw2_8h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddw2_4s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddw2_2d
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddlp1d
+; CHECK-GI:         warning: Instruction selection used fallback path for saddlp1d
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddlp1d
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl_duprhs
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_duprhs
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl_duplhs
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl2_duplhs
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for usubl_duprhs
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for usubl2_duprhs
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for ssubl_duplhs
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for ssubl2_duplhs
-
 
 define <8 x i8> @addhn8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: addhn8b:
@@ -437,8 +416,8 @@ define <8 x i16> @uaddw2_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: uaddw2_8h:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    uaddw v0.8h, v0.8h, v1.8b
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    uaddw2 v0.8h, v0.8h, v1.16b
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <8 x i16>, ptr %A
 
@@ -461,8 +440,8 @@ define <4 x i32> @uaddw2_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: uaddw2_4s:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    uaddw v0.4s, v0.4s, v1.4h
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    uaddw2 v0.4s, v0.4s, v1.8h
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <4 x i32>, ptr %A
 
@@ -485,8 +464,8 @@ define <2 x i64> @uaddw2_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: uaddw2_2d:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    uaddw v0.2d, v0.2d, v1.2s
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    uaddw2 v0.2d, v0.2d, v1.4s
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <2 x i64>, ptr %A
 
@@ -551,8 +530,8 @@ define <8 x i16> @saddw2_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: saddw2_8h:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    saddw v0.8h, v0.8h, v1.8b
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    saddw2 v0.8h, v0.8h, v1.16b
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <8 x i16>, ptr %A
 
@@ -575,8 +554,8 @@ define <4 x i32> @saddw2_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: saddw2_4s:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    saddw v0.4s, v0.4s, v1.4h
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    saddw2 v0.4s, v0.4s, v1.8h
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <4 x i32>, ptr %A
 
@@ -599,8 +578,8 @@ define <2 x i64> @saddw2_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: saddw2_2d:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    saddw v0.2d, v0.2d, v1.2s
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    saddw2 v0.2d, v0.2d, v1.4s
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <2 x i64>, ptr %A
 
@@ -1069,8 +1048,9 @@ define <2 x i64> @uaddl2_duprhs(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: uaddl2_duprhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.4s, w0
-; CHECK-GI-NEXT:    uaddl2 v0.2d, v0.4s, v1.4s
+; CHECK-GI-NEXT:    dup v1.2s, w0
+; CHECK-GI-NEXT:    ushll v1.2d, v1.2s, #0
+; CHECK-GI-NEXT:    uaddw2 v0.2d, v1.2d, v0.4s
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
@@ -1111,8 +1091,9 @@ define <2 x i64> @saddl2_duplhs(i32 %lhs, <4 x i32> %rhs) {
 ;
 ; CHECK-GI-LABEL: saddl2_duplhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.4s, w0
-; CHECK-GI-NEXT:    saddl2 v0.2d, v1.4s, v0.4s
+; CHECK-GI-NEXT:    dup v1.2s, w0
+; CHECK-GI-NEXT:    sshll v1.2d, v1.2s, #0
+; CHECK-GI-NEXT:    saddw2 v0.2d, v1.2d, v0.4s
 ; CHECK-GI-NEXT:    ret
   %lhsvec.tmp = insertelement <2 x i32> undef, i32 %lhs, i32 0
   %lhsvec = insertelement <2 x i32> %lhsvec.tmp, i32 %lhs, i32 1
@@ -1153,8 +1134,9 @@ define <2 x i64> @usubl2_duprhs(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: usubl2_duprhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.4s, w0
-; CHECK-GI-NEXT:    usubl2 v0.2d, v0.4s, v1.4s
+; CHECK-GI-NEXT:    dup v1.2s, w0
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    usubl v0.2d, v0.2s, v1.2s
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
@@ -1195,8 +1177,9 @@ define <2 x i64> @ssubl2_duplhs(i32 %lhs, <4 x i32> %rhs) {
 ;
 ; CHECK-GI-LABEL: ssubl2_duplhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.4s, w0
-; CHECK-GI-NEXT:    ssubl2 v0.2d, v1.4s, v0.4s
+; CHECK-GI-NEXT:    dup v1.2s, w0
+; CHECK-GI-NEXT:    sshll v1.2d, v1.2s, #0
+; CHECK-GI-NEXT:    ssubw2 v0.2d, v1.2d, v0.4s
 ; CHECK-GI-NEXT:    ret
   %lhsvec.tmp = insertelement <2 x i32> undef, i32 %lhs, i32 0
   %lhsvec = insertelement <2 x i32> %lhsvec.tmp, i32 %lhs, i32 1
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 343b9c98fc205..2f543cc324bc2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=0 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=1 -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:  warning: Instruction selection used fallback path for sqshl1d
+; CHECK-GI:       warning: Instruction selection used fallback path for sqshl1d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqshl1d_constant
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqshl_scalar
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqshl_scalar_constant
@@ -82,22 +82,15 @@
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uqshrn16b
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uqshrn8h
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uqshrn4s
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ushll2_8h
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ushll2_4s
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ushll2_2d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_ushl_vscalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_ushl_scalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_sshll_vscalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_sshll_scalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_sshll_scalar_constant_shift_m1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sshll2_8h
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sshll2_4s
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sshll2_2d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ursra1d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ursra_scalar
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for srsra1d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for srsra_scalar
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for shll_high
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sli8b
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sli4h
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sli2s
@@ -2234,7 +2227,8 @@ define <8 x i16> @ushll2_8h(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: ushll2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
 ; CHECK-GI-NEXT:    ushll v0.8h, v0.8b, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
@@ -2253,7 +2247,8 @@ define <4 x i32> @ushll2_4s(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: ushll2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
 ; CHECK-GI-NEXT:    ushll v0.4s, v0.4h, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
@@ -2272,7 +2267,8 @@ define <2 x i64> @ushll2_2d(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: ushll2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
 ; CHECK-GI-NEXT:    ushll v0.2d, v0.2s, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
@@ -2826,7 +2822,8 @@ define <8 x i16> @sshll2_8h(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: sshll2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
 ; CHECK-GI-NEXT:    sshll v0.8h, v0.8b, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
@@ -2845,7 +2842,8 @@ define <4 x i32> @sshll2_4s(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: sshll2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
 ; CHECK-GI-NEXT:    sshll v0.4s, v0.4h, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
@@ -2864,7 +2862,8 @@ define <2 x i64> @sshll2_2d(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: sshll2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
 ; CHECK-GI-NEXT:    sshll v0.2d, v0.2s, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
@@ -4028,7 +4027,8 @@ define <4 x i32> @shll_high(<8 x i16> %in) {
 ;
 ; CHECK-GI-LABEL: shll_high:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    shll2 v0.4s, v0.8h, #16
+; CHECK-GI-NEXT:    ushll2 v0.4s, v0.8h, #0
+; CHECK-GI-NEXT:    shl v0.4s, v0.4s, #16
 ; CHECK-GI-NEXT:    ret
   %extract = shufflevector <8 x i16> %in, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %ext = zext <4 x i16> %extract to <4 x i32>

>From cd3da99085112938aca794568653c1f7fac07fb7 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Wed, 12 Feb 2025 10:02:46 -0800
Subject: [PATCH 14/20] Fix linting and add test

---
 llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 11 +++++++----
 .../CodeGen/AMDGPU/GlobalISel/shufflevector.ll | 18 ++++++++++++++++++
 2 files changed, 25 insertions(+), 4 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/shufflevector.ll

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 24945d4591b4b..34e7b21ec43f6 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -384,11 +384,12 @@ void CombinerHelper::applyCombineConcatVectors(
   MI.eraseFromParent();
 }
 
-bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx) const {
+bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI,
+                                                int64_t &Idx) const {
   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
          "Invalid instruction");
   auto &Shuffle = cast<GShuffleVector>(MI);
-  
+
   auto SrcVec1 = Shuffle.getSrc1Reg();
   int SrcVec2 = Shuffle.getSrc2Reg();
 
@@ -430,7 +431,8 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
   return true;
 }
 
-void CombinerHelper::applyCombineShuffleExtract(MachineInstr &MI, int64_t Idx) const {
+void CombinerHelper::applyCombineShuffleExtract(MachineInstr &MI,
+                                                int64_t Idx) const {
   auto &Shuffle = cast<GShuffleVector>(MI);
 
   auto SrcVec1 = Shuffle.getSrc1Reg();
@@ -440,7 +442,8 @@ void CombinerHelper::applyCombineShuffleExtract(MachineInstr &MI, int64_t Idx) c
 
   auto SrcVec = Idx < Width ? SrcVec1 : SrcVec2;
 
-  Builder.buildExtract(MI.getOperand(0).getReg(), SrcVec, Idx * EltTy.getSizeInBits());
+  Builder.buildExtract(MI.getOperand(0).getReg(), SrcVec,
+                       Idx * EltTy.getSizeInBits());
   MI.eraseFromParent();
 }
 
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/shufflevector.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/shufflevector.ll
new file mode 100644
index 0000000000000..09274c4d3626b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/shufflevector.ll
@@ -0,0 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX942 %s
+
+define void @shuffle_to_extract(ptr addrspace(3) %in, ptr addrspace(3) %out) {
+; GFX942-LABEL: shuffle_to_extract:
+; GFX942:       ; %bb.0:
+; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT:    ds_read2_b64 v[2:5], v0 offset1:1
+; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942-NEXT:    ds_write_b64 v1, v[4:5]
+; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942-NEXT:    s_setpc_b64 s[30:31]
+  %val = load <8 x half>, ptr addrspace(3) %in, align 8
+  %res = shufflevector <8 x half> %val, <8 x half> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  store <4 x half> %res, ptr addrspace(3) %out, align 8
+  ret void
+}
+

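The new test exercises exactly the shape the combine targets: mask (4, 5, 6, 7) selects the high half of the <8 x half> load, so the shuffle becomes a subvector extract at lane 4, and the store writes v[4:5], the upper 64 bits of the loaded pair, with no shuffle code emitted. A one-line sketch of the bit-offset computation the apply step performs (illustrative only):

#include <cstdio>

// G_EXTRACT offsets are in bits: first extracted lane times element width.
static int extractBitOffset(int StartLane, int EltBits) {
  return StartLane * EltBits;
}

int main() {
  // <4 x half> out of <8 x half>, starting at lane 4: 4 * 16 = 64 bits.
  std::printf("%d\n", extractBitOffset(4, 16));
}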
>From 6337767d9033a62e894402a8358c59e3b4f6d674 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Wed, 12 Feb 2025 17:58:11 -0800
Subject: [PATCH 15/20] Adding mir test case

---
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp |   4 +-
 .../prelegalizer-combiner-shuffle.mir         | 132 ++++++++++++++++++
 2 files changed, 134 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 34e7b21ec43f6..165dd68193d3a 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -420,9 +420,9 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI,
     return false;
   }
 
-  // Check if the extractee's order is kept, and they should be conscecutive.
+  // Check if the extractee's order is kept, and they should be consecutive.
   for (size_t i = 1; i < Mask.size(); ++i) {
-    if (Mask[i] != Mask[i - 1] + 1 || Mask[i] == -1) {
+    if (Mask[i] != Mask[i - 1] + 1) {
       return false; // Not consecutive
     }
   }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir
new file mode 100644
index 0000000000000..46ad02f8aded8
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir
@@ -0,0 +1,132 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=amdgpu-prelegalizer-combiner -verify-machineinstrs -o - %s | FileCheck %s
+
+---
+name: shuffle_vector_to_extract
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; CHECK-LABEL: name: shuffle_vector_to_extract
+    ; CHECK: liveins: $vgpr0, $vgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<4 x s16>) = G_EXTRACT [[LOAD]](<8 x s16>), 64
+    ; CHECK-NEXT: G_STORE [[EXTRACT]](<4 x s16>), [[COPY1]](p3) :: (store (<4 x s16>), addrspace 3)
+    ; CHECK-NEXT: SI_RETURN
+    %0:_(p3) = COPY $vgpr0
+    %1:_(p3) = COPY $vgpr1
+    %12:_(<8 x s16>) = G_IMPLICIT_DEF
+    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %11:_(<4 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(4, 5, 6, 7)
+    G_STORE %11(<4 x s16>), %1(p3) :: (store (<4 x s16>), addrspace 3)
+    SI_RETURN
+...
+
+---
+name: shuffle_vector_to_extract2
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; CHECK-LABEL: name: shuffle_vector_to_extract2
+    ; CHECK: liveins: $vgpr0, $vgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[LOAD]](<8 x s16>), 48
+    ; CHECK-NEXT: G_STORE [[EXTRACT]](<2 x s16>), [[COPY1]](p3) :: (store (<2 x s16>), addrspace 3)
+    ; CHECK-NEXT: SI_RETURN
+    %0:_(p3) = COPY $vgpr0
+    %1:_(p3) = COPY $vgpr1
+    %12:_(<8 x s16>) = G_IMPLICIT_DEF
+    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %11:_(<2 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(3, 4)
+    G_STORE %11(<2 x s16>), %1(p3) :: (store (<2 x s16>), addrspace 3)
+    SI_RETURN
+
+...
+
+---
+name: shuffle_vector_to_extract_odd_elements
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; CHECK-LABEL: name: shuffle_vector_to_extract_odd_elements
+    ; CHECK: liveins: $vgpr0, $vgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[LOAD]](<8 x s16>), 0
+    ; CHECK-NEXT: G_STORE [[EXTRACT]](<3 x s16>), [[COPY1]](p3) :: (store (<3 x s16>), align 8, addrspace 3)
+    ; CHECK-NEXT: SI_RETURN
+    %0:_(p3) = COPY $vgpr0
+    %1:_(p3) = COPY $vgpr1
+    %12:_(<8 x s16>) = G_IMPLICIT_DEF
+    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %11:_(<3 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(0, 1, 2)
+    G_STORE %11(<3 x s16>), %1(p3) :: (store (<3 x s16>), addrspace 3)
+    SI_RETURN
+...
+
+
+---
+name: shuffle_vector_to_extract_minus_1_no_conversion
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; CHECK-LABEL: name: shuffle_vector_to_extract_minus_1_no_conversion
+    ; CHECK: liveins: $vgpr0, $vgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s16>) = G_SHUFFLE_VECTOR [[LOAD]](<8 x s16>), [[DEF]], shufflemask(4, 5, undef, 7)
+    ; CHECK-NEXT: G_STORE [[SHUF]](<4 x s16>), [[COPY1]](p3) :: (store (<4 x s16>), addrspace 3)
+    ; CHECK-NEXT: SI_RETURN
+    %0:_(p3) = COPY $vgpr0
+    %1:_(p3) = COPY $vgpr1
+    %12:_(<8 x s16>) = G_IMPLICIT_DEF
+    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %11:_(<4 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(4, 5, -1, 7)
+    G_STORE %11(<4 x s16>), %1(p3) :: (store (<4 x s16>), addrspace 3)
+    SI_RETURN
+...
+
+---
+name: shuffle_vector_to_extract_across_vectors_no_conversion
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; CHECK-LABEL: name: shuffle_vector_to_extract_across_vectors_no_conversion
+    ; CHECK: liveins: $vgpr0, $vgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s16>) = G_SHUFFLE_VECTOR [[LOAD]](<8 x s16>), [[DEF]], shufflemask(6, 7, undef, undef)
+    ; CHECK-NEXT: G_STORE [[SHUF]](<4 x s16>), [[COPY1]](p3) :: (store (<4 x s16>), addrspace 3)
+    ; CHECK-NEXT: SI_RETURN
+    %0:_(p3) = COPY $vgpr0
+    %1:_(p3) = COPY $vgpr1
+    %12:_(<8 x s16>) = G_IMPLICIT_DEF
+    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %11:_(<4 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(6, 7, 8, 9)
+    G_STORE %11(<4 x s16>), %1(p3) :: (store (<4 x s16>), addrspace 3)
+    SI_RETURN
+...
+
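
The G_EXTRACT offsets checked in the positive tests above come from the apply
step, which at this point in the series still emits
Builder.buildExtract(..., Idx * EltTy.getSizeInBits()) with Idx taken from the
first mask entry. A short worked check (illustration only; extractOffsetBits
is a hypothetical helper):

    // Bit offset of the extracted subvector for an s16 element type.
    unsigned extractOffsetBits(unsigned FirstMaskIdx, unsigned EltBits) {
      return FirstMaskIdx * EltBits;
    }
    // shufflemask(4, 5, 6, 7) -> extractOffsetBits(4, 16) == 64
    // shufflemask(3, 4)       -> extractOffsetBits(3, 16) == 48
    // shufflemask(0, 1, 2)    -> extractOffsetBits(0, 16) == 0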

>From d71249ac9e0501073dd88072c1e245836ed8ccce Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Thu, 13 Feb 2025 07:11:37 -0800
Subject: [PATCH 16/20] Switch to a different pattern-matching style.

---
 llvm/include/llvm/Target/GlobalISel/Combine.td | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 17f1825cea7e4..7ec8f4f2302ce 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1563,7 +1563,7 @@ def combine_shuffle_concat : GICombineRule<
 // Combines shuffles of a vector into extract_subvector
 def combine_shuffle_vector : GICombineRule<
   (defs root:$root, int64_matchinfo:$matchinfo),
-  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
+  (match (G_SHUFFLE_VECTOR $root, $src1, $src2, $mask):$root,
     [{ return Helper.matchCombineShuffleExtract(*${root}, ${matchinfo}); }]),
   (apply [{ Helper.applyCombineShuffleExtract(*${root}, ${matchinfo}); }])>;
 

>From 483feb8fba54fbd0a30d1fb0ef8e86c94009cc4d Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Thu, 13 Feb 2025 09:31:31 -0800
Subject: [PATCH 17/20] Do not combine into G_EXTRACT.

---
 .../llvm/CodeGen/GlobalISel/CombinerHelper.h  |  4 +-
 .../include/llvm/Target/GlobalISel/Combine.td |  4 +-
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 46 ++++++++++---------
 .../prelegalizer-combiner-shuffle.mir         | 29 +++++++-----
 4 files changed, 46 insertions(+), 37 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 66a607fe1f231..9134591868d39 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -265,8 +265,8 @@ class CombinerHelper {
                                  SmallVector<Register> &Ops) const;
 
   /// Replace \p MI with a narrowed vector extract.
-  bool matchCombineShuffleExtract(MachineInstr &MI, int64_t &IsFirst) const;
-  void applyCombineShuffleExtract(MachineInstr &MI, int64_t IsFirst) const;
+  bool matchCombineShuffle(MachineInstr &MI) const;
+  void applyCombineShuffle(MachineInstr &MI) const;
 
   /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
   /// Returns true if MI changed.
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 7ec8f4f2302ce..83e8b21052e1d 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1564,8 +1564,8 @@ def combine_shuffle_concat : GICombineRule<
 def combine_shuffle_vector : GICombineRule<
   (defs root:$root, int64_matchinfo:$matchinfo),
   (match (G_SHUFFLE_VECTOR $root, $src1, $src2, $mask):$root,
-    [{ return Helper.matchCombineShuffleExtract(*${root}, ${matchinfo}); }]),
-  (apply [{ Helper.applyCombineShuffleExtract(*${root}, ${matchinfo}); }])>;
+    [{ return Helper.matchCombineShuffle(*${root}); }]),
+  (apply [{ Helper.applyCombineShuffle(*${root}); }])>;
 
 def insert_vector_element_idx_undef : GICombineRule<
    (defs root:$root),
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 165dd68193d3a..e0357be852831 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -384,8 +384,7 @@ void CombinerHelper::applyCombineConcatVectors(
   MI.eraseFromParent();
 }
 
-bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI,
-                                                int64_t &Idx) const {
+bool CombinerHelper::matchCombineShuffle(MachineInstr &MI) const {
   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
          "Invalid instruction");
   auto &Shuffle = cast<GShuffleVector>(MI);
@@ -399,51 +398,56 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI,
     return false;
   }
 
+  /*
   auto Mask = Shuffle.getMask();
-  int Width = MRI.getType(SrcVec1).getNumElements();
-  int Width2 = MRI.getType(SrcVec2).getNumElements();
-
-  if (!llvm::isPowerOf2_32(Width))
-    return false;
 
   // Check that all elements are extracted from the same vector, i.e. stay
   // within a single vector.
+  // TODO: this check is unnecessary.
   auto MaxValue = *std::max_element(Mask.begin(), Mask.end());
   auto MinValue = *std::min_element(Mask.begin(), Mask.end());
   if (MaxValue >= Width && MinValue < Width) {
     return false;
   }
 
-  // Check that the extractee length is power of 2.
-  if ((MaxValue < Width && !llvm::isPowerOf2_32(Width)) ||
-      (MinValue >= Width && !llvm::isPowerOf2_32(Width2))) {
-    return false;
-  }
-
   // Check that the extractees' order is kept and that they are consecutive.
+  // Relaxing this check would allow more combine opportunities.
   for (size_t i = 1; i < Mask.size(); ++i) {
     if (Mask[i] != Mask[i - 1] + 1) {
       return false; // Not consecutive
     }
   }
+  */
 
-  Idx = Mask.front();
   return true;
 }
 
-void CombinerHelper::applyCombineShuffleExtract(MachineInstr &MI,
-                                                int64_t Idx) const {
+void CombinerHelper::applyCombineShuffle(MachineInstr &MI) const {
   auto &Shuffle = cast<GShuffleVector>(MI);
 
   auto SrcVec1 = Shuffle.getSrc1Reg();
   auto SrcVec2 = Shuffle.getSrc2Reg();
   auto EltTy = MRI.getType(SrcVec1).getElementType();
-  int Width = MRI.getType(SrcVec1).getNumElements();
-
-  auto SrcVec = Idx < Width ? SrcVec1 : SrcVec2;
+  auto Width = MRI.getType(SrcVec1).getNumElements();
+  auto ExtractWidth = Shuffle.getMask().size();
+
+  auto Unmerge1 = Builder.buildUnmerge(EltTy, SrcVec1);
+  auto Unmerge2 = Builder.buildUnmerge(EltTy, SrcVec2);
+  
+
+  llvm::SmallVector<Register> Extracts;
+  // Select only applicable elements from unmerged values.
+  for (auto Val : Shuffle.getMask()) {
+    if (Val == -1) {
+      Extracts.push_back(Builder.buildUndef(EltTy).getReg(0));
+    } else if (Val < Width) {
+      Extracts.push_back(Unmerge1.getReg(Val));
+    } else {
+      Extracts.push_back(Unmerge2.getReg(Val - Width));
+    }
+  }
 
-  Builder.buildExtract(MI.getOperand(0).getReg(), SrcVec,
-                       Idx * EltTy.getSizeInBits());
+  Builder.buildBuildVector(MI.getOperand(0).getReg(), Extracts);
   MI.eraseFromParent();
 }
 
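
Outside of MIR, the selection loop above behaves like this sketch (plain
integers stand in for registers; pickLanes and its parameter names are
illustrative, not part of the patch):

    #include <vector>

    // Given the per-lane results of unmerging both source vectors, pick one
    // value per mask entry: -1 takes the undef placeholder, indices below
    // Width read the first source, the rest read the second.
    std::vector<int> pickLanes(const std::vector<int> &Unmerge1,
                               const std::vector<int> &Unmerge2,
                               const std::vector<int> &Mask, int UndefVal) {
      const int Width = static_cast<int>(Unmerge1.size());
      std::vector<int> Out;
      for (int Val : Mask) {
        if (Val == -1)
          Out.push_back(UndefVal);              // undef lane
        else if (Val < Width)
          Out.push_back(Unmerge1[Val]);         // lane from the first source
        else
          Out.push_back(Unmerge2[Val - Width]); // lane from the second source
      }
      return Out; // becomes the G_BUILD_VECTOR operands
    }

For shufflemask(3, 4) on <8 x s16> sources this picks Unmerge1[3] and
Unmerge1[4], matching the [[UV3]]/[[UV4]] operands in the updated
shuffle_vector_to_extract2 checks below.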
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir
index 46ad02f8aded8..bba608cceee19 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir
@@ -14,8 +14,9 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
-    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<4 x s16>) = G_EXTRACT [[LOAD]](<8 x s16>), 64
-    ; CHECK-NEXT: G_STORE [[EXTRACT]](<4 x s16>), [[COPY1]](p3) :: (store (<4 x s16>), addrspace 3)
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<8 x s16>)
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UV4]](s16), [[UV5]](s16), [[UV6]](s16), [[UV7]](s16)
+    ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s16>), [[COPY1]](p3) :: (store (<4 x s16>), addrspace 3)
     ; CHECK-NEXT: SI_RETURN
     %0:_(p3) = COPY $vgpr0
     %1:_(p3) = COPY $vgpr1
@@ -39,8 +40,9 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
-    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[LOAD]](<8 x s16>), 48
-    ; CHECK-NEXT: G_STORE [[EXTRACT]](<2 x s16>), [[COPY1]](p3) :: (store (<2 x s16>), addrspace 3)
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<8 x s16>)
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[UV3]](s16), [[UV4]](s16)
+    ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<2 x s16>), [[COPY1]](p3) :: (store (<2 x s16>), addrspace 3)
     ; CHECK-NEXT: SI_RETURN
     %0:_(p3) = COPY $vgpr0
     %1:_(p3) = COPY $vgpr1
@@ -65,8 +67,9 @@ body:             |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
-    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[LOAD]](<8 x s16>), 0
-    ; CHECK-NEXT: G_STORE [[EXTRACT]](<3 x s16>), [[COPY1]](p3) :: (store (<3 x s16>), align 8, addrspace 3)
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<8 x s16>)
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[UV]](s16), [[UV1]](s16), [[UV2]](s16)
+    ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<3 x s16>), [[COPY1]](p3) :: (store (<3 x s16>), align 8, addrspace 3)
     ; CHECK-NEXT: SI_RETURN
     %0:_(p3) = COPY $vgpr0
     %1:_(p3) = COPY $vgpr1
@@ -90,10 +93,11 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
-    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
-    ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s16>) = G_SHUFFLE_VECTOR [[LOAD]](<8 x s16>), [[DEF]], shufflemask(4, 5, undef, 7)
-    ; CHECK-NEXT: G_STORE [[SHUF]](<4 x s16>), [[COPY1]](p3) :: (store (<4 x s16>), addrspace 3)
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<8 x s16>)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UV4]](s16), [[UV5]](s16), [[DEF]](s16), [[UV7]](s16)
+    ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s16>), [[COPY1]](p3) :: (store (<4 x s16>), addrspace 3)
     ; CHECK-NEXT: SI_RETURN
     %0:_(p3) = COPY $vgpr0
     %1:_(p3) = COPY $vgpr1
@@ -116,10 +120,11 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
-    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
-    ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s16>) = G_SHUFFLE_VECTOR [[LOAD]](<8 x s16>), [[DEF]], shufflemask(6, 7, undef, undef)
-    ; CHECK-NEXT: G_STORE [[SHUF]](<4 x s16>), [[COPY1]](p3) :: (store (<4 x s16>), addrspace 3)
+    ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<8 x s16>)
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UV6]](s16), [[UV7]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK-NEXT: G_STORE [[BUILD_VECTOR]](<4 x s16>), [[COPY1]](p3) :: (store (<4 x s16>), addrspace 3)
     ; CHECK-NEXT: SI_RETURN
     %0:_(p3) = COPY $vgpr0
     %1:_(p3) = COPY $vgpr1

>From e1f971706750a09e3ad9daef04f0bc6f274e6c9c Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Thu, 13 Feb 2025 14:42:13 -0800
Subject: [PATCH 18/20] Update AMDGPU test cases.

---
 ...ffer-fat-pointers-contents-legalization.ll | 27 +++++--------------
 .../CodeGen/AMDGPU/integer-mad-patterns.ll    |  6 ++---
 llvm/test/CodeGen/AMDGPU/mad-mix.ll           | 16 +++--------
 3 files changed, 13 insertions(+), 36 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-contents-legalization.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-contents-legalization.ll
index 405058b24dcc2..fdc1dd6cce8e1 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-contents-legalization.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-contents-legalization.ll
@@ -1736,10 +1736,6 @@ define <5 x i16> @load_v5i16(ptr addrspace(8) inreg %buf) {
 ; GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GISEL-NEXT:    buffer_load_dwordx2 v[0:1], off, s[16:19], 0
 ; GISEL-NEXT:    buffer_load_ushort v2, off, s[16:19], 0 offset:8
-; GISEL-NEXT:    s_mov_b32 s4, 0xffff
-; GISEL-NEXT:    s_waitcnt vmcnt(1)
-; GISEL-NEXT:    v_bfi_b32 v0, s4, v0, v0
-; GISEL-NEXT:    v_bfi_b32 v1, s4, v1, v1
 ; GISEL-NEXT:    s_waitcnt vmcnt(0)
 ; GISEL-NEXT:    s_setpc_b64 s[30:31]
   %p = addrspacecast ptr addrspace(8) %buf to ptr addrspace(7)
@@ -1820,11 +1816,6 @@ define <7 x i16> @load_v7i16(ptr addrspace(8) inreg %buf) {
 ; GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GISEL-NEXT:    buffer_load_dwordx3 v[0:2], off, s[16:19], 0
 ; GISEL-NEXT:    buffer_load_ushort v3, off, s[16:19], 0 offset:12
-; GISEL-NEXT:    s_mov_b32 s4, 0xffff
-; GISEL-NEXT:    s_waitcnt vmcnt(1)
-; GISEL-NEXT:    v_bfi_b32 v0, s4, v0, v0
-; GISEL-NEXT:    v_bfi_b32 v1, s4, v1, v1
-; GISEL-NEXT:    v_bfi_b32 v2, s4, v2, v2
 ; GISEL-NEXT:    s_waitcnt vmcnt(0)
 ; GISEL-NEXT:    s_setpc_b64 s[30:31]
   %p = addrspacecast ptr addrspace(8) %buf to ptr addrspace(7)
@@ -1867,12 +1858,6 @@ define <9 x i16> @load_v9i16(ptr addrspace(8) inreg %buf) {
 ; GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GISEL-NEXT:    buffer_load_dwordx4 v[0:3], off, s[16:19], 0
 ; GISEL-NEXT:    buffer_load_ushort v4, off, s[16:19], 0 offset:16
-; GISEL-NEXT:    s_mov_b32 s4, 0xffff
-; GISEL-NEXT:    s_waitcnt vmcnt(1)
-; GISEL-NEXT:    v_bfi_b32 v0, s4, v0, v0
-; GISEL-NEXT:    v_bfi_b32 v1, s4, v1, v1
-; GISEL-NEXT:    v_bfi_b32 v2, s4, v2, v2
-; GISEL-NEXT:    v_bfi_b32 v3, s4, v3, v3
 ; GISEL-NEXT:    s_waitcnt vmcnt(0)
 ; GISEL-NEXT:    s_setpc_b64 s[30:31]
   %p = addrspacecast ptr addrspace(8) %buf to ptr addrspace(7)
@@ -2181,14 +2166,14 @@ define <6 x i8> @load_v6i8(ptr addrspace(8) inreg %buf) {
 ; GISEL-LABEL: load_v6i8:
 ; GISEL:       ; %bb.0:
 ; GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT:    buffer_load_dword v0, off, s[16:19], 0
 ; GISEL-NEXT:    buffer_load_ushort v4, off, s[16:19], 0 offset:4
+; GISEL-NEXT:    buffer_load_dword v0, off, s[16:19], 0
 ; GISEL-NEXT:    s_waitcnt vmcnt(1)
-; GISEL-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; GISEL-NEXT:    v_lshrrev_b32_e32 v5, 8, v4
+; GISEL-NEXT:    s_waitcnt vmcnt(0)
 ; GISEL-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
+; GISEL-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
 ; GISEL-NEXT:    v_lshrrev_b32_e32 v3, 24, v0
-; GISEL-NEXT:    s_waitcnt vmcnt(0)
-; GISEL-NEXT:    v_lshrrev_b32_e32 v5, 8, v4
 ; GISEL-NEXT:    s_setpc_b64 s[30:31]
   %p = addrspacecast ptr addrspace(8) %buf to ptr addrspace(7)
   %ret = load <6 x i8>, ptr addrspace(7) %p
@@ -3644,11 +3629,11 @@ define <6 x i8> @volatile_load_v6i8(ptr addrspace(8) inreg %buf) {
 ; GISEL-NEXT:    buffer_load_dword v0, off, s[16:19], 0 glc
 ; GISEL-NEXT:    buffer_load_ushort v4, off, s[16:19], 0 offset:4 glc
 ; GISEL-NEXT:    s_waitcnt vmcnt(1)
-; GISEL-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
 ; GISEL-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
-; GISEL-NEXT:    v_lshrrev_b32_e32 v3, 24, v0
 ; GISEL-NEXT:    s_waitcnt vmcnt(0)
 ; GISEL-NEXT:    v_lshrrev_b32_e32 v5, 8, v4
+; GISEL-NEXT:    v_lshrrev_b32_e32 v2, 16, v0
+; GISEL-NEXT:    v_lshrrev_b32_e32 v3, 24, v0
 ; GISEL-NEXT:    s_setpc_b64 s[30:31]
   %p = addrspacecast ptr addrspace(8) %buf to ptr addrspace(7)
   %ret = load volatile <6 x i8>, ptr addrspace(7) %p
diff --git a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
index c0c0d3ded117d..c7f02b162dfee 100644
--- a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
+++ b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
@@ -9049,13 +9049,13 @@ define <4 x i16> @multi_use_mul_mad_v2i16_var(<2 x i16> %x, <2 x i16> %y, <2 x i
 ; GFX8-GISEL-NEXT:    v_lshrrev_b32_e32 v4, 16, v0
 ; GFX8-GISEL-NEXT:    v_lshrrev_b32_e32 v5, 16, v1
 ; GFX8-GISEL-NEXT:    v_lshrrev_b32_e32 v6, 16, v2
+; GFX8-GISEL-NEXT:    v_lshrrev_b32_e32 v7, 16, v3
 ; GFX8-GISEL-NEXT:    v_mad_u16 v6, v4, v5, v6
 ; GFX8-GISEL-NEXT:    v_mad_u16 v2, v0, v1, v2
 ; GFX8-GISEL-NEXT:    v_lshlrev_b32_e32 v6, 16, v6
-; GFX8-GISEL-NEXT:    v_or_b32_e32 v2, v2, v6
-; GFX8-GISEL-NEXT:    v_lshrrev_b32_e32 v6, 16, v3
 ; GFX8-GISEL-NEXT:    v_mad_u16 v0, v0, v1, v3
-; GFX8-GISEL-NEXT:    v_mad_u16 v1, v4, v5, v6
+; GFX8-GISEL-NEXT:    v_mad_u16 v1, v4, v5, v7
+; GFX8-GISEL-NEXT:    v_or_b32_e32 v2, v2, v6
 ; GFX8-GISEL-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX8-GISEL-NEXT:    v_or_b32_e32 v1, v0, v1
 ; GFX8-GISEL-NEXT:    v_mov_b32_e32 v0, v2
diff --git a/llvm/test/CodeGen/AMDGPU/mad-mix.ll b/llvm/test/CodeGen/AMDGPU/mad-mix.ll
index 4c2a16c17b38a..1720bf984ca09 100644
--- a/llvm/test/CodeGen/AMDGPU/mad-mix.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad-mix.ll
@@ -440,21 +440,13 @@ define <2 x float> @v_mad_mix_v2f32_shuffle(<2 x half> %src0, <2 x half> %src1,
 ; GISEL-CI-LABEL: v_mad_mix_v2f32_shuffle:
 ; GISEL-CI:       ; %bb.0:
 ; GISEL-CI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-CI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GISEL-CI-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; GISEL-CI-NEXT:    v_or_b32_e32 v0, v1, v0
-; GISEL-CI-NEXT:    v_lshlrev_b32_e32 v1, 16, v5
-; GISEL-CI-NEXT:    v_and_b32_e32 v4, 0xffff, v4
-; GISEL-CI-NEXT:    v_or_b32_e32 v1, v1, v4
-; GISEL-CI-NEXT:    v_lshrrev_b32_e32 v4, 16, v0
-; GISEL-CI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
-; GISEL-CI-NEXT:    v_cvt_f32_f16_e32 v4, v4
-; GISEL-CI-NEXT:    v_cvt_f32_f16_e32 v5, v0
+; GISEL-CI-NEXT:    v_cvt_f32_f16_e32 v4, v1
+; GISEL-CI-NEXT:    v_cvt_f32_f16_e32 v6, v0
 ; GISEL-CI-NEXT:    v_cvt_f32_f16_e32 v0, v2
-; GISEL-CI-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; GISEL-CI-NEXT:    v_cvt_f32_f16_e32 v1, v5
 ; GISEL-CI-NEXT:    v_cvt_f32_f16_e32 v2, v3
 ; GISEL-CI-NEXT:    v_mad_f32 v0, v4, v0, v1
-; GISEL-CI-NEXT:    v_mac_f32_e32 v1, v5, v2
+; GISEL-CI-NEXT:    v_mac_f32_e32 v1, v6, v2
 ; GISEL-CI-NEXT:    s_setpc_b64 s[30:31]
   %src0.shuf = shufflevector <2 x half> %src0, <2 x half> undef, <2 x i32> <i32 1, i32 0>
   %src1.shuf = shufflevector <2 x half> %src1, <2 x half> undef, <2 x i32> <i32 0, i32 1>

>From a1b2ecd50776a9410134e9727289bb2bea78b84e Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Thu, 13 Feb 2025 14:46:17 -0800
Subject: [PATCH 19/20] linting

---
 llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index e0357be852831..ff27dd2fcf097 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -433,7 +433,6 @@ void CombinerHelper::applyCombineShuffle(MachineInstr &MI) const {
 
   auto Unmerge1 = Builder.buildUnmerge(EltTy, SrcVec1);
   auto Unmerge2 = Builder.buildUnmerge(EltTy, SrcVec2);
-  
 
   llvm::SmallVector<Register> Extracts;
   // Select only applicable elements from unmerged values.

>From b72a6828bdfebadb5d190ec052bc89c5e946a8d0 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Fri, 21 Feb 2025 13:07:41 -0500
Subject: [PATCH 20/20] Simplify the combine rule defs and drop an unused variable

---
 llvm/include/llvm/Target/GlobalISel/Combine.td | 4 ++--
 llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 3 +--
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 83e8b21052e1d..3e76348a8f721 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1562,8 +1562,8 @@ def combine_shuffle_concat : GICombineRule<
 
 // Combines shuffles of a vector into extract_subvector
 def combine_shuffle_vector : GICombineRule<
-  (defs root:$root, int64_matchinfo:$matchinfo),
-  (match (G_SHUFFLE_VECTOR $root, $src1, $src2, $mask):$root,
+  (defs root:$root),
+  (match (G_SHUFFLE_VECTOR $dst, $src1, $src2, $mask):$root,
     [{ return Helper.matchCombineShuffle(*${root}); }]),
   (apply [{ Helper.applyCombineShuffle(*${root}); }])>;
 
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index ff27dd2fcf097..75960c6fe7faa 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -429,12 +429,11 @@ void CombinerHelper::applyCombineShuffle(MachineInstr &MI) const {
   auto SrcVec2 = Shuffle.getSrc2Reg();
   auto EltTy = MRI.getType(SrcVec1).getElementType();
   auto Width = MRI.getType(SrcVec1).getNumElements();
-  auto ExtractWidth = Shuffle.getMask().size();
 
   auto Unmerge1 = Builder.buildUnmerge(EltTy, SrcVec1);
   auto Unmerge2 = Builder.buildUnmerge(EltTy, SrcVec2);
 
-  llvm::SmallVector<Register> Extracts;
+  SmallVector<Register> Extracts;
   // Select only applicable elements from unmerged values.
   for (auto Val : Shuffle.getMask()) {
     if (Val == -1) {



More information about the llvm-commits mailing list