[llvm] [GISel][AMDGPU] Fold ShuffleVector into Extract (PR #124527)

Alan Li via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 12 17:58:34 PST 2025


https://github.com/lialan updated https://github.com/llvm/llvm-project/pull/124527

>From 7544ec3c7bc79517af4b6c55ac1d2b0ede2aec94 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Mon, 27 Jan 2025 18:26:19 +0800
Subject: [PATCH 01/15] First commit

---
 .../llvm/CodeGen/GlobalISel/CombinerHelper.h  |  4 ++
 .../include/llvm/Target/GlobalISel/Combine.td |  9 ++-
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 41 +++++++++++++
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 59 +++++++++++++++++++
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h  |  2 +
 5 files changed, 114 insertions(+), 1 deletion(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 9b78342c8fc39..c1c303fd18e6b 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -264,6 +264,10 @@ class CombinerHelper {
   void applyCombineShuffleConcat(MachineInstr &MI,
                                  SmallVector<Register> &Ops) const;
 
+  /// Replace \p MI with a narrow extract_subvector.
+  bool matchCombineShuffleExtract(MachineInstr &MI, int64_t &IsFirst) const;
+  void applyCombineShuffleExtract(MachineInstr &MI, int64_t IsFirst) const;
+
   /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
   /// Returns true if MI changed.
   ///
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 3590ab221ad44..30316305d9e4f 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -1560,6 +1560,13 @@ def combine_shuffle_concat : GICombineRule<
         [{ return Helper.matchCombineShuffleConcat(*${root}, ${matchinfo}); }]),
   (apply [{ Helper.applyCombineShuffleConcat(*${root}, ${matchinfo}); }])>;
 
+// Combines a shuffle of a vector into an extract_subvector.
+def combine_shuffle_vector : GICombineRule<
+  (defs root:$root, int64_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
+    [{ return Helper.matchCombineShuffleExtract(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyCombineShuffleExtract(*${root}, ${matchinfo}); }])>;
+
 def insert_vector_element_idx_undef : GICombineRule<
    (defs root:$root),
    (match (G_IMPLICIT_DEF $idx),
@@ -2026,7 +2033,7 @@ def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
     and_or_disjoint_mask, fma_combines, fold_binop_into_select,
     sub_add_reg, select_to_minmax,
     fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
-    simplify_neg_minmax, combine_concat_vector,
+    simplify_neg_minmax, combine_concat_vector, combine_shuffle_vector,
     sext_trunc, zext_trunc, prefer_sign_combines, shuffle_combines,
     combine_use_vector_truncate, merge_combines, overflow_combines]>;
 
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 0dfbb91f2ac54..2e517304d527a 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -384,6 +384,47 @@ void CombinerHelper::applyCombineConcatVectors(
   MI.eraseFromParent();
 }
 
+bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx) const {
+  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
+         "Invalid instruction");
+  auto &Shuffle = cast<GShuffleVector>(MI);
+  const auto &TLI = getTargetLowering();
+  
+  auto SrcVec1 = Shuffle.getSrc1Reg();
+  auto SrcVec2 = Shuffle.getSrc2Reg();
+  auto Mask = Shuffle.getMask();
+
+  int Width = MRI.getType(SrcVec1).getNumElements();
+
+  // Check if all elements are extracted from the same vector, or within single
+  // vector.
+  auto MaxValue = *std::max_element(Mask.begin(), Mask.end());
+  auto MinValue = *std::min_element(Mask.begin(), Mask.end());
+  if (MaxValue >= Width && MinValue < Width) {
+    return false;
+  }
+  // Check if the extractee's order is kept:
+  if (!std::is_sorted(Mask.begin(), Mask.end())) {
+    return false;
+  }
+
+  Idx = Mask.front();
+  return true;
+}
+
+void CombinerHelper::applyCombineShuffleExtract(MachineInstr &MI, int64_t Idx) const {
+  auto &Shuffle = cast<GShuffleVector>(MI);
+
+  auto SrcVec1 = Shuffle.getSrc1Reg();
+  auto SrcVec2 = Shuffle.getSrc2Reg();
+  int Width = MRI.getType(SrcVec1).getNumElements();
+
+  auto SrcVec = Idx < Width ? SrcVec1 : SrcVec2;
+
+  Builder.buildExtractSubvector(MI.getOperand(0).getReg(), SrcVec, Idx);
+  MI.eraseFromParent();
+}
+
 bool CombinerHelper::matchCombineShuffleConcat(
     MachineInstr &MI, SmallVector<Register> &Ops) const {
   ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index e9e47eaadd557..68b0a8b5aecbf 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -29,6 +29,7 @@
 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/GlobalISel/Utils.h"
+#include "llvm/CodeGen/Register.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
@@ -1832,6 +1833,11 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
       .lower();
   }
 
+  getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
+    //.fewerElementsIf(isWideVec16(0), changeTo(0, V2S16))
+    .customFor({V8S16, V4S16})
+    .lower();
+
   getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
     .unsupportedIf([=](const LegalityQuery &Query) {
         const LLT &EltTy = Query.Types[1].getElementType();
@@ -2127,6 +2133,8 @@ bool AMDGPULegalizerInfo::legalizeCustom(
   case TargetOpcode::G_FMINNUM_IEEE:
   case TargetOpcode::G_FMAXNUM_IEEE:
     return legalizeMinNumMaxNum(Helper, MI);
+  case TargetOpcode::G_EXTRACT_SUBVECTOR:
+    return legalizeExtractSubvector(MI, MRI, B);
   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
     return legalizeExtractVectorElt(MI, MRI, B);
   case TargetOpcode::G_INSERT_VECTOR_ELT:
@@ -2716,6 +2724,57 @@ bool AMDGPULegalizerInfo::legalizeMinNumMaxNum(LegalizerHelper &Helper,
   return Helper.lowerFMinNumMaxNum(MI) == LegalizerHelper::Legalized;
 }
 
+static auto buildExtractSubvector(MachineIRBuilder &B, SrcOp Src,
+                                  LLT DstTy, unsigned Start) {
+  SmallVector<Register, 8> Subvectors;
+  for (unsigned i = Start, e = Start + DstTy.getNumElements(); i != e; ++i) {
+    Subvectors.push_back(
+        B.buildExtractVectorElementConstant(DstTy.getElementType(), Src, i)
+            .getReg(0));
+  }
+  return B.buildBuildVector(DstTy, Subvectors);
+}
+
+bool AMDGPULegalizerInfo::legalizeExtractSubvector(
+  MachineInstr &MI, MachineRegisterInfo &MRI,
+  MachineIRBuilder &B) const {
+  const auto &Instr = llvm::cast<GExtractSubvector>(MI);
+  Register Src = Instr.getSrcVec();
+  Register Dst = MI.getOperand(0).getReg();
+  auto Start = Instr.getIndexImm();
+
+  LLT SrcTy = MRI.getType(Src);
+  LLT DstTy = MRI.getType(Dst);
+
+  LLT EltTy = SrcTy.getElementType();
+  assert(EltTy == DstTy.getElementType());
+  auto Count = DstTy.getNumElements();
+  assert(SrcTy.getNumElements() % 2 == 0 && Count % 2 == 0);
+
+  // Split vector size into legal sub vectors, and use build_vector
+  // to merge the result.
+  if (EltTy.getScalarSizeInBits() == 16 && Start % 2 == 0) {
+    bool UseScalar = Count == 2;
+    // Extract 32-bit registers at a time.
+    LLT NewSrcTy =
+        UseScalar ? S32 : LLT::fixed_vector(SrcTy.getNumElements() / 2, S32);
+    auto Bitcasted = B.buildBitcast(NewSrcTy, Src).getReg(0);
+    LLT NewDstTy = LLT::fixed_vector(DstTy.getNumElements() / 2, S32);
+
+    SmallVector<Register, 8> Subvectors;
+    for (unsigned i = Start / 2, e = (Start + Count) / 2; i != e; ++i) {
+      auto Subvec = B.buildExtractVectorElementConstant(S32, Bitcasted, i);
+      Subvectors.push_back(Subvec.getReg(0));
+    }
+
+    auto BuildVec = B.buildBuildVector(NewDstTy, Subvectors);
+    B.buildBitcast(Dst, BuildVec.getReg(0));
+    MI.eraseFromParent();
+    return true;
+  }
+  return false;
+}
+
 bool AMDGPULegalizerInfo::legalizeExtractVectorElt(
   MachineInstr &MI, MachineRegisterInfo &MRI,
   MachineIRBuilder &B) const {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index 86c15197805d2..7b55492afb982 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -56,6 +56,8 @@ class AMDGPULegalizerInfo final : public LegalizerInfo {
   bool legalizeFPTOI(MachineInstr &MI, MachineRegisterInfo &MRI,
                      MachineIRBuilder &B, bool Signed) const;
   bool legalizeMinNumMaxNum(LegalizerHelper &Helper, MachineInstr &MI) const;
+  bool legalizeExtractSubvector(MachineInstr &MI, MachineRegisterInfo &MRI,
+                                MachineIRBuilder &B) const;
   bool legalizeExtractVectorElt(MachineInstr &MI, MachineRegisterInfo &MRI,
                                 MachineIRBuilder &B) const;
   bool legalizeInsertVectorElt(MachineInstr &MI, MachineRegisterInfo &MRI,

>From ffdddd603e63b976e2d1f5455b6ea10448a20d77 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Mon, 27 Jan 2025 19:17:12 +0800
Subject: [PATCH 02/15] update

---
 llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 14 ++++++++++++--
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 12 ++++--------
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 2e517304d527a..fb91d7446319a 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -388,13 +388,16 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
          "Invalid instruction");
   auto &Shuffle = cast<GShuffleVector>(MI);
-  const auto &TLI = getTargetLowering();
   
   auto SrcVec1 = Shuffle.getSrc1Reg();
-  auto SrcVec2 = Shuffle.getSrc2Reg();
+  int SrcVec2 = Shuffle.getSrc2Reg();
   auto Mask = Shuffle.getMask();
 
   int Width = MRI.getType(SrcVec1).getNumElements();
+  int Width2 = MRI.getType(SrcVec2).getNumElements();
+
+  if (!llvm::isPowerOf2_32(Width))
+    return false;
 
   // Check if all elements are extracted from the same vector, or within single
   // vector.
@@ -403,6 +406,13 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
   if (MaxValue >= Width && MinValue < Width) {
     return false;
   }
+
+  // Check that the extractee length is power of 2.
+  if ((MaxValue < Width && !llvm::isPowerOf2_32(Width)) ||
+      (MinValue >= Width && !llvm::isPowerOf2_32(Width2))) {
+    return false;
+  }
+
   // Check if the extractee's order is kept:
   if (!std::is_sorted(Mask.begin(), Mask.end())) {
     return false;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 68b0a8b5aecbf..75017c23bb502 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2758,16 +2758,12 @@ bool AMDGPULegalizerInfo::legalizeExtractSubvector(
     // Extract 32-bit registers at a time.
     LLT NewSrcTy =
         UseScalar ? S32 : LLT::fixed_vector(SrcTy.getNumElements() / 2, S32);
-    auto Bitcasted = B.buildBitcast(NewSrcTy, Src).getReg(0);
     LLT NewDstTy = LLT::fixed_vector(DstTy.getNumElements() / 2, S32);
+    auto Bitcasted = B.buildBitcast(NewSrcTy, Src);
 
-    SmallVector<Register, 8> Subvectors;
-    for (unsigned i = Start / 2, e = (Start + Count) / 2; i != e; ++i) {
-      auto Subvec = B.buildExtractVectorElementConstant(S32, Bitcasted, i);
-      Subvectors.push_back(Subvec.getReg(0));
-    }
-
-    auto BuildVec = B.buildBuildVector(NewDstTy, Subvectors);
+    auto BuildVec =
+        UseScalar ? Bitcasted
+                  : buildExtractSubvector(B, Bitcasted, NewDstTy, Start / 2);
     B.buildBitcast(Dst, BuildVec.getReg(0));
     MI.eraseFromParent();
     return true;

>From 967138e82c39c560c8b84f8909a58fdfc7d0fa0e Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Mon, 27 Jan 2025 22:15:15 +0800
Subject: [PATCH 03/15] small fix

---
 llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 12 ++++++++++--
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp |  4 ++--
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index fb91d7446319a..88b0dfd46652a 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -407,14 +407,22 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
     return false;
   }
 
+  LLT ExtractTy =  MaxValue < Width ? MRI.getType(SrcVec1) : MRI.getType(SrcVec2);
+
   // Check that the extractee length is power of 2.
   if ((MaxValue < Width && !llvm::isPowerOf2_32(Width)) ||
       (MinValue >= Width && !llvm::isPowerOf2_32(Width2))) {
     return false;
   }
 
-  // Check if the extractee's order is kept:
-  if (!std::is_sorted(Mask.begin(), Mask.end())) {
+  // Check if the extractee's order is kept, and they should be consecutive.
+  for (size_t i = 1; i < Mask.size(); ++i) {
+    if (Mask[i] != Mask[i - 1] + 1 || Mask[i] == -1) {
+      return false; // Not consecutive
+    }
+  }
+
+  if (!LI->isLegalOrCustom({TargetOpcode::G_EXTRACT_SUBVECTOR, {ExtractTy}})) {
     return false;
   }
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 75017c23bb502..5873b7a56d6da 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1834,8 +1834,8 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
   }
 
   getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
-    //.fewerElementsIf(isWideVec16(0), changeTo(0, V2S16))
-    .customFor({V8S16, V4S16})
+    .widenScalarOrEltToNextPow2(0)
+    .customFor(AllS16Vectors)
     .lower();
 
   getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)

>From 9c98f96659dbca047b0c22e7b188a2db5130f925 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 28 Jan 2025 12:44:48 +0800
Subject: [PATCH 04/15] Fix scalar corner issues.

---
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 5873b7a56d6da..33cc0c872c643 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2727,6 +2727,9 @@ bool AMDGPULegalizerInfo::legalizeMinNumMaxNum(LegalizerHelper &Helper,
 static auto buildExtractSubvector(MachineIRBuilder &B, SrcOp Src,
                                   LLT DstTy, unsigned Start) {
   SmallVector<Register, 8> Subvectors;
+  if (!DstTy.isVector()) {
+    return B.buildExtractVectorElementConstant(DstTy, Src, Start);
+  }
   for (unsigned i = Start, e = Start + DstTy.getNumElements(); i != e; ++i) {
     Subvectors.push_back(
         B.buildExtractVectorElementConstant(DstTy.getElementType(), Src, i)
@@ -2754,16 +2757,16 @@ bool AMDGPULegalizerInfo::legalizeExtractSubvector(
   // Split vector size into legal sub vectors, and use build_vector
   // to merge the result.
   if (EltTy.getScalarSizeInBits() == 16 && Start % 2 == 0) {
-    bool UseScalar = Count == 2;
     // Extract 32-bit registers at a time.
-    LLT NewSrcTy =
-        UseScalar ? S32 : LLT::fixed_vector(SrcTy.getNumElements() / 2, S32);
-    LLT NewDstTy = LLT::fixed_vector(DstTy.getNumElements() / 2, S32);
+    LLT NewSrcTy = SrcTy.getNumElements() == 2
+                       ? S32
+                       : LLT::fixed_vector(SrcTy.getNumElements() / 2, S32);
     auto Bitcasted = B.buildBitcast(NewSrcTy, Src);
 
-    auto BuildVec =
-        UseScalar ? Bitcasted
-                  : buildExtractSubvector(B, Bitcasted, NewDstTy, Start / 2);
+    LLT NewDstTy = DstTy.getNumElements() == 2
+                       ? S32
+                       : LLT::fixed_vector(DstTy.getNumElements() / 2, S32);
+    auto BuildVec = buildExtractSubvector(B, Bitcasted, NewDstTy, Start / 2);
     B.buildBitcast(Dst, BuildVec.getReg(0));
     MI.eraseFromParent();
     return true;

>From a45ef34ad93772ca0cff5bdc43f5199a447cbe4b Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 11 Feb 2025 07:28:28 -0800
Subject: [PATCH 05/15] update

---
 llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 88b0dfd46652a..e28f93711b215 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -37,6 +37,7 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Target/TargetMachine.h"
+#include <algorithm>
 #include <cmath>
 #include <optional>
 #include <tuple>
@@ -415,11 +416,13 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
     return false;
   }
 
-  // Check if the extractee's order is kept, and they should be conscecutive.
-  for (size_t i = 1; i < Mask.size(); ++i) {
-    if (Mask[i] != Mask[i - 1] + 1 || Mask[i] == -1) {
-      return false; // Not consecutive
-    }
+  // Check if the extractee's order is kept, and they should be consecutive.
+  bool isConsecutive =
+      std::adjacent_find(Mask.begin(), Mask.end(), [](int a, int b) {
+        return b != a + 1 || b == -1;
+      }) == Mask.end();
+  if (!isConsecutive) {
+    return false;
   }
 
   if (!LI->isLegalOrCustom({TargetOpcode::G_EXTRACT_SUBVECTOR, {ExtractTy}})) {

>From 97da0ee0509125279f91d36c0b7f15fb8fd14b4b Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 11 Feb 2025 13:03:32 -0800
Subject: [PATCH 06/15] Checkup

---
 .../llvm/CodeGen/GlobalISel/LegalizerHelper.h |  3 +
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp | 14 ++---
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    | 33 ++++++++++
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 63 ++++++++++---------
 4 files changed, 78 insertions(+), 35 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 4e18f5cc913a7..ac2a763b22402 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -347,6 +347,9 @@ class LegalizerHelper {
   LegalizeResult narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
                                              LLT HalfTy, LLT ShiftAmtTy);
 
+  LegalizeResult fewerElementsExtractSubvector(MachineInstr &MI,
+                                               unsigned TypeIdx, LLT NarrowTy);
+
   LegalizeResult fewerElementsVectorReductions(MachineInstr &MI,
                                                unsigned TypeIdx, LLT NarrowTy);
   LegalizeResult fewerElementsVectorSeqReductions(MachineInstr &MI,
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index e28f93711b215..baf1232e098c8 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -416,18 +416,18 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
     return false;
   }
 
-  // Check if the extractee's order is kept, and they should be consecutive.
-  bool isConsecutive =
-      std::adjacent_find(Mask.begin(), Mask.end(), [](int a, int b) {
-        return b != a + 1 || b == -1;
-      }) == Mask.end();
-  if (!isConsecutive) {
-    return false;
+  // Check if the extractee's order is kept, and they should be consecutive.
+  for (size_t i = 1; i < Mask.size(); ++i) {
+    if (Mask[i] != Mask[i - 1] + 1 || Mask[i] == -1) {
+      return false; // Not consecutive
+    }
   }
 
+  /*
   if (!LI->isLegalOrCustom({TargetOpcode::G_EXTRACT_SUBVECTOR, {ExtractTy}})) {
     return false;
   }
+  */
 
   Idx = Mask.front();
   return true;
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index d4cb224c35d74..b306ac786e456 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -5463,6 +5463,8 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
     return fewerElementsVectorSeqReductions(MI, TypeIdx, NarrowTy);
   case G_SHUFFLE_VECTOR:
     return fewerElementsVectorShuffle(MI, TypeIdx, NarrowTy);
+  case G_EXTRACT_SUBVECTOR:
+    return fewerElementsExtractSubvector(MI, TypeIdx, NarrowTy);
   case G_FPOWI:
     return fewerElementsVectorMultiEltType(GMI, NumElts, {2 /*pow*/});
   case G_BITCAST:
@@ -5643,6 +5645,36 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorShuffle(
   return Legalized;
 }
 
+LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsExtractSubvector(
+    MachineInstr &MI, unsigned int TypeIdx, LLT NarrowTy) {
+  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);
+  if (TypeIdx != 0)
+    return UnableToLegalize;
+
+  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
+
+
+  if (!isPowerOf2_32(DstTy.getNumElements()))
+    return UnableToLegalize;
+
+  uint64_t SplitIdx = MI.getOperand(2).getImm();
+  unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
+  unsigned SplitParts = DstTy.getNumElements() / NewElts;
+  
+  // Split the sources into NarrowTy size pieces.
+  SmallVector<Register> SplitDstParts;
+
+  for (unsigned i = 0; i < SplitParts; i++) {
+    auto Part = MIRBuilder.buildExtractSubvector(NarrowTy, SrcReg,
+                                                 i * NewElts + SplitIdx);
+    SplitDstParts.push_back(Part.getReg(0));
+  }
+
+  MIRBuilder.buildMergeLikeInstr(DstReg, SplitDstParts);
+  MI.eraseFromParent();
+  return Legalized;
+}
+
 LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorReductions(
     MachineInstr &MI, unsigned int TypeIdx, LLT NarrowTy) {
   auto &RdxMI = cast<GVecReduce>(MI);
@@ -6049,6 +6081,7 @@ LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
   unsigned Opc = MI.getOpcode();
   switch (Opc) {
   case TargetOpcode::G_IMPLICIT_DEF:
+  case TargetOpcode::G_EXTRACT_SUBVECTOR:
   case TargetOpcode::G_LOAD: {
     if (TypeIdx != 0)
       return UnableToLegalize;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 33cc0c872c643..8156bd326b25f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -108,6 +108,18 @@ static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
   };
 }
 
+static LegalizeMutation fewerEltsToSize32Vector(unsigned TypeIdx) {
+  return [=](const LegalityQuery &Query) {
+    const LLT Ty = Query.Types[TypeIdx];
+    const LLT EltTy = Ty.getElementType();
+    unsigned Size = Ty.getSizeInBits();
+    unsigned Pieces = llvm::divideCeil(Size, 32);
+    unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
+    return std::pair(TypeIdx, LLT::scalarOrVector(
+                                  ElementCount::getFixed(NewNumElts), EltTy));
+  };
+}
+
 static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
@@ -1834,9 +1846,10 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
   }
 
   getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
-    .widenScalarOrEltToNextPow2(0)
-    .customFor(AllS16Vectors)
-    .lower();
+      .widenScalarOrEltToNextPow2(0, 2)
+      .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
+      .fewerElementsIf(vectorWiderThan(0, 32), fewerEltsToSize32Vector(0))
+      .customIf(sizeIsMultipleOf32(0));
 
   getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
     .unsupportedIf([=](const LegalityQuery &Query) {
@@ -2742,36 +2755,30 @@ bool AMDGPULegalizerInfo::legalizeExtractSubvector(
   MachineInstr &MI, MachineRegisterInfo &MRI,
   MachineIRBuilder &B) const {
   const auto &Instr = llvm::cast<GExtractSubvector>(MI);
-  Register Src = Instr.getSrcVec();
-  Register Dst = MI.getOperand(0).getReg();
-  auto Start = Instr.getIndexImm();
 
-  LLT SrcTy = MRI.getType(Src);
-  LLT DstTy = MRI.getType(Dst);
+  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
+  auto Start = Instr.getIndexImm();
 
   LLT EltTy = SrcTy.getElementType();
   assert(EltTy == DstTy.getElementType());
-  auto Count = DstTy.getNumElements();
-  assert(SrcTy.getNumElements() % 2 == 0 && Count % 2 == 0);
-
-  // Split vector size into legal sub vectors, and use build_vector
-  // to merge the result.
-  if (EltTy.getScalarSizeInBits() == 16 && Start % 2 == 0) {
-    // Extract 32-bit registers at a time.
-    LLT NewSrcTy = SrcTy.getNumElements() == 2
-                       ? S32
-                       : LLT::fixed_vector(SrcTy.getNumElements() / 2, S32);
-    auto Bitcasted = B.buildBitcast(NewSrcTy, Src);
-
-    LLT NewDstTy = DstTy.getNumElements() == 2
-                       ? S32
-                       : LLT::fixed_vector(DstTy.getNumElements() / 2, S32);
-    auto BuildVec = buildExtractSubvector(B, Bitcasted, NewDstTy, Start / 2);
-    B.buildBitcast(Dst, BuildVec.getReg(0));
-    MI.eraseFromParent();
-    return true;
+
+  if (DstTy.getSizeInBits() % 32 != 0) {
+    return false;
   }
-  return false;
+
+  // Extract 32-bit registers at a time.
+  LLT NewSrcTy = SrcTy.getNumElements() == 2
+                     ? S32
+                     : LLT::fixed_vector(SrcTy.getNumElements() / 2, S32);
+  auto Bitcasted = B.buildBitcast(NewSrcTy, SrcReg);
+
+  LLT NewDstTy = DstTy.getNumElements() == 2
+                     ? S32
+                     : LLT::fixed_vector(DstTy.getNumElements() / 2, S32);
+  auto BuildVec = buildExtractSubvector(B, Bitcasted, NewDstTy, Start / 2);
+  B.buildBitcast(DstReg, BuildVec.getReg(0));
+  MI.eraseFromParent();
+  return true;
 }
 
 bool AMDGPULegalizerInfo::legalizeExtractVectorElt(

>From 357d61bc00eb0ea9dda7808a5027669074444285 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 11 Feb 2025 14:45:07 -0800
Subject: [PATCH 07/15] update

---
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp |  8 -----
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 32 ++++++++++++++++---
 2 files changed, 28 insertions(+), 12 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index baf1232e098c8..0b06683053fbd 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -408,8 +408,6 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
     return false;
   }
 
-  LLT ExtractTy =  MaxValue < Width ? MRI.getType(SrcVec1) : MRI.getType(SrcVec2);
-
   // Check that the extractee length is power of 2.
   if ((MaxValue < Width && !llvm::isPowerOf2_32(Width)) ||
       (MinValue >= Width && !llvm::isPowerOf2_32(Width2))) {
@@ -423,12 +421,6 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
     }
   }
 
-  /*
-  if (!LI->isLegalOrCustom({TargetOpcode::G_EXTRACT_SUBVECTOR, {ExtractTy}})) {
-    return false;
-  }
-  */
-
   Idx = Mask.front();
   return true;
 }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 8156bd326b25f..1497027ccb0a4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -84,6 +84,18 @@ static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
   };
 }
 
+static LegalityPredicate isSmallWideVector(unsigned TypeIdx) {
+  return [=](const LegalityQuery &Query) {
+    const LLT Ty = Query.Types[TypeIdx];
+    if (!Ty.isVector())
+      return false;
+
+    const LLT EltTy = Ty.getElementType();
+    const unsigned EltSize = EltTy.getSizeInBits();
+    return Ty.getSizeInBits() > 32 && EltSize < 32;
+  };
+}
+
 static LegalityPredicate sizeIsMultipleOf32(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
@@ -99,6 +111,17 @@ static LegalityPredicate isWideVec16(unsigned TypeIdx) {
   };
 }
 
+static LegalityPredicate isVectorOfEltsNoBiggerThan(unsigned TypeIdx, int Size) {
+  return [=](const LegalityQuery &Query) {
+    const LLT Ty = Query.Types[TypeIdx];
+    if (!Ty.isVector())
+      return false;
+
+    const LLT EltTy = Ty.getElementType();
+    return EltTy.getSizeInBits() <= 32;
+  };
+}
+
 static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
@@ -172,6 +195,7 @@ static LegalizeMutation moreElementsToNextExistingRegClass(unsigned TypeIdx) {
   };
 }
 
+
 static LLT getBufferRsrcScalarType(const LLT Ty) {
   if (!Ty.isVector())
     return LLT::scalar(128);
@@ -1846,10 +1870,10 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
   }
 
   getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
-      .widenScalarOrEltToNextPow2(0, 2)
-      .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
-      .fewerElementsIf(vectorWiderThan(0, 32), fewerEltsToSize32Vector(0))
-      .customIf(sizeIsMultipleOf32(0));
+      //.widenScalarOrEltToNextPow2(0, 2)
+      //.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
+      //.fewerElementsIf(isSmallWideVector(0), fewerEltsToSize32Vector(0))
+      .customIf(all(isVectorOfEltsNoBiggerThan(0, 32), sizeIsMultipleOf32(0)));
 
   getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
     .unsupportedIf([=](const LegalityQuery &Query) {

>From 16719cd8abf3160fb947fabbd9cbde4bd8003f67 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 11 Feb 2025 15:09:14 -0800
Subject: [PATCH 08/15] remove unneeded parts

---
 .../llvm/CodeGen/GlobalISel/LegalizerHelper.h |  3 -
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp |  4 +-
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    | 33 -------
 .../lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp | 89 -------------------
 llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h  |  2 -
 5 files changed, 2 insertions(+), 129 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index ac2a763b22402..4e18f5cc913a7 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -347,9 +347,6 @@ class LegalizerHelper {
   LegalizeResult narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
                                              LLT HalfTy, LLT ShiftAmtTy);
 
-  LegalizeResult fewerElementsExtractSubvector(MachineInstr &MI,
-                                               unsigned TypeIdx, LLT NarrowTy);
-
   LegalizeResult fewerElementsVectorReductions(MachineInstr &MI,
                                                unsigned TypeIdx, LLT NarrowTy);
   LegalizeResult fewerElementsVectorSeqReductions(MachineInstr &MI,
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 0b06683053fbd..b8a800ba0da03 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -37,7 +37,6 @@
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
 #include "llvm/Target/TargetMachine.h"
-#include <algorithm>
 #include <cmath>
 #include <optional>
 #include <tuple>
@@ -430,11 +429,12 @@ void CombinerHelper::applyCombineShuffleExtract(MachineInstr &MI, int64_t Idx) c
 
   auto SrcVec1 = Shuffle.getSrc1Reg();
   auto SrcVec2 = Shuffle.getSrc2Reg();
+  auto EltTy = MRI.getType(SrcVec1).getElementType();
   int Width = MRI.getType(SrcVec1).getNumElements();
 
   auto SrcVec = Idx < Width ? SrcVec1 : SrcVec2;
 
-  Builder.buildExtractSubvector(MI.getOperand(0).getReg(), SrcVec, Idx);
+  Builder.buildExtract(MI.getOperand(0).getReg(), SrcVec, Idx * EltTy.getSizeInBits());
   MI.eraseFromParent();
 }
 
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index b306ac786e456..d4cb224c35d74 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -5463,8 +5463,6 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
     return fewerElementsVectorSeqReductions(MI, TypeIdx, NarrowTy);
   case G_SHUFFLE_VECTOR:
     return fewerElementsVectorShuffle(MI, TypeIdx, NarrowTy);
-  case G_EXTRACT_SUBVECTOR:
-    return fewerElementsExtractSubvector(MI, TypeIdx, NarrowTy);
   case G_FPOWI:
     return fewerElementsVectorMultiEltType(GMI, NumElts, {2 /*pow*/});
   case G_BITCAST:
@@ -5645,36 +5643,6 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorShuffle(
   return Legalized;
 }
 
-LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsExtractSubvector(
-    MachineInstr &MI, unsigned int TypeIdx, LLT NarrowTy) {
-  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);
-  if (TypeIdx != 0)
-    return UnableToLegalize;
-
-  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
-
-
-  if (!isPowerOf2_32(DstTy.getNumElements()))
-    return UnableToLegalize;
-
-  uint64_t SplitIdx = MI.getOperand(2).getImm();
-  unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1;
-  unsigned SplitParts = DstTy.getNumElements() / NewElts;
-  
-  // Split the sources into NarrowTy size pieces.
-  SmallVector<Register> SplitDstParts;
-
-  for (unsigned i = 0; i < SplitParts; i++) {
-    auto Part = MIRBuilder.buildExtractSubvector(NarrowTy, SrcReg,
-                                                 i * NewElts + SplitIdx);
-    SplitDstParts.push_back(Part.getReg(0));
-  }
-
-  MIRBuilder.buildMergeLikeInstr(DstReg, SplitDstParts);
-  MI.eraseFromParent();
-  return Legalized;
-}
-
 LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorReductions(
     MachineInstr &MI, unsigned int TypeIdx, LLT NarrowTy) {
   auto &RdxMI = cast<GVecReduce>(MI);
@@ -6081,7 +6049,6 @@ LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx,
   unsigned Opc = MI.getOpcode();
   switch (Opc) {
   case TargetOpcode::G_IMPLICIT_DEF:
-  case TargetOpcode::G_EXTRACT_SUBVECTOR:
   case TargetOpcode::G_LOAD: {
     if (TypeIdx != 0)
       return UnableToLegalize;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 1497027ccb0a4..e9e47eaadd557 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -29,7 +29,6 @@
 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/GlobalISel/Utils.h"
-#include "llvm/CodeGen/Register.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
@@ -84,18 +83,6 @@ static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
   };
 }
 
-static LegalityPredicate isSmallWideVector(unsigned TypeIdx) {
-  return [=](const LegalityQuery &Query) {
-    const LLT Ty = Query.Types[TypeIdx];
-    if (!Ty.isVector())
-      return false;
-
-    const LLT EltTy = Ty.getElementType();
-    const unsigned EltSize = EltTy.getSizeInBits();
-    return Ty.getSizeInBits() > 32 && EltSize < 32;
-  };
-}
-
 static LegalityPredicate sizeIsMultipleOf32(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
@@ -111,17 +98,6 @@ static LegalityPredicate isWideVec16(unsigned TypeIdx) {
   };
 }
 
-static LegalityPredicate isVectorOfEltsNoBiggerThan(unsigned TypeIdx, int Size) {
-  return [=](const LegalityQuery &Query) {
-    const LLT Ty = Query.Types[TypeIdx];
-    if (!Ty.isVector())
-      return false;
-
-    const LLT EltTy = Ty.getElementType();
-    return EltTy.getSizeInBits() <= 32;
-  };
-}
-
 static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
@@ -131,18 +107,6 @@ static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
   };
 }
 
-static LegalizeMutation fewerEltsToSize32Vector(unsigned TypeIdx) {
-  return [=](const LegalityQuery &Query) {
-    const LLT Ty = Query.Types[TypeIdx];
-    const LLT EltTy = Ty.getElementType();
-    unsigned Size = Ty.getSizeInBits();
-    unsigned Pieces = llvm::divideCeil(Size, 32);
-    unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
-    return std::pair(TypeIdx, LLT::scalarOrVector(
-                                  ElementCount::getFixed(NewNumElts), EltTy));
-  };
-}
-
 static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT Ty = Query.Types[TypeIdx];
@@ -195,7 +159,6 @@ static LegalizeMutation moreElementsToNextExistingRegClass(unsigned TypeIdx) {
   };
 }
 
-
 static LLT getBufferRsrcScalarType(const LLT Ty) {
   if (!Ty.isVector())
     return LLT::scalar(128);
@@ -1869,12 +1832,6 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
       .lower();
   }
 
-  getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
-      //.widenScalarOrEltToNextPow2(0, 2)
-      //.moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
-      //.fewerElementsIf(isSmallWideVector(0), fewerEltsToSize32Vector(0))
-      .customIf(all(isVectorOfEltsNoBiggerThan(0, 32), sizeIsMultipleOf32(0)));
-
   getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
     .unsupportedIf([=](const LegalityQuery &Query) {
         const LLT &EltTy = Query.Types[1].getElementType();
@@ -2170,8 +2127,6 @@ bool AMDGPULegalizerInfo::legalizeCustom(
   case TargetOpcode::G_FMINNUM_IEEE:
   case TargetOpcode::G_FMAXNUM_IEEE:
     return legalizeMinNumMaxNum(Helper, MI);
-  case TargetOpcode::G_EXTRACT_SUBVECTOR:
-    return legalizeExtractSubvector(MI, MRI, B);
   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
     return legalizeExtractVectorElt(MI, MRI, B);
   case TargetOpcode::G_INSERT_VECTOR_ELT:
@@ -2761,50 +2716,6 @@ bool AMDGPULegalizerInfo::legalizeMinNumMaxNum(LegalizerHelper &Helper,
   return Helper.lowerFMinNumMaxNum(MI) == LegalizerHelper::Legalized;
 }
 
-static auto buildExtractSubvector(MachineIRBuilder &B, SrcOp Src,
-                                  LLT DstTy, unsigned Start) {
-  SmallVector<Register, 8> Subvectors;
-  if (!DstTy.isVector()) {
-    return B.buildExtractVectorElementConstant(DstTy, Src, Start);
-  }
-  for (unsigned i = Start, e = Start + DstTy.getNumElements(); i != e; ++i) {
-    Subvectors.push_back(
-        B.buildExtractVectorElementConstant(DstTy.getElementType(), Src, i)
-            .getReg(0));
-  }
-  return B.buildBuildVector(DstTy, Subvectors);
-}
-
-bool AMDGPULegalizerInfo::legalizeExtractSubvector(
-  MachineInstr &MI, MachineRegisterInfo &MRI,
-  MachineIRBuilder &B) const {
-  const auto &Instr = llvm::cast<GExtractSubvector>(MI);
-
-  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
-  auto Start = Instr.getIndexImm();
-
-  LLT EltTy = SrcTy.getElementType();
-  assert(EltTy == DstTy.getElementType());
-
-  if (DstTy.getSizeInBits() % 32 != 0) {
-    return false;
-  }
-
-  // Extract 32-bit registers at a time.
-  LLT NewSrcTy = SrcTy.getNumElements() == 2
-                     ? S32
-                     : LLT::fixed_vector(SrcTy.getNumElements() / 2, S32);
-  auto Bitcasted = B.buildBitcast(NewSrcTy, SrcReg);
-
-  LLT NewDstTy = DstTy.getNumElements() == 2
-                     ? S32
-                     : LLT::fixed_vector(DstTy.getNumElements() / 2, S32);
-  auto BuildVec = buildExtractSubvector(B, Bitcasted, NewDstTy, Start / 2);
-  B.buildBitcast(DstReg, BuildVec.getReg(0));
-  MI.eraseFromParent();
-  return true;
-}
-
 bool AMDGPULegalizerInfo::legalizeExtractVectorElt(
   MachineInstr &MI, MachineRegisterInfo &MRI,
   MachineIRBuilder &B) const {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index 7b55492afb982..86c15197805d2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -56,8 +56,6 @@ class AMDGPULegalizerInfo final : public LegalizerInfo {
   bool legalizeFPTOI(MachineInstr &MI, MachineRegisterInfo &MRI,
                      MachineIRBuilder &B, bool Signed) const;
   bool legalizeMinNumMaxNum(LegalizerHelper &Helper, MachineInstr &MI) const;
-  bool legalizeExtractSubvector(MachineInstr &MI, MachineRegisterInfo &MRI,
-                                MachineIRBuilder &B) const;
   bool legalizeExtractVectorElt(MachineInstr &MI, MachineRegisterInfo &MRI,
                                 MachineIRBuilder &B) const;
   bool legalizeInsertVectorElt(MachineInstr &MI, MachineRegisterInfo &MRI,

>From f1cdcc33429958b989ef85646aec20f59f22f9a9 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 11 Feb 2025 17:36:57 -0800
Subject: [PATCH 09/15] Update AMDGPU GISel tests

---
 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.tbuffer.store.d16.ll  | 2 --
 .../CodeGen/AMDGPU/llvm.amdgcn.struct.tbuffer.store.d16.ll     | 3 +--
 2 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.tbuffer.store.d16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.tbuffer.store.d16.ll
index 63b139bb25e77..0c46ccda17640 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.tbuffer.store.d16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.raw.tbuffer.store.d16.ll
@@ -184,8 +184,6 @@ define amdgpu_kernel void @tbuffer_store_d16_xyz(<4 x i32> %rsrc, <4 x half> %da
 ; GFX12-PACKED-GISEL-NEXT:    s_load_b64 s[6:7], s[4:5], 0x34
 ; GFX12-PACKED-GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
 ; GFX12-PACKED-GISEL-NEXT:    s_wait_kmcnt 0x0
-; GFX12-PACKED-GISEL-NEXT:    s_pack_lh_b32_b16 s6, s6, s6
-; GFX12-PACKED-GISEL-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX12-PACKED-GISEL-NEXT:    v_mov_b32_e32 v0, s6
 ; GFX12-PACKED-GISEL-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX12-PACKED-GISEL-NEXT:    tbuffer_store_d16_format_xyzw v[0:1], off, s[0:3], null format:[BUF_FMT_10_10_10_2_SNORM]
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.tbuffer.store.d16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.tbuffer.store.d16.ll
index 17ebb1a835462..2d5c95156c6f2 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.tbuffer.store.d16.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.struct.tbuffer.store.d16.ll
@@ -204,10 +204,9 @@ define amdgpu_kernel void @tbuffer_store_d16_xyz(<4 x i32> %rsrc, <4 x half> %da
 ; GFX12-PACKED-GISEL-NEXT:    s_load_b96 s[8:10], s[4:5], 0x10
 ; GFX12-PACKED-GISEL-NEXT:    s_load_b128 s[0:3], s[4:5], 0x0
 ; GFX12-PACKED-GISEL-NEXT:    s_wait_kmcnt 0x0
-; GFX12-PACKED-GISEL-NEXT:    s_pack_lh_b32_b16 s8, s8, s8
-; GFX12-PACKED-GISEL-NEXT:    v_mov_b32_e32 v2, s10
 ; GFX12-PACKED-GISEL-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX12-PACKED-GISEL-NEXT:    v_mov_b32_e32 v1, s9
+; GFX12-PACKED-GISEL-NEXT:    v_mov_b32_e32 v2, s10
 ; GFX12-PACKED-GISEL-NEXT:    tbuffer_store_d16_format_xyzw v[0:1], v2, s[0:3], null format:[BUF_FMT_10_10_10_2_SNORM] idxen
 ; GFX12-PACKED-GISEL-NEXT:    s_endpgm
 main_body:

>From 8af99ab174450417b4330e902df688cbda3b3d4c Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 11 Feb 2025 18:36:36 -0800
Subject: [PATCH 10/15] Fix some AArch64 tests

---
 llvm/test/CodeGen/AArch64/aarch64-smull.ll |  86 ++++++++---------
 llvm/test/CodeGen/AArch64/arm64-vabs.ll    | 105 ++++++++++++---------
 llvm/test/CodeGen/AArch64/arm64-vadd.ll    |  67 ++++++++-----
 llvm/test/CodeGen/AArch64/arm64-vshift.ll  |  30 +++---
 4 files changed, 156 insertions(+), 132 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
index 3b589d3480179..390953490e87d 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
@@ -4,7 +4,16 @@
 ; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 ; CHECK-GI:       warning: Instruction selection used fallback path for pmlsl2_v8i16_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl2_v8i16_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl2_v8i16_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl2_v4i32_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl2_v4i32_uzp1
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for pmlsl_pmlsl2_v8i16_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl_smlsl2_v8i16_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl_umlsl2_v8i16_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl_smlsl2_v4i32_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl_umlsl2_v4i32_uzp1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for do_stuff
 
 define <8 x i16> @smull_v8i8_v8i16(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_v8i8_v8i16:
@@ -2030,9 +2039,8 @@ define void @smlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: smlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
-; CHECK-GI-NEXT:    smlsl v1.8h, v0.8b, v2.8b
+; CHECK-GI-NEXT:    uzp1 v2.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT:    smlsl2 v1.8h, v0.16b, v2.16b
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2065,9 +2073,8 @@ define void @umlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: umlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
-; CHECK-GI-NEXT:    umlsl v1.8h, v0.8b, v2.8b
+; CHECK-GI-NEXT:    uzp1 v2.16b, v0.16b, v2.16b
+; CHECK-GI-NEXT:    umlsl2 v1.8h, v0.16b, v2.16b
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2100,9 +2107,8 @@ define void @smlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: smlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
-; CHECK-GI-NEXT:    smlsl v1.4s, v0.4h, v2.4h
+; CHECK-GI-NEXT:    uzp1 v2.8h, v0.8h, v2.8h
+; CHECK-GI-NEXT:    smlsl2 v1.4s, v0.8h, v2.8h
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2135,9 +2141,8 @@ define void @umlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: umlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
-; CHECK-GI-NEXT:    umlsl v1.4s, v0.4h, v2.4h
+; CHECK-GI-NEXT:    uzp1 v2.8h, v0.8h, v2.8h
+; CHECK-GI-NEXT:    umlsl2 v1.4s, v0.8h, v2.8h
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2198,14 +2203,11 @@ define void @smlsl_smlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: smlsl_smlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q4, q2, [x1]
-; CHECK-GI-NEXT:    mov d3, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
-; CHECK-GI-NEXT:    xtn v4.8b, v4.8h
-; CHECK-GI-NEXT:    smull v2.8h, v3.8b, v2.8b
-; CHECK-GI-NEXT:    smlal v2.8h, v0.8b, v4.8b
-; CHECK-GI-NEXT:    sub v0.8h, v1.8h, v2.8h
-; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ldp q2, q3, [x1]
+; CHECK-GI-NEXT:    uzp1 v2.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT:    smlsl v1.8h, v0.8b, v2.8b
+; CHECK-GI-NEXT:    smlsl2 v1.8h, v0.16b, v2.16b
+; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <8 x i16>, ptr %3, align 4
@@ -2244,14 +2246,11 @@ define void @umlsl_umlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: umlsl_umlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q4, q2, [x1]
-; CHECK-GI-NEXT:    mov d3, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
-; CHECK-GI-NEXT:    xtn v4.8b, v4.8h
-; CHECK-GI-NEXT:    umull v2.8h, v3.8b, v2.8b
-; CHECK-GI-NEXT:    umlal v2.8h, v0.8b, v4.8b
-; CHECK-GI-NEXT:    sub v0.8h, v1.8h, v2.8h
-; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ldp q2, q3, [x1]
+; CHECK-GI-NEXT:    uzp1 v2.16b, v2.16b, v3.16b
+; CHECK-GI-NEXT:    umlsl v1.8h, v0.8b, v2.8b
+; CHECK-GI-NEXT:    umlsl2 v1.8h, v0.16b, v2.16b
+; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <8 x i16>, ptr %3, align 4
@@ -2290,14 +2289,11 @@ define void @smlsl_smlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: smlsl_smlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q4, q2, [x1]
-; CHECK-GI-NEXT:    mov d3, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
-; CHECK-GI-NEXT:    xtn v4.4h, v4.4s
-; CHECK-GI-NEXT:    smull v2.4s, v3.4h, v2.4h
-; CHECK-GI-NEXT:    smlal v2.4s, v0.4h, v4.4h
-; CHECK-GI-NEXT:    sub v0.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ldp q2, q3, [x1]
+; CHECK-GI-NEXT:    uzp1 v2.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT:    smlsl v1.4s, v0.4h, v2.4h
+; CHECK-GI-NEXT:    smlsl2 v1.4s, v0.8h, v2.8h
+; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <4 x i32>, ptr %3, align 4
@@ -2336,14 +2332,11 @@ define void @umlsl_umlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: umlsl_umlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q4, q2, [x1]
-; CHECK-GI-NEXT:    mov d3, v0.d[1]
-; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
-; CHECK-GI-NEXT:    xtn v4.4h, v4.4s
-; CHECK-GI-NEXT:    umull v2.4s, v3.4h, v2.4h
-; CHECK-GI-NEXT:    umlal v2.4s, v0.4h, v4.4h
-; CHECK-GI-NEXT:    sub v0.4s, v1.4s, v2.4s
-; CHECK-GI-NEXT:    str q0, [x0]
+; CHECK-GI-NEXT:    ldp q2, q3, [x1]
+; CHECK-GI-NEXT:    uzp1 v2.8h, v2.8h, v3.8h
+; CHECK-GI-NEXT:    umlsl v1.4s, v0.4h, v2.4h
+; CHECK-GI-NEXT:    umlsl2 v1.4s, v0.8h, v2.8h
+; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <4 x i32>, ptr %3, align 4
@@ -2380,9 +2373,8 @@ define <2 x i32> @do_stuff(<2 x i64> %0, <2 x i64> %1) {
 ;
 ; CHECK-GI-LABEL: do_stuff:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    xtn v0.2s, v0.2d
-; CHECK-GI-NEXT:    mov d2, v1.d[1]
-; CHECK-GI-NEXT:    smull v0.2d, v2.2s, v0.2s
+; CHECK-GI-NEXT:    uzp1 v0.4s, v0.4s, v0.4s
+; CHECK-GI-NEXT:    smull2 v0.2d, v1.4s, v0.4s
 ; CHECK-GI-NEXT:    xtn v0.2s, v0.2d
 ; CHECK-GI-NEXT:    add v0.2s, v0.2s, v1.2s
 ; CHECK-GI-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index cc8568709ea21..084ad1a181c12 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -2,9 +2,26 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck -check-prefixes=CHECK,CHECK-SD %s
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:  warning: Instruction selection used fallback path for fabds
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fabdd
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabd_i64
+; CHECK-GI:  warning: Instruction selection used fallback path for sabdl2_8h
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabdl2_4s
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabdl2_2d
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl2_8h
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl2_4s
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl2_2d
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabal2_8h
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabal2_4s
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabal2_2d
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabal2_8h
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabal2_4s
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabal2_2d
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fabds
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fabdd
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl_from_extract_dup
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabdl2_from_extract_dup
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabdl_from_extract_dup
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sabdl2_from_extract_dup
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabd_i64
+
 
 define <8 x i16> @sabdl8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabdl8h:
@@ -58,9 +75,9 @@ define <8 x i16> @sabdl2_8h(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: sabdl2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    sabdl2.8h v0, v0, v1
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    sabdl.8h v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -81,9 +98,9 @@ define <4 x i32> @sabdl2_4s(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: sabdl2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    sabdl2.4s v0, v0, v1
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    sabdl.4s v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -104,9 +121,9 @@ define <2 x i64> @sabdl2_2d(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: sabdl2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    sabdl2.2d v0, v0, v1
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    sabdl.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -169,9 +186,9 @@ define <8 x i16> @uabdl2_8h(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: uabdl2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    uabdl2.8h v0, v0, v1
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    uabdl.8h v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -193,9 +210,9 @@ define <4 x i32> @uabdl2_4s(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: uabdl2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    uabdl2.4s v0, v0, v1
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    uabdl.4s v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -216,9 +233,9 @@ define <2 x i64> @uabdl2_2d(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: uabdl2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    uabdl2.2d v0, v0, v1
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    uabdl.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -1146,10 +1163,10 @@ define <8 x i16> @sabal2_8h(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: sabal2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    sabal2.8h v0, v1, v2
+; CHECK-GI-NEXT:    ldr d1, [x0, #8]
+; CHECK-GI-NEXT:    ldr d2, [x1, #8]
+; CHECK-GI-NEXT:    sabal.8h v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -1173,10 +1190,10 @@ define <4 x i32> @sabal2_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: sabal2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    sabal2.4s v0, v1, v2
+; CHECK-GI-NEXT:    ldr d1, [x0, #8]
+; CHECK-GI-NEXT:    ldr d2, [x1, #8]
+; CHECK-GI-NEXT:    sabal.4s v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -1200,10 +1217,10 @@ define <2 x i64> @sabal2_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: sabal2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    sabal2.2d v0, v1, v2
+; CHECK-GI-NEXT:    ldr d1, [x0, #8]
+; CHECK-GI-NEXT:    ldr d2, [x1, #8]
+; CHECK-GI-NEXT:    sabal.2d v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -1278,10 +1295,10 @@ define <8 x i16> @uabal2_8h(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: uabal2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    uabal2.8h v0, v1, v2
+; CHECK-GI-NEXT:    ldr d1, [x0, #8]
+; CHECK-GI-NEXT:    ldr d2, [x1, #8]
+; CHECK-GI-NEXT:    uabal.8h v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -1305,10 +1322,10 @@ define <4 x i32> @uabal2_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: uabal2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    uabal2.4s v0, v1, v2
+; CHECK-GI-NEXT:    ldr d1, [x0, #8]
+; CHECK-GI-NEXT:    ldr d2, [x1, #8]
+; CHECK-GI-NEXT:    uabal.4s v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -1332,10 +1349,10 @@ define <2 x i64> @uabal2_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: uabal2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    uabal2.2d v0, v1, v2
+; CHECK-GI-NEXT:    ldr d1, [x0, #8]
+; CHECK-GI-NEXT:    ldr d2, [x1, #8]
+; CHECK-GI-NEXT:    uabal.2d v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -1608,9 +1625,8 @@ define <2 x i64> @uabdl2_from_extract_dup(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: uabdl2_from_extract_dup:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup.2s v1, w0
-; CHECK-GI-NEXT:    mov d0, v0[1]
-; CHECK-GI-NEXT:    uabdl.2d v0, v0, v1
+; CHECK-GI-NEXT:    dup.4s v1, w0
+; CHECK-GI-NEXT:    uabdl2.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
@@ -1643,9 +1659,8 @@ define <2 x i64> @sabdl2_from_extract_dup(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: sabdl2_from_extract_dup:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup.2s v1, w0
-; CHECK-GI-NEXT:    mov d0, v0[1]
-; CHECK-GI-NEXT:    sabdl.2d v0, v0, v1
+; CHECK-GI-NEXT:    dup.4s v1, w0
+; CHECK-GI-NEXT:    sabdl2.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
diff --git a/llvm/test/CodeGen/AArch64/arm64-vadd.ll b/llvm/test/CodeGen/AArch64/arm64-vadd.ll
index d982dbbb1f69b..017873158e562 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vadd.ll
@@ -2,8 +2,29 @@
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:         warning: Instruction selection used fallback path for saddlp1d
+; CHECK-GI:    warning: Instruction selection used fallback path for saddl2_8h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl2_4s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl2_2d
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_8h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_4s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_2d
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddw2_8h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddw2_4s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddw2_2d
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddw2_8h
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddw2_4s
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddw2_2d
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddlp1d
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddlp1d
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl_duprhs
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_duprhs
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl_duplhs
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl2_duplhs
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for usubl_duprhs
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for usubl2_duprhs
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for ssubl_duplhs
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for ssubl2_duplhs
+
 
 define <8 x i8> @addhn8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: addhn8b:
@@ -416,8 +437,8 @@ define <8 x i16> @uaddw2_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: uaddw2_8h:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    uaddw2 v0.8h, v0.8h, v1.16b
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    uaddw v0.8h, v0.8h, v1.8b
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <8 x i16>, ptr %A
 
@@ -440,8 +461,8 @@ define <4 x i32> @uaddw2_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: uaddw2_4s:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    uaddw2 v0.4s, v0.4s, v1.8h
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    uaddw v0.4s, v0.4s, v1.4h
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <4 x i32>, ptr %A
 
@@ -464,8 +485,8 @@ define <2 x i64> @uaddw2_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: uaddw2_2d:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    uaddw2 v0.2d, v0.2d, v1.4s
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    uaddw v0.2d, v0.2d, v1.2s
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <2 x i64>, ptr %A
 
@@ -530,8 +551,8 @@ define <8 x i16> @saddw2_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: saddw2_8h:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    saddw2 v0.8h, v0.8h, v1.16b
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    saddw v0.8h, v0.8h, v1.8b
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <8 x i16>, ptr %A
 
@@ -554,8 +575,8 @@ define <4 x i32> @saddw2_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: saddw2_4s:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    saddw2 v0.4s, v0.4s, v1.8h
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    saddw v0.4s, v0.4s, v1.4h
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <4 x i32>, ptr %A
 
@@ -578,8 +599,8 @@ define <2 x i64> @saddw2_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: saddw2_2d:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr q1, [x1]
-; CHECK-GI-NEXT:    saddw2 v0.2d, v0.2d, v1.4s
+; CHECK-GI-NEXT:    ldr d1, [x1, #8]
+; CHECK-GI-NEXT:    saddw v0.2d, v0.2d, v1.2s
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <2 x i64>, ptr %A
 
@@ -1048,9 +1069,8 @@ define <2 x i64> @uaddl2_duprhs(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: uaddl2_duprhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.2s, w0
-; CHECK-GI-NEXT:    ushll v1.2d, v1.2s, #0
-; CHECK-GI-NEXT:    uaddw2 v0.2d, v1.2d, v0.4s
+; CHECK-GI-NEXT:    dup v1.4s, w0
+; CHECK-GI-NEXT:    uaddl2 v0.2d, v0.4s, v1.4s
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
@@ -1091,9 +1111,8 @@ define <2 x i64> @saddl2_duplhs(i32 %lhs, <4 x i32> %rhs) {
 ;
 ; CHECK-GI-LABEL: saddl2_duplhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.2s, w0
-; CHECK-GI-NEXT:    sshll v1.2d, v1.2s, #0
-; CHECK-GI-NEXT:    saddw2 v0.2d, v1.2d, v0.4s
+; CHECK-GI-NEXT:    dup v1.4s, w0
+; CHECK-GI-NEXT:    saddl2 v0.2d, v1.4s, v0.4s
 ; CHECK-GI-NEXT:    ret
   %lhsvec.tmp = insertelement <2 x i32> undef, i32 %lhs, i32 0
   %lhsvec = insertelement <2 x i32> %lhsvec.tmp, i32 %lhs, i32 1
@@ -1134,9 +1153,8 @@ define <2 x i64> @usubl2_duprhs(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: usubl2_duprhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.2s, w0
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
-; CHECK-GI-NEXT:    usubl v0.2d, v0.2s, v1.2s
+; CHECK-GI-NEXT:    dup v1.4s, w0
+; CHECK-GI-NEXT:    usubl2 v0.2d, v0.4s, v1.4s
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
@@ -1177,9 +1195,8 @@ define <2 x i64> @ssubl2_duplhs(i32 %lhs, <4 x i32> %rhs) {
 ;
 ; CHECK-GI-LABEL: ssubl2_duplhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.2s, w0
-; CHECK-GI-NEXT:    sshll v1.2d, v1.2s, #0
-; CHECK-GI-NEXT:    ssubw2 v0.2d, v1.2d, v0.4s
+; CHECK-GI-NEXT:    dup v1.4s, w0
+; CHECK-GI-NEXT:    ssubl2 v0.2d, v1.4s, v0.4s
 ; CHECK-GI-NEXT:    ret
   %lhsvec.tmp = insertelement <2 x i32> undef, i32 %lhs, i32 0
   %lhsvec = insertelement <2 x i32> %lhsvec.tmp, i32 %lhs, i32 1
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 2f543cc324bc2..343b9c98fc205 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=0 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=1 -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:       warning: Instruction selection used fallback path for sqshl1d
+; CHECK-GI:  warning: Instruction selection used fallback path for sqshl1d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqshl1d_constant
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqshl_scalar
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqshl_scalar_constant
@@ -82,15 +82,22 @@
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uqshrn16b
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uqshrn8h
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uqshrn4s
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ushll2_8h
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ushll2_4s
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ushll2_2d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_ushl_vscalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_ushl_scalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_sshll_vscalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_sshll_scalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_sshll_scalar_constant_shift_m1
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sshll2_8h
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sshll2_4s
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sshll2_2d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ursra1d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ursra_scalar
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for srsra1d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for srsra_scalar
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for shll_high
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sli8b
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sli4h
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sli2s
@@ -2227,8 +2234,7 @@ define <8 x i16> @ushll2_8h(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: ushll2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
 ; CHECK-GI-NEXT:    ushll v0.8h, v0.8b, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
@@ -2247,8 +2253,7 @@ define <4 x i32> @ushll2_4s(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: ushll2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
 ; CHECK-GI-NEXT:    ushll v0.4s, v0.4h, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
@@ -2267,8 +2272,7 @@ define <2 x i64> @ushll2_2d(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: ushll2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
 ; CHECK-GI-NEXT:    ushll v0.2d, v0.2s, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
@@ -2822,8 +2826,7 @@ define <8 x i16> @sshll2_8h(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: sshll2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
 ; CHECK-GI-NEXT:    sshll v0.8h, v0.8b, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
@@ -2842,8 +2845,7 @@ define <4 x i32> @sshll2_4s(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: sshll2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
 ; CHECK-GI-NEXT:    sshll v0.4s, v0.4h, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
@@ -2862,8 +2864,7 @@ define <2 x i64> @sshll2_2d(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: sshll2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    ldr d0, [x0, #8]
 ; CHECK-GI-NEXT:    sshll v0.2d, v0.2s, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
@@ -4027,8 +4028,7 @@ define <4 x i32> @shll_high(<8 x i16> %in) {
 ;
 ; CHECK-GI-LABEL: shll_high:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ushll2 v0.4s, v0.8h, #0
-; CHECK-GI-NEXT:    shl v0.4s, v0.4s, #16
+; CHECK-GI-NEXT:    shll2 v0.4s, v0.8h, #16
 ; CHECK-GI-NEXT:    ret
   %extract = shufflevector <8 x i16> %in, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %ext = zext <4 x i16> %extract to <4 x i32>

>From 7d48285320f6c05979e7dbd5917a9a65d9f81311 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Tue, 11 Feb 2025 18:36:48 -0800
Subject: [PATCH 11/15] Fix crash

---
 llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h | 2 +-
 llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp        | 8 +++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index c1c303fd18e6b..66a607fe1f231 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -264,7 +264,7 @@ class CombinerHelper {
   void applyCombineShuffleConcat(MachineInstr &MI,
                                  SmallVector<Register> &Ops) const;
 
-  /// Replace \p MI with a narrow extract_subvector.
+  /// Replace \p MI with a narrowed vector extract.
   bool matchCombineShuffleExtract(MachineInstr &MI, int64_t &IsFirst) const;
   void applyCombineShuffleExtract(MachineInstr &MI, int64_t IsFirst) const;
 
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index b8a800ba0da03..24945d4591b4b 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -391,8 +391,14 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
   
   auto SrcVec1 = Shuffle.getSrc1Reg();
   int SrcVec2 = Shuffle.getSrc2Reg();
-  auto Mask = Shuffle.getMask();
 
+  LLT SrcVec1Type = MRI.getType(SrcVec1);
+  LLT SrcVec2Type = MRI.getType(SrcVec2);
+  if (!SrcVec1Type.isVector() || !SrcVec2Type.isVector()) {
+    return false;
+  }
+
+  auto Mask = Shuffle.getMask();
   int Width = MRI.getType(SrcVec1).getNumElements();
   int Width2 = MRI.getType(SrcVec2).getNumElements();
 

>From 77a760f223ca263c90798bf00d1b10d39ece1aa4 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Wed, 12 Feb 2025 07:56:44 -0800
Subject: [PATCH 12/15] Only use the pattern in AMDGPU

---
 llvm/include/llvm/Target/GlobalISel/Combine.td | 2 +-
 llvm/lib/Target/AMDGPU/AMDGPUCombine.td        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 30316305d9e4f..17f1825cea7e4 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -2033,7 +2033,7 @@ def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
     and_or_disjoint_mask, fma_combines, fold_binop_into_select,
     sub_add_reg, select_to_minmax,
     fsub_to_fneg, commute_constant_to_rhs, match_ands, match_ors,
-    simplify_neg_minmax, combine_concat_vector, combine_shuffle_vector,
+    simplify_neg_minmax, combine_concat_vector,
     sext_trunc, zext_trunc, prefer_sign_combines, shuffle_combines,
     combine_use_vector_truncate, merge_combines, overflow_combines]>;
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
index da47aaf8a3b5c..f06281af34968 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCombine.td
@@ -163,7 +163,7 @@ def gfx8_combines : GICombineGroup<[expand_promoted_fmed3]>;
 
 def AMDGPUPreLegalizerCombiner: GICombiner<
   "AMDGPUPreLegalizerCombinerImpl",
-  [all_combines, combine_fmul_with_select_to_fldexp, clamp_i64_to_i16, foldable_fneg]> {
+  [all_combines, combine_fmul_with_select_to_fldexp, clamp_i64_to_i16, foldable_fneg, combine_shuffle_vector]> {
   let CombineAllMethodName = "tryCombineAllImpl";
 }
 

>From cfb19e0eff6e3786d1a8a77d4d2be5dc3243eb14 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Wed, 12 Feb 2025 07:57:12 -0800
Subject: [PATCH 13/15] Revert "Fix some AArch64 tests"

This reverts commit 8af99ab174450417b4330e902df688cbda3b3d4c.
---
 llvm/test/CodeGen/AArch64/aarch64-smull.ll |  86 +++++++++--------
 llvm/test/CodeGen/AArch64/arm64-vabs.ll    | 105 +++++++++------------
 llvm/test/CodeGen/AArch64/arm64-vadd.ll    |  67 +++++--------
 llvm/test/CodeGen/AArch64/arm64-vshift.ll  |  30 +++---
 4 files changed, 132 insertions(+), 156 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/aarch64-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
index 390953490e87d..3b589d3480179 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-smull.ll
@@ -4,16 +4,7 @@
 ; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 ; CHECK-GI:       warning: Instruction selection used fallback path for pmlsl2_v8i16_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl2_v8i16_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl2_v8i16_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl2_v4i32_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl2_v4i32_uzp1
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for pmlsl_pmlsl2_v8i16_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl_smlsl2_v8i16_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl_umlsl2_v8i16_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for smlsl_smlsl2_v4i32_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for umlsl_umlsl2_v4i32_uzp1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for do_stuff
 
 define <8 x i16> @smull_v8i8_v8i16(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull_v8i8_v8i16:
@@ -2039,8 +2030,9 @@ define void @smlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: smlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    uzp1 v2.16b, v0.16b, v2.16b
-; CHECK-GI-NEXT:    smlsl2 v1.8h, v0.16b, v2.16b
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
+; CHECK-GI-NEXT:    smlsl v1.8h, v0.8b, v2.8b
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2073,8 +2065,9 @@ define void @umlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: umlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    uzp1 v2.16b, v0.16b, v2.16b
-; CHECK-GI-NEXT:    umlsl2 v1.8h, v0.16b, v2.16b
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
+; CHECK-GI-NEXT:    umlsl v1.8h, v0.8b, v2.8b
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2107,8 +2100,9 @@ define void @smlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: smlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    uzp1 v2.8h, v0.8h, v2.8h
-; CHECK-GI-NEXT:    smlsl2 v1.4s, v0.8h, v2.8h
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
+; CHECK-GI-NEXT:    smlsl v1.4s, v0.4h, v2.4h
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2141,8 +2135,9 @@ define void @umlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3) {
 ; CHECK-GI-LABEL: umlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q2, [x1, #16]
-; CHECK-GI-NEXT:    uzp1 v2.8h, v0.8h, v2.8h
-; CHECK-GI-NEXT:    umlsl2 v1.4s, v0.8h, v2.8h
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
+; CHECK-GI-NEXT:    umlsl v1.4s, v0.4h, v2.4h
 ; CHECK-GI-NEXT:    str q1, [x0]
 ; CHECK-GI-NEXT:    ret
   %5 = getelementptr inbounds i32, ptr %3, i64 4
@@ -2203,11 +2198,14 @@ define void @smlsl_smlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: smlsl_smlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q2, q3, [x1]
-; CHECK-GI-NEXT:    uzp1 v2.16b, v2.16b, v3.16b
-; CHECK-GI-NEXT:    smlsl v1.8h, v0.8b, v2.8b
-; CHECK-GI-NEXT:    smlsl2 v1.8h, v0.16b, v2.16b
-; CHECK-GI-NEXT:    str q1, [x0]
+; CHECK-GI-NEXT:    ldp q4, q2, [x1]
+; CHECK-GI-NEXT:    mov d3, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
+; CHECK-GI-NEXT:    xtn v4.8b, v4.8h
+; CHECK-GI-NEXT:    smull v2.8h, v3.8b, v2.8b
+; CHECK-GI-NEXT:    smlal v2.8h, v0.8b, v4.8b
+; CHECK-GI-NEXT:    sub v0.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT:    str q0, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <8 x i16>, ptr %3, align 4
@@ -2246,11 +2244,14 @@ define void @umlsl_umlsl2_v8i16_uzp1(<16 x i8> %0, <8 x i16> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: umlsl_umlsl2_v8i16_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q2, q3, [x1]
-; CHECK-GI-NEXT:    uzp1 v2.16b, v2.16b, v3.16b
-; CHECK-GI-NEXT:    umlsl v1.8h, v0.8b, v2.8b
-; CHECK-GI-NEXT:    umlsl2 v1.8h, v0.16b, v2.16b
-; CHECK-GI-NEXT:    str q1, [x0]
+; CHECK-GI-NEXT:    ldp q4, q2, [x1]
+; CHECK-GI-NEXT:    mov d3, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.8b, v2.8h
+; CHECK-GI-NEXT:    xtn v4.8b, v4.8h
+; CHECK-GI-NEXT:    umull v2.8h, v3.8b, v2.8b
+; CHECK-GI-NEXT:    umlal v2.8h, v0.8b, v4.8b
+; CHECK-GI-NEXT:    sub v0.8h, v1.8h, v2.8h
+; CHECK-GI-NEXT:    str q0, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <8 x i16>, ptr %3, align 4
@@ -2289,11 +2290,14 @@ define void @smlsl_smlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: smlsl_smlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q2, q3, [x1]
-; CHECK-GI-NEXT:    uzp1 v2.8h, v2.8h, v3.8h
-; CHECK-GI-NEXT:    smlsl v1.4s, v0.4h, v2.4h
-; CHECK-GI-NEXT:    smlsl2 v1.4s, v0.8h, v2.8h
-; CHECK-GI-NEXT:    str q1, [x0]
+; CHECK-GI-NEXT:    ldp q4, q2, [x1]
+; CHECK-GI-NEXT:    mov d3, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
+; CHECK-GI-NEXT:    xtn v4.4h, v4.4s
+; CHECK-GI-NEXT:    smull v2.4s, v3.4h, v2.4h
+; CHECK-GI-NEXT:    smlal v2.4s, v0.4h, v4.4h
+; CHECK-GI-NEXT:    sub v0.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT:    str q0, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <4 x i32>, ptr %3, align 4
@@ -2332,11 +2336,14 @@ define void @umlsl_umlsl2_v4i32_uzp1(<8 x i16> %0, <4 x i32> %1, ptr %2, ptr %3,
 ;
 ; CHECK-GI-LABEL: umlsl_umlsl2_v4i32_uzp1:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldp q2, q3, [x1]
-; CHECK-GI-NEXT:    uzp1 v2.8h, v2.8h, v3.8h
-; CHECK-GI-NEXT:    umlsl v1.4s, v0.4h, v2.4h
-; CHECK-GI-NEXT:    umlsl2 v1.4s, v0.8h, v2.8h
-; CHECK-GI-NEXT:    str q1, [x0]
+; CHECK-GI-NEXT:    ldp q4, q2, [x1]
+; CHECK-GI-NEXT:    mov d3, v0.d[1]
+; CHECK-GI-NEXT:    xtn v2.4h, v2.4s
+; CHECK-GI-NEXT:    xtn v4.4h, v4.4s
+; CHECK-GI-NEXT:    umull v2.4s, v3.4h, v2.4h
+; CHECK-GI-NEXT:    umlal v2.4s, v0.4h, v4.4h
+; CHECK-GI-NEXT:    sub v0.4s, v1.4s, v2.4s
+; CHECK-GI-NEXT:    str q0, [x0]
 ; CHECK-GI-NEXT:    ret
 entry:
   %5 = load <4 x i32>, ptr %3, align 4
@@ -2373,8 +2380,9 @@ define <2 x i32> @do_stuff(<2 x i64> %0, <2 x i64> %1) {
 ;
 ; CHECK-GI-LABEL: do_stuff:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    uzp1 v0.4s, v0.4s, v0.4s
-; CHECK-GI-NEXT:    smull2 v0.2d, v1.4s, v0.4s
+; CHECK-GI-NEXT:    xtn v0.2s, v0.2d
+; CHECK-GI-NEXT:    mov d2, v1.d[1]
+; CHECK-GI-NEXT:    smull v0.2d, v2.2s, v0.2s
 ; CHECK-GI-NEXT:    xtn v0.2s, v0.2d
 ; CHECK-GI-NEXT:    add v0.2s, v0.2s, v1.2s
 ; CHECK-GI-NEXT:    ret
diff --git a/llvm/test/CodeGen/AArch64/arm64-vabs.ll b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
index 084ad1a181c12..cc8568709ea21 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vabs.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vabs.ll
@@ -2,26 +2,9 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple | FileCheck -check-prefixes=CHECK,CHECK-SD %s
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:  warning: Instruction selection used fallback path for sabdl2_8h
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for sabdl2_4s
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for sabdl2_2d
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for uabdl2_8h
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for uabdl2_4s
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for uabdl2_2d
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for sabal2_8h
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for sabal2_4s
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for sabal2_2d
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for uabal2_8h
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for uabal2_4s
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for uabal2_2d
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for fabds
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for fabdd
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for uabdl_from_extract_dup
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for uabdl2_from_extract_dup
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for sabdl_from_extract_dup
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for sabdl2_from_extract_dup
-; CHECK-GI-NEXT:  arning: Instruction selection used fallback path for uabd_i64
-
+; CHECK-GI:  warning: Instruction selection used fallback path for fabds
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for fabdd
+; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uabd_i64
 
 define <8 x i16> @sabdl8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: sabdl8h:
@@ -75,9 +58,9 @@ define <8 x i16> @sabdl2_8h(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: sabdl2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    sabdl.8h v0, v0, v1
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    sabdl2.8h v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -98,9 +81,9 @@ define <4 x i32> @sabdl2_4s(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: sabdl2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    sabdl.4s v0, v0, v1
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    sabdl2.4s v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -121,9 +104,9 @@ define <2 x i64> @sabdl2_2d(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: sabdl2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    sabdl.2d v0, v0, v1
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    sabdl2.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -186,9 +169,9 @@ define <8 x i16> @uabdl2_8h(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: uabdl2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    uabdl.8h v0, v0, v1
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    uabdl2.8h v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -210,9 +193,9 @@ define <4 x i32> @uabdl2_4s(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: uabdl2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    uabdl.4s v0, v0, v1
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    uabdl2.4s v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -233,9 +216,9 @@ define <2 x i64> @uabdl2_2d(ptr %A, ptr %B) nounwind {
 ;
 ; CHECK-GI-LABEL: uabdl2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    uabdl.2d v0, v0, v1
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    uabdl2.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -1163,10 +1146,10 @@ define <8 x i16> @sabal2_8h(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: sabal2_8h:
 ; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldr q1, [x0]
+; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    ldr d1, [x0, #8]
-; CHECK-GI-NEXT:    ldr d2, [x1, #8]
-; CHECK-GI-NEXT:    sabal.8h v0, v1, v2
+; CHECK-GI-NEXT:    sabal2.8h v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -1190,10 +1173,10 @@ define <4 x i32> @sabal2_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: sabal2_4s:
 ; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldr q1, [x0]
+; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    ldr d1, [x0, #8]
-; CHECK-GI-NEXT:    ldr d2, [x1, #8]
-; CHECK-GI-NEXT:    sabal.4s v0, v1, v2
+; CHECK-GI-NEXT:    sabal2.4s v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -1217,10 +1200,10 @@ define <2 x i64> @sabal2_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: sabal2_2d:
 ; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldr q1, [x0]
+; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    ldr d1, [x0, #8]
-; CHECK-GI-NEXT:    ldr d2, [x1, #8]
-; CHECK-GI-NEXT:    sabal.2d v0, v1, v2
+; CHECK-GI-NEXT:    sabal2.2d v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -1295,10 +1278,10 @@ define <8 x i16> @uabal2_8h(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: uabal2_8h:
 ; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldr q1, [x0]
+; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    ldr d1, [x0, #8]
-; CHECK-GI-NEXT:    ldr d2, [x1, #8]
-; CHECK-GI-NEXT:    uabal.8h v0, v1, v2
+; CHECK-GI-NEXT:    uabal2.8h v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
   %load2 = load <16 x i8>, ptr %B
@@ -1322,10 +1305,10 @@ define <4 x i32> @uabal2_4s(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: uabal2_4s:
 ; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldr q1, [x0]
+; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    ldr d1, [x0, #8]
-; CHECK-GI-NEXT:    ldr d2, [x1, #8]
-; CHECK-GI-NEXT:    uabal.4s v0, v1, v2
+; CHECK-GI-NEXT:    uabal2.4s v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
   %load2 = load <8 x i16>, ptr %B
@@ -1349,10 +1332,10 @@ define <2 x i64> @uabal2_2d(ptr %A, ptr %B, ptr %C) nounwind {
 ;
 ; CHECK-GI-LABEL: uabal2_2d:
 ; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldr q1, [x0]
+; CHECK-GI-NEXT:    ldr q2, [x1]
 ; CHECK-GI-NEXT:    ldr q0, [x2]
-; CHECK-GI-NEXT:    ldr d1, [x0, #8]
-; CHECK-GI-NEXT:    ldr d2, [x1, #8]
-; CHECK-GI-NEXT:    uabal.2d v0, v1, v2
+; CHECK-GI-NEXT:    uabal2.2d v0, v1, v2
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
   %load2 = load <4 x i32>, ptr %B
@@ -1625,8 +1608,9 @@ define <2 x i64> @uabdl2_from_extract_dup(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: uabdl2_from_extract_dup:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup.4s v1, w0
-; CHECK-GI-NEXT:    uabdl2.2d v0, v0, v1
+; CHECK-GI-NEXT:    dup.2s v1, w0
+; CHECK-GI-NEXT:    mov d0, v0[1]
+; CHECK-GI-NEXT:    uabdl.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
@@ -1659,8 +1643,9 @@ define <2 x i64> @sabdl2_from_extract_dup(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: sabdl2_from_extract_dup:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup.4s v1, w0
-; CHECK-GI-NEXT:    sabdl2.2d v0, v0, v1
+; CHECK-GI-NEXT:    dup.2s v1, w0
+; CHECK-GI-NEXT:    mov d0, v0[1]
+; CHECK-GI-NEXT:    sabdl.2d v0, v0, v1
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
diff --git a/llvm/test/CodeGen/AArch64/arm64-vadd.ll b/llvm/test/CodeGen/AArch64/arm64-vadd.ll
index 017873158e562..d982dbbb1f69b 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vadd.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vadd.ll
@@ -2,29 +2,8 @@
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:    warning: Instruction selection used fallback path for saddl2_8h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl2_4s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl2_2d
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_8h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_4s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_2d
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddw2_8h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddw2_4s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddw2_2d
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddw2_8h
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddw2_4s
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddw2_2d
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddlp1d
+; CHECK-GI:         warning: Instruction selection used fallback path for saddlp1d
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddlp1d
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl_duprhs
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uaddl2_duprhs
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl_duplhs
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for saddl2_duplhs
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for usubl_duprhs
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for usubl2_duprhs
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for ssubl_duplhs
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for ssubl2_duplhs
-
 
 define <8 x i8> @addhn8b(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: addhn8b:
@@ -437,8 +416,8 @@ define <8 x i16> @uaddw2_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: uaddw2_8h:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    uaddw v0.8h, v0.8h, v1.8b
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    uaddw2 v0.8h, v0.8h, v1.16b
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <8 x i16>, ptr %A
 
@@ -461,8 +440,8 @@ define <4 x i32> @uaddw2_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: uaddw2_4s:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    uaddw v0.4s, v0.4s, v1.4h
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    uaddw2 v0.4s, v0.4s, v1.8h
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <4 x i32>, ptr %A
 
@@ -485,8 +464,8 @@ define <2 x i64> @uaddw2_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: uaddw2_2d:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    uaddw v0.2d, v0.2d, v1.2s
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    uaddw2 v0.2d, v0.2d, v1.4s
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <2 x i64>, ptr %A
 
@@ -551,8 +530,8 @@ define <8 x i16> @saddw2_8h(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: saddw2_8h:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    saddw v0.8h, v0.8h, v1.8b
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    saddw2 v0.8h, v0.8h, v1.16b
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <8 x i16>, ptr %A
 
@@ -575,8 +554,8 @@ define <4 x i32> @saddw2_4s(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: saddw2_4s:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    saddw v0.4s, v0.4s, v1.4h
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    saddw2 v0.4s, v0.4s, v1.8h
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <4 x i32>, ptr %A
 
@@ -599,8 +578,8 @@ define <2 x i64> @saddw2_2d(ptr %A, ptr %B) nounwind {
 ; CHECK-GI-LABEL: saddw2_2d:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    ldr q0, [x0]
-; CHECK-GI-NEXT:    ldr d1, [x1, #8]
-; CHECK-GI-NEXT:    saddw v0.2d, v0.2d, v1.2s
+; CHECK-GI-NEXT:    ldr q1, [x1]
+; CHECK-GI-NEXT:    saddw2 v0.2d, v0.2d, v1.4s
 ; CHECK-GI-NEXT:    ret
         %tmp1 = load <2 x i64>, ptr %A
 
@@ -1069,8 +1048,9 @@ define <2 x i64> @uaddl2_duprhs(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: uaddl2_duprhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.4s, w0
-; CHECK-GI-NEXT:    uaddl2 v0.2d, v0.4s, v1.4s
+; CHECK-GI-NEXT:    dup v1.2s, w0
+; CHECK-GI-NEXT:    ushll v1.2d, v1.2s, #0
+; CHECK-GI-NEXT:    uaddw2 v0.2d, v1.2d, v0.4s
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
@@ -1111,8 +1091,9 @@ define <2 x i64> @saddl2_duplhs(i32 %lhs, <4 x i32> %rhs) {
 ;
 ; CHECK-GI-LABEL: saddl2_duplhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.4s, w0
-; CHECK-GI-NEXT:    saddl2 v0.2d, v1.4s, v0.4s
+; CHECK-GI-NEXT:    dup v1.2s, w0
+; CHECK-GI-NEXT:    sshll v1.2d, v1.2s, #0
+; CHECK-GI-NEXT:    saddw2 v0.2d, v1.2d, v0.4s
 ; CHECK-GI-NEXT:    ret
   %lhsvec.tmp = insertelement <2 x i32> undef, i32 %lhs, i32 0
   %lhsvec = insertelement <2 x i32> %lhsvec.tmp, i32 %lhs, i32 1
@@ -1153,8 +1134,9 @@ define <2 x i64> @usubl2_duprhs(<4 x i32> %lhs, i32 %rhs) {
 ;
 ; CHECK-GI-LABEL: usubl2_duprhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.4s, w0
-; CHECK-GI-NEXT:    usubl2 v0.2d, v0.4s, v1.4s
+; CHECK-GI-NEXT:    dup v1.2s, w0
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
+; CHECK-GI-NEXT:    usubl v0.2d, v0.2s, v1.2s
 ; CHECK-GI-NEXT:    ret
   %rhsvec.tmp = insertelement <2 x i32> undef, i32 %rhs, i32 0
   %rhsvec = insertelement <2 x i32> %rhsvec.tmp, i32 %rhs, i32 1
@@ -1195,8 +1177,9 @@ define <2 x i64> @ssubl2_duplhs(i32 %lhs, <4 x i32> %rhs) {
 ;
 ; CHECK-GI-LABEL: ssubl2_duplhs:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    dup v1.4s, w0
-; CHECK-GI-NEXT:    ssubl2 v0.2d, v1.4s, v0.4s
+; CHECK-GI-NEXT:    dup v1.2s, w0
+; CHECK-GI-NEXT:    sshll v1.2d, v1.2s, #0
+; CHECK-GI-NEXT:    ssubw2 v0.2d, v1.2d, v0.4s
 ; CHECK-GI-NEXT:    ret
   %lhsvec.tmp = insertelement <2 x i32> undef, i32 %lhs, i32 0
   %lhsvec = insertelement <2 x i32> %lhsvec.tmp, i32 %lhs, i32 1
diff --git a/llvm/test/CodeGen/AArch64/arm64-vshift.ll b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
index 343b9c98fc205..2f543cc324bc2 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vshift.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vshift.ll
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=0 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple=arm64-eabi -global-isel=1 -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:  warning: Instruction selection used fallback path for sqshl1d
+; CHECK-GI:       warning: Instruction selection used fallback path for sqshl1d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqshl1d_constant
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqshl_scalar
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sqshl_scalar_constant
@@ -82,22 +82,15 @@
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uqshrn16b
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uqshrn8h
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for uqshrn4s
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ushll2_8h
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ushll2_4s
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ushll2_2d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_ushl_vscalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_ushl_scalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_sshll_vscalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_sshll_scalar_constant_shift
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for neon_sshll_scalar_constant_shift_m1
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sshll2_8h
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sshll2_4s
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sshll2_2d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ursra1d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for ursra_scalar
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for srsra1d
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for srsra_scalar
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for shll_high
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sli8b
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sli4h
 ; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for sli2s
@@ -2234,7 +2227,8 @@ define <8 x i16> @ushll2_8h(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: ushll2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
 ; CHECK-GI-NEXT:    ushll v0.8h, v0.8b, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
@@ -2253,7 +2247,8 @@ define <4 x i32> @ushll2_4s(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: ushll2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
 ; CHECK-GI-NEXT:    ushll v0.4s, v0.4h, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
@@ -2272,7 +2267,8 @@ define <2 x i64> @ushll2_2d(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: ushll2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
 ; CHECK-GI-NEXT:    ushll v0.2d, v0.2s, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
@@ -2826,7 +2822,8 @@ define <8 x i16> @sshll2_8h(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: sshll2_8h:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
 ; CHECK-GI-NEXT:    sshll v0.8h, v0.8b, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <16 x i8>, ptr %A
@@ -2845,7 +2842,8 @@ define <4 x i32> @sshll2_4s(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: sshll2_4s:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
 ; CHECK-GI-NEXT:    sshll v0.4s, v0.4h, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <8 x i16>, ptr %A
@@ -2864,7 +2862,8 @@ define <2 x i64> @sshll2_2d(ptr %A) nounwind {
 ;
 ; CHECK-GI-LABEL: sshll2_2d:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    ldr d0, [x0, #8]
+; CHECK-GI-NEXT:    ldr q0, [x0]
+; CHECK-GI-NEXT:    mov d0, v0.d[1]
 ; CHECK-GI-NEXT:    sshll v0.2d, v0.2s, #1
 ; CHECK-GI-NEXT:    ret
   %load1 = load <4 x i32>, ptr %A
@@ -4028,7 +4027,8 @@ define <4 x i32> @shll_high(<8 x i16> %in) {
 ;
 ; CHECK-GI-LABEL: shll_high:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    shll2 v0.4s, v0.8h, #16
+; CHECK-GI-NEXT:    ushll2 v0.4s, v0.8h, #0
+; CHECK-GI-NEXT:    shl v0.4s, v0.4s, #16
 ; CHECK-GI-NEXT:    ret
   %extract = shufflevector <8 x i16> %in, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
   %ext = zext <4 x i16> %extract to <4 x i32>

>From 5545ad6b0f2e0592895ec2d318b205049b0d5966 Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Wed, 12 Feb 2025 10:02:46 -0800
Subject: [PATCH 14/15] Fix linting and add test

---
 llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp | 11 +++++++----
 .../CodeGen/AMDGPU/GlobalISel/shufflevector.ll | 18 ++++++++++++++++++
 2 files changed, 25 insertions(+), 4 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/shufflevector.ll

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 24945d4591b4b..34e7b21ec43f6 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -384,11 +384,12 @@ void CombinerHelper::applyCombineConcatVectors(
   MI.eraseFromParent();
 }
 
-bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx) const {
+bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI,
+                                                int64_t &Idx) const {
   assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
          "Invalid instruction");
   auto &Shuffle = cast<GShuffleVector>(MI);
-  
+
   auto SrcVec1 = Shuffle.getSrc1Reg();
   int SrcVec2 = Shuffle.getSrc2Reg();
 
@@ -430,7 +431,8 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI, int64_t &Idx)
   return true;
 }
 
-void CombinerHelper::applyCombineShuffleExtract(MachineInstr &MI, int64_t Idx) const {
+void CombinerHelper::applyCombineShuffleExtract(MachineInstr &MI,
+                                                int64_t Idx) const {
   auto &Shuffle = cast<GShuffleVector>(MI);
 
   auto SrcVec1 = Shuffle.getSrc1Reg();
@@ -440,7 +442,8 @@ void CombinerHelper::applyCombineShuffleExtract(MachineInstr &MI, int64_t Idx) c
 
   auto SrcVec = Idx < Width ? SrcVec1 : SrcVec2;
 
-  Builder.buildExtract(MI.getOperand(0).getReg(), SrcVec, Idx * EltTy.getSizeInBits());
+  Builder.buildExtract(MI.getOperand(0).getReg(), SrcVec,
+                       Idx * EltTy.getSizeInBits());
   MI.eraseFromParent();
 }
 
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/shufflevector.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/shufflevector.ll
new file mode 100644
index 0000000000000..09274c4d3626b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/shufflevector.ll
@@ -0,0 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -march=amdgcn -mtriple=amdgcn-amd-hmcsa -mcpu=gfx942 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX942 %s
+
+define void @shuffle_to_extract(ptr addrspace(3) %in, ptr addrspace(3) %out) {
+; GFX942-LABEL: shuffle_to_extract:
+; GFX942:       ; %bb.0:
+; GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX942-NEXT:    ds_read2_b64 v[2:5], v0 offset1:1
+; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942-NEXT:    ds_write_b64 v1, v[4:5]
+; GFX942-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX942-NEXT:    s_setpc_b64 s[30:31]
+  %val = load <8 x half>, ptr addrspace(3) %in, align 8
+  %res = shufflevector <8 x half> %val, <8 x half> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  store <4 x half> %res, ptr addrspace(3) %out, align 8
+  ret void
+}
+

>From aff54ab402d09b1fbaf519dbf32d362d5ba632bb Mon Sep 17 00:00:00 2001
From: Alan Li <me at alanli.org>
Date: Wed, 12 Feb 2025 17:58:11 -0800
Subject: [PATCH 15/15] Adding mir test case

---
 .../lib/CodeGen/GlobalISel/CombinerHelper.cpp |   4 +-
 .../prelegalizer-combiner-shuffle.mir         | 132 ++++++++++++++++++
 2 files changed, 134 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 34e7b21ec43f6..165dd68193d3a 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -420,9 +420,9 @@ bool CombinerHelper::matchCombineShuffleExtract(MachineInstr &MI,
     return false;
   }
 
-  // Check if the extractee's order is kept, and they should be conscecutive.
+  // Check if the extractee's order is kept, and they should be consecutive.
   for (size_t i = 1; i < Mask.size(); ++i) {
-    if (Mask[i] != Mask[i - 1] + 1 || Mask[i] == -1) {
+    if (Mask[i] != Mask[i - 1] + 1) {
       return false; // Not consecutive
     }
   }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir
new file mode 100644
index 0000000000000..46ad02f8aded8
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir
@@ -0,0 +1,132 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=amdgpu-prelegalizer-combiner -verify-machineinstrs -o - %s | FileCheck %s
+
+---
+name: shuffle_vector_to_extract
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; CHECK-LABEL: name: shuffle_vector_to_extract
+    ; CHECK: liveins: $vgpr0, $vgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<4 x s16>) = G_EXTRACT [[LOAD]](<8 x s16>), 64
+    ; CHECK-NEXT: G_STORE [[EXTRACT]](<4 x s16>), [[COPY1]](p3) :: (store (<4 x s16>), addrspace 3)
+    ; CHECK-NEXT: SI_RETURN
+    %0:_(p3) = COPY $vgpr0
+    %1:_(p3) = COPY $vgpr1
+    %12:_(<8 x s16>) = G_IMPLICIT_DEF
+    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %11:_(<4 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(4, 5, 6, 7)
+    G_STORE %11(<4 x s16>), %1(p3) :: (store (<4 x s16>), addrspace 3)
+    SI_RETURN
+...
+
+---
+name: shuffle_vector_to_extract2
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; CHECK-LABEL: name: shuffle_vector_to_extract2
+    ; CHECK: liveins: $vgpr0, $vgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<2 x s16>) = G_EXTRACT [[LOAD]](<8 x s16>), 48
+    ; CHECK-NEXT: G_STORE [[EXTRACT]](<2 x s16>), [[COPY1]](p3) :: (store (<2 x s16>), addrspace 3)
+    ; CHECK-NEXT: SI_RETURN
+    %0:_(p3) = COPY $vgpr0
+    %1:_(p3) = COPY $vgpr1
+    %12:_(<8 x s16>) = G_IMPLICIT_DEF
+    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %11:_(<2 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(3, 4)
+    G_STORE %11(<2 x s16>), %1(p3) :: (store (<2 x s16>), addrspace 3)
+    SI_RETURN
+
+...
+
+---
+name: shuffle_vector_to_extract_odd_elements
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; CHECK-LABEL: name: shuffle_vector_to_extract_odd_elements
+    ; CHECK: liveins: $vgpr0, $vgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    ; CHECK-NEXT: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[LOAD]](<8 x s16>), 0
+    ; CHECK-NEXT: G_STORE [[EXTRACT]](<3 x s16>), [[COPY1]](p3) :: (store (<3 x s16>), align 8, addrspace 3)
+    ; CHECK-NEXT: SI_RETURN
+    %0:_(p3) = COPY $vgpr0
+    %1:_(p3) = COPY $vgpr1
+    %12:_(<8 x s16>) = G_IMPLICIT_DEF
+    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %11:_(<3 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(0, 1, 2)
+    G_STORE %11(<3 x s16>), %1(p3) :: (store (<3 x s16>), addrspace 3)
+    SI_RETURN
+...
+
+
+---
+name: shuffle_vector_to_extract_minus_1_no_conversion
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; CHECK-LABEL: name: shuffle_vector_to_extract_minus_1_no_conversion
+    ; CHECK: liveins: $vgpr0, $vgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s16>) = G_SHUFFLE_VECTOR [[LOAD]](<8 x s16>), [[DEF]], shufflemask(4, 5, undef, 7)
+    ; CHECK-NEXT: G_STORE [[SHUF]](<4 x s16>), [[COPY1]](p3) :: (store (<4 x s16>), addrspace 3)
+    ; CHECK-NEXT: SI_RETURN
+    %0:_(p3) = COPY $vgpr0
+    %1:_(p3) = COPY $vgpr1
+    %12:_(<8 x s16>) = G_IMPLICIT_DEF
+    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %11:_(<4 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(4, 5, -1, 7)
+    G_STORE %11(<4 x s16>), %1(p3) :: (store (<4 x s16>), addrspace 3)
+    SI_RETURN
+...
+
+---
+name: shuffle_vector_to_extract_across_vectors_no_conversion
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0, $vgpr1
+
+    ; CHECK-LABEL: name: shuffle_vector_to_extract_across_vectors_no_conversion
+    ; CHECK: liveins: $vgpr0, $vgpr1
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
+    ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<8 x s16>) = G_IMPLICIT_DEF
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    ; CHECK-NEXT: [[SHUF:%[0-9]+]]:_(<4 x s16>) = G_SHUFFLE_VECTOR [[LOAD]](<8 x s16>), [[DEF]], shufflemask(6, 7, undef, undef)
+    ; CHECK-NEXT: G_STORE [[SHUF]](<4 x s16>), [[COPY1]](p3) :: (store (<4 x s16>), addrspace 3)
+    ; CHECK-NEXT: SI_RETURN
+    %0:_(p3) = COPY $vgpr0
+    %1:_(p3) = COPY $vgpr1
+    %12:_(<8 x s16>) = G_IMPLICIT_DEF
+    %10:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
+    %11:_(<4 x s16>) = G_SHUFFLE_VECTOR %10(<8 x s16>), %12, shufflemask(6, 7, 8, 9)
+    G_STORE %11(<4 x s16>), %1(p3) :: (store (<4 x s16>), addrspace 3)
+    SI_RETURN
+...
+



More information about the llvm-commits mailing list