[llvm] [AArch64][GISel] Add legalizer support for @llvm.umul.with.overflow.i128 (PR #170101)

via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 1 03:08:04 PST 2025


https://github.com/ayank227 created https://github.com/llvm/llvm-project/pull/170101

This follows the same pattern SelectionDAG uses to handle this intrinsic. I have also added a split_store_128 combine to optimize an i128 G_STORE(G_MERGE_VALUES(x, y)) into two i64 G_STOREs.
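
For reference, this is the IR pattern from the test that this patch removes from arm64-fallback.ll; the G_UMULO it produces previously triggered the GlobalISel fallback and is now narrowed to s64 operations by the legalizer:

  declare {i128, i1} @llvm.umul.with.overflow.i128(i128, i128) nounwind readnone

  define zeroext i1 @umul_s128(i128 %v1, ptr %res) {
  entry:
    %t = call {i128, i1} @llvm.umul.with.overflow.i128(i128 %v1, i128 2)
    %val = extractvalue {i128, i1} %t, 0
    %obit = extractvalue {i128, i1} %t, 1
    store i128 %val, ptr %res
    ret i1 %obit
  }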

>From df3b94afbbe204c1f73b46ac0f63ed33d8fb5399 Mon Sep 17 00:00:00 2001
From: Ayan Kundu <ayank at nvidia.com>
Date: Fri, 28 Nov 2025 08:47:54 +0000
Subject: [PATCH] [AArch64][GISel] Add legalizer support for
 @llvm.umul.with.overflow.i128

This follows the same pattern SelectionDAG uses to handle this intrinsic.
I have also added a split_store_128 combine to optimize an i128 G_STORE(G_MERGE_VALUES(x, y)) into two i64 G_STOREs.
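
Roughly, the narrowing follows the usual 128-bit decomposition: with x = x_hi*2^64 + x_lo and y = y_hi*2^64 + y_lo, the product modulo 2^128 is

  x_lo*y_lo + ((x_hi*y_lo + x_lo*y_hi) << 64)

and the multiply overflows i128 if both x_hi and y_hi are non-zero, if either cross product x_hi*y_lo or x_lo*y_hi does not fit in 64 bits, or if adding the cross-product sum into the high half of x_lo*y_lo carries out. The lowered sequence computes these four conditions with MUL/UMULH and an unsigned add-with-overflow, and ORs the flags together.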
---
 .../llvm/CodeGen/GlobalISel/LegalizerHelper.h |   1 +
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    |  94 ++++++++++++++
 llvm/lib/Target/AArch64/AArch64Combine.td     |  11 +-
 .../GISel/AArch64PostLegalizerLowering.cpp    |  63 ++++++++++
 .../AArch64/GlobalISel/arm64-atomic-128.ll    |  60 +++------
 .../AArch64/GlobalISel/arm64-fallback.ll      |  13 --
 .../AArch64/GlobalISel/v8.4-atomic-128.ll     |  42 ++-----
 .../CodeGen/AArch64/aarch64-fold-lslfast.ll   |  26 ++--
 .../test/CodeGen/AArch64/arm64-sli-sri-opt.ll |  18 +--
 llvm/test/CodeGen/AArch64/dup.ll              |  63 +++++-----
 .../test/CodeGen/AArch64/fptosi-sat-vector.ll | 102 ++++++---------
 .../test/CodeGen/AArch64/fptoui-sat-vector.ll | 102 ++++++---------
 .../CodeGen/AArch64/i128_with_overflow.ll     | 119 ++++++++++++------
 llvm/test/CodeGen/AArch64/insertextract.ll    |  42 +++----
 llvm/test/CodeGen/AArch64/store.ll            |   7 +-
 15 files changed, 421 insertions(+), 342 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index a458cbd94ccb1..5cbbf719c5504 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -426,6 +426,7 @@ class LegalizerHelper {
   LLVM_ABI LegalizeResult narrowScalarAddSub(MachineInstr &MI, unsigned TypeIdx,
                                              LLT NarrowTy);
   LLVM_ABI LegalizeResult narrowScalarMul(MachineInstr &MI, LLT Ty);
+  LLVM_ABI LegalizeResult narrowScalarMULO(MachineInstr &MI, LLT Ty);
   LLVM_ABI LegalizeResult narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx,
                                             LLT Ty);
   LLVM_ABI LegalizeResult narrowScalarExtract(MachineInstr &MI,
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 120c38ab8404c..8c682923d47a9 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -1669,6 +1669,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
   case TargetOpcode::G_MUL:
   case TargetOpcode::G_UMULH:
     return narrowScalarMul(MI, NarrowTy);
+  case TargetOpcode::G_UMULO:
+    return narrowScalarMULO(MI, NarrowTy);
   case TargetOpcode::G_EXTRACT:
     return narrowScalarExtract(MI, TypeIdx, NarrowTy);
   case TargetOpcode::G_INSERT:
@@ -7193,6 +7195,98 @@ LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) {
   return Legalized;
 }
 
+// Narrow unsigned multiplication with overflow (G_UMULO).
+LegalizerHelper::LegalizeResult
+LegalizerHelper::narrowScalarMULO(MachineInstr &MI, LLT NarrowTy) {
+  auto [DstReg, OverflowReg, Src1, Src2] = MI.getFirst4Regs();
+
+  LLT Ty = MRI.getType(DstReg);
+  if (Ty.isVector())
+    return UnableToLegalize;
+
+  unsigned Size = Ty.getSizeInBits();
+  unsigned NarrowSize = NarrowTy.getSizeInBits();
+  if (Size % NarrowSize != 0)
+    return UnableToLegalize;
+
+  unsigned NumParts = Size / NarrowSize;
+  if (NumParts != 2)
+    return UnableToLegalize; // Only handle i128→i64 narrowing
+
+  // Split inputs into high/low parts
+  SmallVector<Register, 2> Src1Parts, Src2Parts;
+  extractParts(Src1, NarrowTy, NumParts, Src1Parts, MIRBuilder, MRI);
+  extractParts(Src2, NarrowTy, NumParts, Src2Parts, MIRBuilder, MRI);
+
+  Register LHSLo = Src1Parts[0];
+  Register LHSHi = Src1Parts[1];
+  Register RHSLo = Src2Parts[0];
+  Register RHSHi = Src2Parts[1];
+
+  // Check if both high parts are non-zero → guaranteed overflow
+  Register Zero = MIRBuilder.buildConstant(NarrowTy, 0).getReg(0);
+  Register LHSHiNZ =
+      MIRBuilder.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), LHSHi, Zero)
+          .getReg(0);
+  Register RHSHiNZ =
+      MIRBuilder.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), RHSHi, Zero)
+          .getReg(0);
+  Register BothHiNonZero =
+      MIRBuilder.buildAnd(LLT::scalar(1), LHSHiNZ, RHSHiNZ).getReg(0);
+
+  // Cross multiply LHSHi × RHSLo with overflow (use MUL+UMULH directly)
+  Register Mid1 = MIRBuilder.buildMul(NarrowTy, LHSHi, RHSLo).getReg(0);
+  Register Mid1Hi = MIRBuilder.buildUMulH(NarrowTy, LHSHi, RHSLo).getReg(0);
+  Register Ovf1 =
+      MIRBuilder.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Mid1Hi, Zero)
+          .getReg(0);
+
+  // Cross multiply LHSLo × RHSHi with overflow (use MUL+UMULH directly)
+  Register Mid2 = MIRBuilder.buildMul(NarrowTy, LHSLo, RHSHi).getReg(0);
+  Register Mid2Hi = MIRBuilder.buildUMulH(NarrowTy, LHSLo, RHSHi).getReg(0);
+  Register Ovf2 =
+      MIRBuilder.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Mid2Hi, Zero)
+          .getReg(0);
+
+  // Add the cross products (HighSum = Mid1 + Mid2)
+  Register HighSum = MIRBuilder.buildAdd(NarrowTy, Mid1, Mid2).getReg(0);
+
+  // Multiply low parts to get full 128-bit result (using ZEXT pattern)
+  LLT WideTy = LLT::scalar(Size);
+  Register LHSLoExt = MIRBuilder.buildZExt(WideTy, LHSLo).getReg(0);
+  Register RHSLoExt = MIRBuilder.buildZExt(WideTy, RHSLo).getReg(0);
+  Register FullMul = MIRBuilder.buildMul(WideTy, LHSLoExt, RHSLoExt).getReg(0);
+
+  SmallVector<Register, 2> LowMulParts;
+  extractParts(FullMul, NarrowTy, NumParts, LowMulParts, MIRBuilder, MRI);
+  Register ResLo = LowMulParts[0];
+  Register ResHi = LowMulParts[1];
+
+  // Add HighSum to ResHi with overflow detection
+  auto AddHighSum =
+      MIRBuilder.buildUAddo(NarrowTy, LLT::scalar(1), ResHi, HighSum);
+  Register FinalHi = AddHighSum.getReg(0);
+  Register Ovf3 = AddHighSum.getReg(1);
+
+  // Combine all overflow flags
+  // overflow = BothHiNonZero || Ovf1 || Ovf2 || Ovf3
+  Register Ovf12 = MIRBuilder.buildOr(LLT::scalar(1), Ovf1, Ovf2).getReg(0);
+  Register Ovf123 = MIRBuilder.buildOr(LLT::scalar(1), Ovf12, Ovf3).getReg(0);
+  Register FinalOvf =
+      MIRBuilder.buildOr(LLT::scalar(1), BothHiNonZero, Ovf123).getReg(0);
+
+  // Build final result
+  // Emit G_MERGE_VALUES for the result
+  SmallVector<Register, 2> ResultParts = {ResLo, FinalHi};
+  MIRBuilder.buildMergeLikeInstr(DstReg, ResultParts);
+
+  // Normalize overflow to s1 type
+  MIRBuilder.buildCopy(OverflowReg, FinalOvf);
+
+  MI.eraseFromParent();
+  return Legalized;
+}
+
 LegalizerHelper::LegalizeResult
 LegalizerHelper::narrowScalarFPTOI(MachineInstr &MI, unsigned TypeIdx,
                                    LLT NarrowTy) {
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index 278314792bfb9..32955f3a6f952 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -280,6 +280,14 @@ def form_truncstore : GICombineRule<
   (apply [{ applyFormTruncstore(*${root}, MRI, B, Observer, ${matchinfo}); }])
 >;
 
+def split_store_128_matchdata : GIDefMatchData<"std::pair<Register, Register>">;
+def split_store_128 : GICombineRule<
+  (defs root:$root, split_store_128_matchdata:$matchinfo),
+  (match (G_STORE $src, $addr):$root,
+          [{ return matchSplitStore128(*${root}, MRI, ${matchinfo}); }]),
+  (apply [{ applySplitStore128(*${root}, MRI, B, Observer, ${matchinfo}); }])
+>;
+
 def fold_merge_to_zext : GICombineRule<
   (defs root:$d),
   (match (wip_match_opcode G_MERGE_VALUES):$d,
@@ -339,7 +347,8 @@ def AArch64PostLegalizerLowering
     : GICombiner<"AArch64PostLegalizerLoweringImpl",
                        [shuffle_vector_lowering, vashr_vlshr_imm,
                         icmp_lowering, build_vector_lowering,
-                        lower_vector_fcmp, form_truncstore, fconstant_to_constant,
+                        lower_vector_fcmp, form_truncstore, split_store_128,
+                        fconstant_to_constant,
                         vector_sext_inreg_to_shift,
                         unmerge_ext_to_unmerge, lower_mulv2s64,
                         vector_unmerge_lowering, insertelt_nonconst,
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
index 4fba593b3d0fb..7152558580763 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
@@ -1119,6 +1119,69 @@ void applyFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
   Observer.changedInstr(MI);
 }
 
+/// Optimize i128 stores by splitting into two i64 stores for STP pairing
+bool matchSplitStore128(MachineInstr &MI, MachineRegisterInfo &MRI,
+                        std::pair<Register, Register> &Parts) {
+  assert(MI.getOpcode() == TargetOpcode::G_STORE);
+  GStore &Store = cast<GStore>(MI);
+
+  Register ValueReg = Store.getValueReg();
+  LLT ValueTy = MRI.getType(ValueReg);
+
+  // Only handle scalar types
+  if (!ValueTy.isScalar())
+    return false;
+
+  if (ValueTy.getSizeInBits() != 128)
+    return false;
+
+  // Check if the value comes from G_MERGE_VALUES
+  MachineInstr *DefMI = MRI.getVRegDef(ValueReg);
+  if (!DefMI || DefMI->getOpcode() != TargetOpcode::G_MERGE_VALUES)
+    return false;
+
+  // Get the two i64 parts
+  if (DefMI->getNumOperands() != 3) // Dst + 2 sources
+    return false;
+
+  Register Part0 = DefMI->getOperand(1).getReg();
+  Register Part1 = DefMI->getOperand(2).getReg();
+
+  if (MRI.getType(Part0) != LLT::scalar(64) ||
+      MRI.getType(Part1) != LLT::scalar(64))
+    return false;
+
+  Parts = {Part0, Part1};
+  return true;
+}
+
+void applySplitStore128(MachineInstr &MI, MachineRegisterInfo &MRI,
+                        MachineIRBuilder &B, GISelChangeObserver &Observer,
+                        std::pair<Register, Register> &Parts) {
+  assert(MI.getOpcode() == TargetOpcode::G_STORE);
+  GStore &Store = cast<GStore>(MI);
+
+  B.setInstrAndDebugLoc(MI);
+
+  Register PtrReg = Store.getPointerReg();
+  MachineMemOperand &MMO = Store.getMMO();
+
+  // Create two i64 stores
+  // Store low part at [ptr]
+  B.buildStore(Parts.first, PtrReg, MMO.getPointerInfo(), MMO.getAlign(),
+               MMO.getFlags());
+
+  // Calculate offset for high part: ptr + 8
+  auto Offset = B.buildConstant(LLT::scalar(64), 8);
+  auto PtrHi = B.buildPtrAdd(MRI.getType(PtrReg), PtrReg, Offset);
+
+  // Store high part at [ptr + 8]
+  B.buildStore(Parts.second, PtrHi, MMO.getPointerInfo().getWithOffset(8),
+               commonAlignment(MMO.getAlign(), 8), MMO.getFlags());
+
+  MI.eraseFromParent();
+}
+
 // Lower vector G_SEXT_INREG back to shifts for selection. We allowed them to
 // form in the first place for combine opportunities, so any remaining ones
 // at this stage need be lowered back.
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll
index be51210882eaa..ac18249950e9e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-atomic-128.ll
@@ -27,9 +27,7 @@ define void @val_compare_and_swap(ptr %p, i128 %oldval, i128 %newval) {
 ; CHECK-LLSC-O1-NEXT:    stxp w10, x4, x5, [x0]
 ; CHECK-LLSC-O1-NEXT:    cbnz w10, .LBB0_1
 ; CHECK-LLSC-O1-NEXT:  .LBB0_4:
-; CHECK-LLSC-O1-NEXT:    mov v0.d[0], x8
-; CHECK-LLSC-O1-NEXT:    mov v0.d[1], x9
-; CHECK-LLSC-O1-NEXT:    str q0, [x0]
+; CHECK-LLSC-O1-NEXT:    stp x8, x9, [x0]
 ; CHECK-LLSC-O1-NEXT:    ret
 ;
 ; CHECK-OUTLINE-LLSC-O1-LABEL: val_compare_and_swap:
@@ -45,9 +43,7 @@ define void @val_compare_and_swap(ptr %p, i128 %oldval, i128 %newval) {
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    mov x3, x5
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    mov x4, x19
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    bl __aarch64_cas16_acq
-; CHECK-OUTLINE-LLSC-O1-NEXT:    mov v0.d[0], x0
-; CHECK-OUTLINE-LLSC-O1-NEXT:    mov v0.d[1], x1
-; CHECK-OUTLINE-LLSC-O1-NEXT:    str q0, [x19]
+; CHECK-OUTLINE-LLSC-O1-NEXT:    stp x0, x1, [x19]
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    ret
 ;
@@ -58,9 +54,7 @@ define void @val_compare_and_swap(ptr %p, i128 %oldval, i128 %newval) {
 ; CHECK-CAS-O1-NEXT:    // kill: def $x3 killed $x3 killed $x2_x3 def $x2_x3
 ; CHECK-CAS-O1-NEXT:    // kill: def $x5 killed $x5 killed $x4_x5 def $x4_x5
 ; CHECK-CAS-O1-NEXT:    caspa x2, x3, x4, x5, [x0]
-; CHECK-CAS-O1-NEXT:    mov v0.d[0], x2
-; CHECK-CAS-O1-NEXT:    mov v0.d[1], x3
-; CHECK-CAS-O1-NEXT:    str q0, [x0]
+; CHECK-CAS-O1-NEXT:    stp x2, x3, [x0]
 ; CHECK-CAS-O1-NEXT:    ret
 ;
 ; CHECK-LLSC-O0-LABEL: val_compare_and_swap:
@@ -154,9 +148,7 @@ define void @val_compare_and_swap_monotonic_seqcst(ptr %p, i128 %oldval, i128 %n
 ; CHECK-LLSC-O1-NEXT:    stlxp w10, x4, x5, [x0]
 ; CHECK-LLSC-O1-NEXT:    cbnz w10, .LBB1_1
 ; CHECK-LLSC-O1-NEXT:  .LBB1_4:
-; CHECK-LLSC-O1-NEXT:    mov v0.d[0], x8
-; CHECK-LLSC-O1-NEXT:    mov v0.d[1], x9
-; CHECK-LLSC-O1-NEXT:    str q0, [x0]
+; CHECK-LLSC-O1-NEXT:    stp x8, x9, [x0]
 ; CHECK-LLSC-O1-NEXT:    ret
 ;
 ; CHECK-OUTLINE-LLSC-O1-LABEL: val_compare_and_swap_monotonic_seqcst:
@@ -172,9 +164,7 @@ define void @val_compare_and_swap_monotonic_seqcst(ptr %p, i128 %oldval, i128 %n
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    mov x3, x5
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    mov x4, x19
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    bl __aarch64_cas16_acq_rel
-; CHECK-OUTLINE-LLSC-O1-NEXT:    mov v0.d[0], x0
-; CHECK-OUTLINE-LLSC-O1-NEXT:    mov v0.d[1], x1
-; CHECK-OUTLINE-LLSC-O1-NEXT:    str q0, [x19]
+; CHECK-OUTLINE-LLSC-O1-NEXT:    stp x0, x1, [x19]
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    ret
 ;
@@ -185,9 +175,7 @@ define void @val_compare_and_swap_monotonic_seqcst(ptr %p, i128 %oldval, i128 %n
 ; CHECK-CAS-O1-NEXT:    // kill: def $x3 killed $x3 killed $x2_x3 def $x2_x3
 ; CHECK-CAS-O1-NEXT:    // kill: def $x5 killed $x5 killed $x4_x5 def $x4_x5
 ; CHECK-CAS-O1-NEXT:    caspal x2, x3, x4, x5, [x0]
-; CHECK-CAS-O1-NEXT:    mov v0.d[0], x2
-; CHECK-CAS-O1-NEXT:    mov v0.d[1], x3
-; CHECK-CAS-O1-NEXT:    str q0, [x0]
+; CHECK-CAS-O1-NEXT:    stp x2, x3, [x0]
 ; CHECK-CAS-O1-NEXT:    ret
 ;
 ; CHECK-LLSC-O0-LABEL: val_compare_and_swap_monotonic_seqcst:
@@ -281,9 +269,7 @@ define void @val_compare_and_swap_release_acquire(ptr %p, i128 %oldval, i128 %ne
 ; CHECK-LLSC-O1-NEXT:    stlxp w10, x4, x5, [x0]
 ; CHECK-LLSC-O1-NEXT:    cbnz w10, .LBB2_1
 ; CHECK-LLSC-O1-NEXT:  .LBB2_4:
-; CHECK-LLSC-O1-NEXT:    mov v0.d[0], x8
-; CHECK-LLSC-O1-NEXT:    mov v0.d[1], x9
-; CHECK-LLSC-O1-NEXT:    str q0, [x0]
+; CHECK-LLSC-O1-NEXT:    stp x8, x9, [x0]
 ; CHECK-LLSC-O1-NEXT:    ret
 ;
 ; CHECK-OUTLINE-LLSC-O1-LABEL: val_compare_and_swap_release_acquire:
@@ -299,9 +285,7 @@ define void @val_compare_and_swap_release_acquire(ptr %p, i128 %oldval, i128 %ne
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    mov x3, x5
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    mov x4, x19
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    bl __aarch64_cas16_acq_rel
-; CHECK-OUTLINE-LLSC-O1-NEXT:    mov v0.d[0], x0
-; CHECK-OUTLINE-LLSC-O1-NEXT:    mov v0.d[1], x1
-; CHECK-OUTLINE-LLSC-O1-NEXT:    str q0, [x19]
+; CHECK-OUTLINE-LLSC-O1-NEXT:    stp x0, x1, [x19]
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    ret
 ;
@@ -312,9 +296,7 @@ define void @val_compare_and_swap_release_acquire(ptr %p, i128 %oldval, i128 %ne
 ; CHECK-CAS-O1-NEXT:    // kill: def $x3 killed $x3 killed $x2_x3 def $x2_x3
 ; CHECK-CAS-O1-NEXT:    // kill: def $x5 killed $x5 killed $x4_x5 def $x4_x5
 ; CHECK-CAS-O1-NEXT:    caspal x2, x3, x4, x5, [x0]
-; CHECK-CAS-O1-NEXT:    mov v0.d[0], x2
-; CHECK-CAS-O1-NEXT:    mov v0.d[1], x3
-; CHECK-CAS-O1-NEXT:    str q0, [x0]
+; CHECK-CAS-O1-NEXT:    stp x2, x3, [x0]
 ; CHECK-CAS-O1-NEXT:    ret
 ;
 ; CHECK-LLSC-O0-LABEL: val_compare_and_swap_release_acquire:
@@ -408,9 +390,7 @@ define void @val_compare_and_swap_monotonic(ptr %p, i128 %oldval, i128 %newval)
 ; CHECK-LLSC-O1-NEXT:    stlxp w10, x4, x5, [x0]
 ; CHECK-LLSC-O1-NEXT:    cbnz w10, .LBB3_1
 ; CHECK-LLSC-O1-NEXT:  .LBB3_4:
-; CHECK-LLSC-O1-NEXT:    mov v0.d[0], x8
-; CHECK-LLSC-O1-NEXT:    mov v0.d[1], x9
-; CHECK-LLSC-O1-NEXT:    str q0, [x0]
+; CHECK-LLSC-O1-NEXT:    stp x8, x9, [x0]
 ; CHECK-LLSC-O1-NEXT:    ret
 ;
 ; CHECK-OUTLINE-LLSC-O1-LABEL: val_compare_and_swap_monotonic:
@@ -426,9 +406,7 @@ define void @val_compare_and_swap_monotonic(ptr %p, i128 %oldval, i128 %newval)
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    mov x3, x5
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    mov x4, x19
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    bl __aarch64_cas16_acq_rel
-; CHECK-OUTLINE-LLSC-O1-NEXT:    mov v0.d[0], x0
-; CHECK-OUTLINE-LLSC-O1-NEXT:    mov v0.d[1], x1
-; CHECK-OUTLINE-LLSC-O1-NEXT:    str q0, [x19]
+; CHECK-OUTLINE-LLSC-O1-NEXT:    stp x0, x1, [x19]
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    ldp x30, x19, [sp], #16 // 16-byte Folded Reload
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    ret
 ;
@@ -439,9 +417,7 @@ define void @val_compare_and_swap_monotonic(ptr %p, i128 %oldval, i128 %newval)
 ; CHECK-CAS-O1-NEXT:    // kill: def $x3 killed $x3 killed $x2_x3 def $x2_x3
 ; CHECK-CAS-O1-NEXT:    // kill: def $x5 killed $x5 killed $x4_x5 def $x4_x5
 ; CHECK-CAS-O1-NEXT:    caspal x2, x3, x4, x5, [x0]
-; CHECK-CAS-O1-NEXT:    mov v0.d[0], x2
-; CHECK-CAS-O1-NEXT:    mov v0.d[1], x3
-; CHECK-CAS-O1-NEXT:    str q0, [x0]
+; CHECK-CAS-O1-NEXT:    stp x2, x3, [x0]
 ; CHECK-CAS-O1-NEXT:    ret
 ;
 ; CHECK-LLSC-O0-LABEL: val_compare_and_swap_monotonic:
@@ -525,9 +501,7 @@ define void @atomic_load_relaxed(i64, i64, ptr %p, ptr %p2) {
 ; CHECK-LLSC-O1-NEXT:    stxp w10, x9, x8, [x2]
 ; CHECK-LLSC-O1-NEXT:    cbnz w10, .LBB4_1
 ; CHECK-LLSC-O1-NEXT:  // %bb.2: // %atomicrmw.end
-; CHECK-LLSC-O1-NEXT:    mov v0.d[0], x9
-; CHECK-LLSC-O1-NEXT:    mov v0.d[1], x8
-; CHECK-LLSC-O1-NEXT:    str q0, [x3]
+; CHECK-LLSC-O1-NEXT:    stp x9, x8, [x3]
 ; CHECK-LLSC-O1-NEXT:    ret
 ;
 ; CHECK-OUTLINE-LLSC-O1-LABEL: atomic_load_relaxed:
@@ -538,9 +512,7 @@ define void @atomic_load_relaxed(i64, i64, ptr %p, ptr %p2) {
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    stxp w10, x9, x8, [x2]
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    cbnz w10, .LBB4_1
 ; CHECK-OUTLINE-LLSC-O1-NEXT:  // %bb.2: // %atomicrmw.end
-; CHECK-OUTLINE-LLSC-O1-NEXT:    mov v0.d[0], x9
-; CHECK-OUTLINE-LLSC-O1-NEXT:    mov v0.d[1], x8
-; CHECK-OUTLINE-LLSC-O1-NEXT:    str q0, [x3]
+; CHECK-OUTLINE-LLSC-O1-NEXT:    stp x9, x8, [x3]
 ; CHECK-OUTLINE-LLSC-O1-NEXT:    ret
 ;
 ; CHECK-CAS-O1-LABEL: atomic_load_relaxed:
@@ -548,9 +520,7 @@ define void @atomic_load_relaxed(i64, i64, ptr %p, ptr %p2) {
 ; CHECK-CAS-O1-NEXT:    mov x0, xzr
 ; CHECK-CAS-O1-NEXT:    mov x1, xzr
 ; CHECK-CAS-O1-NEXT:    casp x0, x1, x0, x1, [x2]
-; CHECK-CAS-O1-NEXT:    mov v0.d[0], x0
-; CHECK-CAS-O1-NEXT:    mov v0.d[1], x1
-; CHECK-CAS-O1-NEXT:    str q0, [x3]
+; CHECK-CAS-O1-NEXT:    stp x0, x1, [x3]
 ; CHECK-CAS-O1-NEXT:    ret
 ;
 ; CHECK-LLSC-O0-LABEL: atomic_load_relaxed:
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index f8cd868a4c755..94469cf262e3e 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -101,19 +101,6 @@ entry:
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %4:_(s128), %5:_(s1) = G_UMULO %0:_, %6:_ (in function: umul_s128)
-; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for umul_s128
-; FALLBACK-WITH-REPORT-OUT-LABEL: umul_s128
-declare {i128, i1} @llvm.umul.with.overflow.i128(i128, i128) nounwind readnone
-define zeroext i1 @umul_s128(i128 %v1, ptr %res) {
-entry:
-  %t = call {i128, i1} @llvm.umul.with.overflow.i128(i128 %v1, i128 2)
-  %val = extractvalue {i128, i1} %t, 0
-  %obit = extractvalue {i128, i1} %t, 1
-  store i128 %val, ptr %res
-  ret i1 %obit
-}
-
 ; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to translate instruction: {{.*}}llvm.experimental.gc.statepoint{{.*}} (in function: gc_intr)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for gc_intr
 ; FALLBACK-WITH-REPORT-OUT-LABEL: gc_intr
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/v8.4-atomic-128.ll b/llvm/test/CodeGen/AArch64/GlobalISel/v8.4-atomic-128.ll
index fe7e24c2d8ba5..a4dcd0155a449 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/v8.4-atomic-128.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/v8.4-atomic-128.ll
@@ -5,56 +5,42 @@ define void @test_atomic_load(ptr %addr) {
 ; CHECK-LABEL: test_atomic_load:
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0]
-; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
-; CHECK: mov v[[Q]].d[1], [[HI]]
-; CHECK: str q[[Q]], [x0]
+; CHECK: stp [[LO]], [[HI]], [x0]
   %res.0 = load atomic i128, ptr %addr monotonic, align 16
   store i128 %res.0, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0]
-; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
-; CHECK: mov v[[Q]].d[1], [[HI]]
-; CHECK: str q[[Q]], [x0]
+; CHECK: stp [[LO]], [[HI]], [x0]
   %res.1 = load atomic i128, ptr %addr unordered, align 16
   store i128 %res.1, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0]
-; CHECK: dmb ish
-; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
-; CHECK: mov v[[Q]].d[1], [[HI]]
-; CHECK: str q[[Q]], [x0]
+; CHECK: dmb ishld
+; CHECK: stp [[LO]], [[HI]], [x0]
   %res.2 = load atomic i128, ptr %addr acquire, align 16
   store i128 %res.2, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0]
 ; CHECK: dmb ish
-; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
-; CHECK: mov v[[Q]].d[1], [[HI]]
-; CHECK: str q[[Q]], [x0]
+; CHECK: stp [[LO]], [[HI]], [x0]
   %res.3 = load atomic i128, ptr %addr seq_cst, align 16
   store i128 %res.3, ptr %addr
 
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0, #8]
-; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
-; CHECK: mov v[[Q]].d[1], [[HI]]
-; CHECK: str q[[Q]], [x0]
+; CHECK: stp [[LO]], [[HI]], [x0]
   %addr8.1 = getelementptr i8,  ptr %addr, i32 8
   %res.5 = load atomic i128, ptr %addr8.1 monotonic, align 16
   store i128 %res.5, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0, #504]
-; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
-; CHECK: mov v[[Q]].d[1], [[HI]]
-; CHECK: str q[[Q]], [x0]
+; CHECK: stp [[LO]], [[HI]], [x0]
   %addr8.2 = getelementptr i8,  ptr %addr, i32 504
   %res.6 = load atomic i128, ptr %addr8.2 monotonic, align 16
   store i128 %res.6, ptr %addr
 
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0, #-512]
-; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
-; CHECK: mov v[[Q]].d[1], [[HI]]
-; CHECK: str q[[Q]], [x0]
+; CHECK: stp [[LO]], [[HI]], [x0]
   %addr8.3 = getelementptr i8,  ptr %addr, i32 -512
   %res.7 = load atomic i128, ptr %addr8.3 monotonic, align 16
   store i128 %res.7, ptr %addr
@@ -76,9 +62,7 @@ define void @test_nonfolded_load1(ptr %addr) {
 
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #4
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x[[ADDR]]]
-; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
-; CHECK: mov v[[Q]].d[1], [[HI]]
-; CHECK: str q[[Q]], [x0]
+; CHECK: stp [[LO]], [[HI]], [x0]
   %addr8.1 = getelementptr i8,  ptr %addr, i32 4
   %res.1 = load atomic i128, ptr %addr8.1 monotonic, align 16
   store i128 %res.1, ptr %addr
@@ -91,9 +75,7 @@ define void @test_nonfolded_load2(ptr %addr) {
 
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #512
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x[[ADDR]]]
-; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
-; CHECK: mov v[[Q]].d[1], [[HI]]
-; CHECK: str q[[Q]], [x0]
+; CHECK:  stp [[LO]], [[HI]], [x0]
   %addr8.1 = getelementptr i8,  ptr %addr, i32 512
   %res.1 = load atomic i128, ptr %addr8.1 monotonic, align 16
   store i128 %res.1, ptr %addr
@@ -106,9 +88,7 @@ define void @test_nonfolded_load3(ptr %addr) {
 
 ; CHECK: sub x[[ADDR:[0-9]+]], x0, #520
 ; CHECK: ldp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x[[ADDR]]]
-; CHECK: mov v[[Q:[0-9]+]].d[0], [[LO]]
-; CHECK: mov v[[Q]].d[1], [[HI]]
-; CHECK: str q[[Q]], [x0]
+; CHECK: stp [[LO]], [[HI]], [x0]
   %addr8.1 = getelementptr i8,  ptr %addr, i32 -520
   %res.1 = load atomic i128, ptr %addr8.1 monotonic, align 16
   store i128 %res.1, ptr %addr
diff --git a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
index 63dcafed2320a..a188a1cfa7502 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-fold-lslfast.ll
@@ -383,13 +383,11 @@ define i128 @gep4(ptr %p, i128 %a, i64 %b) {
 ; CHECK0-GISEL-LABEL: gep4:
 ; CHECK0-GISEL:       // %bb.0:
 ; CHECK0-GISEL-NEXT:    add x8, x0, x4, lsl #4
-; CHECK0-GISEL-NEXT:    mov v0.d[0], x2
-; CHECK0-GISEL-NEXT:    ldr q1, [x8]
-; CHECK0-GISEL-NEXT:    mov d2, v1.d[1]
-; CHECK0-GISEL-NEXT:    mov v0.d[1], x3
-; CHECK0-GISEL-NEXT:    fmov x0, d1
-; CHECK0-GISEL-NEXT:    fmov x1, d2
-; CHECK0-GISEL-NEXT:    str q0, [x8]
+; CHECK0-GISEL-NEXT:    ldr q0, [x8]
+; CHECK0-GISEL-NEXT:    stp x2, x3, [x8]
+; CHECK0-GISEL-NEXT:    mov d1, v0.d[1]
+; CHECK0-GISEL-NEXT:    fmov x0, d0
+; CHECK0-GISEL-NEXT:    fmov x1, d1
 ; CHECK0-GISEL-NEXT:    ret
 ;
 ; CHECK3-SDAG-LABEL: gep4:
@@ -401,14 +399,12 @@ define i128 @gep4(ptr %p, i128 %a, i64 %b) {
 ;
 ; CHECK3-GISEL-LABEL: gep4:
 ; CHECK3-GISEL:       // %bb.0:
-; CHECK3-GISEL-NEXT:    ldr q1, [x0, x4, lsl #4]
-; CHECK3-GISEL-NEXT:    mov v0.d[0], x2
-; CHECK3-GISEL-NEXT:    mov x8, x0
-; CHECK3-GISEL-NEXT:    mov d2, v1.d[1]
-; CHECK3-GISEL-NEXT:    fmov x0, d1
-; CHECK3-GISEL-NEXT:    mov v0.d[1], x3
-; CHECK3-GISEL-NEXT:    fmov x1, d2
-; CHECK3-GISEL-NEXT:    str q0, [x8, x4, lsl #4]
+; CHECK3-GISEL-NEXT:    ldr q0, [x0, x4, lsl #4]
+; CHECK3-GISEL-NEXT:    add x8, x0, x4, lsl #4
+; CHECK3-GISEL-NEXT:    mov d1, v0.d[1]
+; CHECK3-GISEL-NEXT:    fmov x0, d0
+; CHECK3-GISEL-NEXT:    stp x2, x3, [x8]
+; CHECK3-GISEL-NEXT:    fmov x1, d1
 ; CHECK3-GISEL-NEXT:    ret
   %g = getelementptr inbounds i128, ptr %p, i64 %b
   %l = load i128, ptr %g
diff --git a/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll b/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
index 0e1e15f9b6b91..cbcc6184182ae 100644
--- a/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-sli-sri-opt.ll
@@ -663,19 +663,11 @@ define void @testRightBad2x64(<2 x i64> %src1, <2 x i64> %src2, ptr %dest) nounw
 }
 
 define void @testLeftShouldNotCreateSLI1x128(<1 x i128> %src1, <1 x i128> %src2, ptr %dest) nounwind {
-; CHECK-SD-LABEL: testLeftShouldNotCreateSLI1x128:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    bfi x1, x2, #6, #58
-; CHECK-SD-NEXT:    stp x0, x1, [x4]
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: testLeftShouldNotCreateSLI1x128:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    mov.d v0[0], x0
-; CHECK-GI-NEXT:    bfi x1, x2, #6, #58
-; CHECK-GI-NEXT:    mov.d v0[1], x1
-; CHECK-GI-NEXT:    str q0, [x4]
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: testLeftShouldNotCreateSLI1x128:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bfi x1, x2, #6, #58
+; CHECK-NEXT:    stp x0, x1, [x4]
+; CHECK-NEXT:    ret
   %and.i = and <1 x i128> %src1, <i128 1180591620717411303423>
   %vshl_n = shl <1 x i128> %src2, <i128 70>
   %result = or <1 x i128> %and.i, %vshl_n
diff --git a/llvm/test/CodeGen/AArch64/dup.ll b/llvm/test/CodeGen/AArch64/dup.ll
index 6df6d76fb0592..6b0c62490a5db 100644
--- a/llvm/test/CodeGen/AArch64/dup.ll
+++ b/llvm/test/CodeGen/AArch64/dup.ll
@@ -1252,16 +1252,15 @@ define <2 x i128> @loaddup_str_v2i128(ptr %p) {
 ;
 ; CHECK-GI-LABEL: loaddup_str_v2i128:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    mov v0.d[0], xzr
+; CHECK-GI-NEXT:    ldr q0, [x0]
 ; CHECK-GI-NEXT:    mov x8, x0
-; CHECK-GI-NEXT:    mov d2, v1.d[1]
-; CHECK-GI-NEXT:    fmov x0, d1
-; CHECK-GI-NEXT:    fmov x2, d1
-; CHECK-GI-NEXT:    mov v0.d[1], xzr
-; CHECK-GI-NEXT:    fmov x1, d2
-; CHECK-GI-NEXT:    fmov x3, d2
-; CHECK-GI-NEXT:    str q0, [x8]
+; CHECK-GI-NEXT:    str xzr, [x0]
+; CHECK-GI-NEXT:    str xzr, [x8, #8]
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    fmov x2, d0
+; CHECK-GI-NEXT:    fmov x1, d1
+; CHECK-GI-NEXT:    fmov x3, d1
 ; CHECK-GI-NEXT:    ret
 entry:
   %a = load i128, ptr %p
@@ -1340,18 +1339,17 @@ define <3 x i128> @loaddup_str_v3i128(ptr %p) {
 ;
 ; CHECK-GI-LABEL: loaddup_str_v3i128:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    mov v0.d[0], xzr
+; CHECK-GI-NEXT:    ldr q0, [x0]
 ; CHECK-GI-NEXT:    mov x8, x0
-; CHECK-GI-NEXT:    mov d2, v1.d[1]
-; CHECK-GI-NEXT:    fmov x0, d1
-; CHECK-GI-NEXT:    fmov x2, d1
-; CHECK-GI-NEXT:    fmov x4, d1
-; CHECK-GI-NEXT:    mov v0.d[1], xzr
-; CHECK-GI-NEXT:    fmov x1, d2
-; CHECK-GI-NEXT:    fmov x3, d2
-; CHECK-GI-NEXT:    fmov x5, d2
-; CHECK-GI-NEXT:    str q0, [x8]
+; CHECK-GI-NEXT:    str xzr, [x0]
+; CHECK-GI-NEXT:    str xzr, [x8, #8]
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    fmov x2, d0
+; CHECK-GI-NEXT:    fmov x4, d0
+; CHECK-GI-NEXT:    fmov x1, d1
+; CHECK-GI-NEXT:    fmov x3, d1
+; CHECK-GI-NEXT:    fmov x5, d1
 ; CHECK-GI-NEXT:    ret
 entry:
   %a = load i128, ptr %p
@@ -1440,20 +1438,19 @@ define <4 x i128> @loaddup_str_v4i128(ptr %p) {
 ;
 ; CHECK-GI-LABEL: loaddup_str_v4i128:
 ; CHECK-GI:       // %bb.0: // %entry
-; CHECK-GI-NEXT:    ldr q1, [x0]
-; CHECK-GI-NEXT:    mov v0.d[0], xzr
+; CHECK-GI-NEXT:    ldr q0, [x0]
 ; CHECK-GI-NEXT:    mov x8, x0
-; CHECK-GI-NEXT:    mov d2, v1.d[1]
-; CHECK-GI-NEXT:    fmov x0, d1
-; CHECK-GI-NEXT:    fmov x2, d1
-; CHECK-GI-NEXT:    fmov x4, d1
-; CHECK-GI-NEXT:    fmov x6, d1
-; CHECK-GI-NEXT:    mov v0.d[1], xzr
-; CHECK-GI-NEXT:    fmov x1, d2
-; CHECK-GI-NEXT:    fmov x3, d2
-; CHECK-GI-NEXT:    fmov x5, d2
-; CHECK-GI-NEXT:    fmov x7, d2
-; CHECK-GI-NEXT:    str q0, [x8]
+; CHECK-GI-NEXT:    str xzr, [x0]
+; CHECK-GI-NEXT:    str xzr, [x8, #8]
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    fmov x2, d0
+; CHECK-GI-NEXT:    fmov x4, d0
+; CHECK-GI-NEXT:    fmov x6, d0
+; CHECK-GI-NEXT:    fmov x1, d1
+; CHECK-GI-NEXT:    fmov x3, d1
+; CHECK-GI-NEXT:    fmov x5, d1
+; CHECK-GI-NEXT:    fmov x7, d1
 ; CHECK-GI-NEXT:    ret
 entry:
   %a = load i128, ptr %p
diff --git a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
index 2417205759767..d5045c08fc2ac 100644
--- a/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptosi-sat-vector.ll
@@ -4178,86 +4178,62 @@ define <8 x i128> @test_signed_v8f16_v8i128(<8 x half> %f) {
 ; CHECK-GI-CVT:       // %bb.0:
 ; CHECK-GI-CVT-NEXT:    mov h1, v0.h[1]
 ; CHECK-GI-CVT-NEXT:    mov h2, v0.h[2]
-; CHECK-GI-CVT-NEXT:    mov h3, v0.h[3]
-; CHECK-GI-CVT-NEXT:    mov h4, v0.h[4]
-; CHECK-GI-CVT-NEXT:    fcvt s5, h0
-; CHECK-GI-CVT-NEXT:    mov h6, v0.h[5]
-; CHECK-GI-CVT-NEXT:    mov h7, v0.h[6]
-; CHECK-GI-CVT-NEXT:    mov h0, v0.h[7]
+; CHECK-GI-CVT-NEXT:    fcvt s3, h0
+; CHECK-GI-CVT-NEXT:    mov h4, v0.h[3]
+; CHECK-GI-CVT-NEXT:    mov h5, v0.h[4]
 ; CHECK-GI-CVT-NEXT:    fcvt s1, h1
+; CHECK-GI-CVT-NEXT:    fcvtzs x9, s3
 ; CHECK-GI-CVT-NEXT:    fcvt s2, h2
-; CHECK-GI-CVT-NEXT:    fcvt s3, h3
-; CHECK-GI-CVT-NEXT:    fcvtzs x9, s5
+; CHECK-GI-CVT-NEXT:    mov h3, v0.h[5]
 ; CHECK-GI-CVT-NEXT:    fcvt s4, h4
-; CHECK-GI-CVT-NEXT:    fcvt s5, h6
-; CHECK-GI-CVT-NEXT:    fcvt s0, h0
+; CHECK-GI-CVT-NEXT:    fcvt s5, h5
 ; CHECK-GI-CVT-NEXT:    fcvtzs x10, s1
-; CHECK-GI-CVT-NEXT:    fcvt s1, h7
+; CHECK-GI-CVT-NEXT:    mov h1, v0.h[6]
+; CHECK-GI-CVT-NEXT:    mov h0, v0.h[7]
 ; CHECK-GI-CVT-NEXT:    fcvtzs x11, s2
-; CHECK-GI-CVT-NEXT:    fcvtzs x12, s3
-; CHECK-GI-CVT-NEXT:    mov v2.d[0], x9
+; CHECK-GI-CVT-NEXT:    stp x9, xzr, [x8]
+; CHECK-GI-CVT-NEXT:    fcvt s2, h3
 ; CHECK-GI-CVT-NEXT:    fcvtzs x9, s4
-; CHECK-GI-CVT-NEXT:    mov v3.d[0], x10
+; CHECK-GI-CVT-NEXT:    stp x10, xzr, [x8, #16]
+; CHECK-GI-CVT-NEXT:    fcvt s1, h1
 ; CHECK-GI-CVT-NEXT:    fcvtzs x10, s5
-; CHECK-GI-CVT-NEXT:    mov v4.d[0], x11
-; CHECK-GI-CVT-NEXT:    fcvtzs x11, s1
-; CHECK-GI-CVT-NEXT:    mov v1.d[0], x12
-; CHECK-GI-CVT-NEXT:    fcvtzs x12, s0
-; CHECK-GI-CVT-NEXT:    mov v0.d[0], x9
-; CHECK-GI-CVT-NEXT:    mov v2.d[1], xzr
-; CHECK-GI-CVT-NEXT:    mov v5.d[0], x10
-; CHECK-GI-CVT-NEXT:    mov v3.d[1], xzr
-; CHECK-GI-CVT-NEXT:    mov v4.d[1], xzr
-; CHECK-GI-CVT-NEXT:    mov v6.d[0], x11
-; CHECK-GI-CVT-NEXT:    mov v7.d[0], x12
-; CHECK-GI-CVT-NEXT:    mov v1.d[1], xzr
-; CHECK-GI-CVT-NEXT:    mov v0.d[1], xzr
-; CHECK-GI-CVT-NEXT:    mov v5.d[1], xzr
-; CHECK-GI-CVT-NEXT:    stp q2, q3, [x8]
-; CHECK-GI-CVT-NEXT:    mov v6.d[1], xzr
-; CHECK-GI-CVT-NEXT:    mov v7.d[1], xzr
-; CHECK-GI-CVT-NEXT:    stp q4, q1, [x8, #32]
-; CHECK-GI-CVT-NEXT:    stp q0, q5, [x8, #64]
-; CHECK-GI-CVT-NEXT:    stp q6, q7, [x8, #96]
+; CHECK-GI-CVT-NEXT:    fcvt s0, h0
+; CHECK-GI-CVT-NEXT:    stp x11, xzr, [x8, #32]
+; CHECK-GI-CVT-NEXT:    fcvtzs x11, s2
+; CHECK-GI-CVT-NEXT:    stp x9, xzr, [x8, #48]
+; CHECK-GI-CVT-NEXT:    fcvtzs x9, s1
+; CHECK-GI-CVT-NEXT:    stp x10, xzr, [x8, #64]
+; CHECK-GI-CVT-NEXT:    fcvtzs x10, s0
+; CHECK-GI-CVT-NEXT:    stp x11, xzr, [x8, #80]
+; CHECK-GI-CVT-NEXT:    stp x9, xzr, [x8, #96]
+; CHECK-GI-CVT-NEXT:    stp x10, xzr, [x8, #112]
 ; CHECK-GI-CVT-NEXT:    ret
 ;
 ; CHECK-GI-FP16-LABEL: test_signed_v8f16_v8i128:
 ; CHECK-GI-FP16:       // %bb.0:
 ; CHECK-GI-FP16-NEXT:    mov h1, v0.h[1]
 ; CHECK-GI-FP16-NEXT:    mov h2, v0.h[2]
-; CHECK-GI-FP16-NEXT:    mov h3, v0.h[3]
-; CHECK-GI-FP16-NEXT:    mov h4, v0.h[4]
 ; CHECK-GI-FP16-NEXT:    fcvtzs x9, h0
-; CHECK-GI-FP16-NEXT:    mov h5, v0.h[5]
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[3]
 ; CHECK-GI-FP16-NEXT:    fcvtzs x10, h1
-; CHECK-GI-FP16-NEXT:    mov h1, v0.h[6]
+; CHECK-GI-FP16-NEXT:    mov h1, v0.h[4]
 ; CHECK-GI-FP16-NEXT:    fcvtzs x11, h2
+; CHECK-GI-FP16-NEXT:    stp x9, xzr, [x8]
+; CHECK-GI-FP16-NEXT:    mov h2, v0.h[5]
+; CHECK-GI-FP16-NEXT:    fcvtzs x9, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[6]
 ; CHECK-GI-FP16-NEXT:    mov h0, v0.h[7]
-; CHECK-GI-FP16-NEXT:    fcvtzs x12, h3
-; CHECK-GI-FP16-NEXT:    mov v2.d[0], x9
-; CHECK-GI-FP16-NEXT:    fcvtzs x9, h4
-; CHECK-GI-FP16-NEXT:    mov v3.d[0], x10
-; CHECK-GI-FP16-NEXT:    fcvtzs x10, h5
-; CHECK-GI-FP16-NEXT:    mov v4.d[0], x11
-; CHECK-GI-FP16-NEXT:    fcvtzs x11, h1
-; CHECK-GI-FP16-NEXT:    mov v1.d[0], x12
-; CHECK-GI-FP16-NEXT:    fcvtzs x12, h0
-; CHECK-GI-FP16-NEXT:    mov v0.d[0], x9
-; CHECK-GI-FP16-NEXT:    mov v2.d[1], xzr
-; CHECK-GI-FP16-NEXT:    mov v5.d[0], x10
-; CHECK-GI-FP16-NEXT:    mov v3.d[1], xzr
-; CHECK-GI-FP16-NEXT:    mov v4.d[1], xzr
-; CHECK-GI-FP16-NEXT:    mov v6.d[0], x11
-; CHECK-GI-FP16-NEXT:    mov v7.d[0], x12
-; CHECK-GI-FP16-NEXT:    mov v1.d[1], xzr
-; CHECK-GI-FP16-NEXT:    mov v0.d[1], xzr
-; CHECK-GI-FP16-NEXT:    mov v5.d[1], xzr
-; CHECK-GI-FP16-NEXT:    stp q2, q3, [x8]
-; CHECK-GI-FP16-NEXT:    mov v6.d[1], xzr
-; CHECK-GI-FP16-NEXT:    mov v7.d[1], xzr
-; CHECK-GI-FP16-NEXT:    stp q4, q1, [x8, #32]
-; CHECK-GI-FP16-NEXT:    stp q0, q5, [x8, #64]
-; CHECK-GI-FP16-NEXT:    stp q6, q7, [x8, #96]
+; CHECK-GI-FP16-NEXT:    stp x10, xzr, [x8, #16]
+; CHECK-GI-FP16-NEXT:    fcvtzs x10, h1
+; CHECK-GI-FP16-NEXT:    stp x11, xzr, [x8, #32]
+; CHECK-GI-FP16-NEXT:    fcvtzs x11, h2
+; CHECK-GI-FP16-NEXT:    stp x9, xzr, [x8, #48]
+; CHECK-GI-FP16-NEXT:    fcvtzs x9, h3
+; CHECK-GI-FP16-NEXT:    stp x10, xzr, [x8, #64]
+; CHECK-GI-FP16-NEXT:    fcvtzs x10, h0
+; CHECK-GI-FP16-NEXT:    stp x11, xzr, [x8, #80]
+; CHECK-GI-FP16-NEXT:    stp x9, xzr, [x8, #96]
+; CHECK-GI-FP16-NEXT:    stp x10, xzr, [x8, #112]
 ; CHECK-GI-FP16-NEXT:    ret
     %x = call <8 x i128> @llvm.fptosi.sat.v8f16.v8i128(<8 x half> %f)
     ret <8 x i128> %x
diff --git a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
index ecca1165753bf..94ef4ab0eec03 100644
--- a/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
+++ b/llvm/test/CodeGen/AArch64/fptoui-sat-vector.ll
@@ -3426,86 +3426,62 @@ define <8 x i128> @test_unsigned_v8f16_v8i128(<8 x half> %f) {
 ; CHECK-GI-CVT:       // %bb.0:
 ; CHECK-GI-CVT-NEXT:    mov h1, v0.h[1]
 ; CHECK-GI-CVT-NEXT:    mov h2, v0.h[2]
-; CHECK-GI-CVT-NEXT:    mov h3, v0.h[3]
-; CHECK-GI-CVT-NEXT:    mov h4, v0.h[4]
-; CHECK-GI-CVT-NEXT:    fcvt s5, h0
-; CHECK-GI-CVT-NEXT:    mov h6, v0.h[5]
-; CHECK-GI-CVT-NEXT:    mov h7, v0.h[6]
-; CHECK-GI-CVT-NEXT:    mov h0, v0.h[7]
+; CHECK-GI-CVT-NEXT:    fcvt s3, h0
+; CHECK-GI-CVT-NEXT:    mov h4, v0.h[3]
+; CHECK-GI-CVT-NEXT:    mov h5, v0.h[4]
 ; CHECK-GI-CVT-NEXT:    fcvt s1, h1
+; CHECK-GI-CVT-NEXT:    fcvtzu x9, s3
 ; CHECK-GI-CVT-NEXT:    fcvt s2, h2
-; CHECK-GI-CVT-NEXT:    fcvt s3, h3
-; CHECK-GI-CVT-NEXT:    fcvtzu x9, s5
+; CHECK-GI-CVT-NEXT:    mov h3, v0.h[5]
 ; CHECK-GI-CVT-NEXT:    fcvt s4, h4
-; CHECK-GI-CVT-NEXT:    fcvt s5, h6
-; CHECK-GI-CVT-NEXT:    fcvt s0, h0
+; CHECK-GI-CVT-NEXT:    fcvt s5, h5
 ; CHECK-GI-CVT-NEXT:    fcvtzu x10, s1
-; CHECK-GI-CVT-NEXT:    fcvt s1, h7
+; CHECK-GI-CVT-NEXT:    mov h1, v0.h[6]
+; CHECK-GI-CVT-NEXT:    mov h0, v0.h[7]
 ; CHECK-GI-CVT-NEXT:    fcvtzu x11, s2
-; CHECK-GI-CVT-NEXT:    fcvtzu x12, s3
-; CHECK-GI-CVT-NEXT:    mov v2.d[0], x9
+; CHECK-GI-CVT-NEXT:    stp x9, xzr, [x8]
+; CHECK-GI-CVT-NEXT:    fcvt s2, h3
 ; CHECK-GI-CVT-NEXT:    fcvtzu x9, s4
-; CHECK-GI-CVT-NEXT:    mov v3.d[0], x10
+; CHECK-GI-CVT-NEXT:    stp x10, xzr, [x8, #16]
+; CHECK-GI-CVT-NEXT:    fcvt s1, h1
 ; CHECK-GI-CVT-NEXT:    fcvtzu x10, s5
-; CHECK-GI-CVT-NEXT:    mov v4.d[0], x11
-; CHECK-GI-CVT-NEXT:    fcvtzu x11, s1
-; CHECK-GI-CVT-NEXT:    mov v1.d[0], x12
-; CHECK-GI-CVT-NEXT:    fcvtzu x12, s0
-; CHECK-GI-CVT-NEXT:    mov v0.d[0], x9
-; CHECK-GI-CVT-NEXT:    mov v2.d[1], xzr
-; CHECK-GI-CVT-NEXT:    mov v5.d[0], x10
-; CHECK-GI-CVT-NEXT:    mov v3.d[1], xzr
-; CHECK-GI-CVT-NEXT:    mov v4.d[1], xzr
-; CHECK-GI-CVT-NEXT:    mov v6.d[0], x11
-; CHECK-GI-CVT-NEXT:    mov v7.d[0], x12
-; CHECK-GI-CVT-NEXT:    mov v1.d[1], xzr
-; CHECK-GI-CVT-NEXT:    mov v0.d[1], xzr
-; CHECK-GI-CVT-NEXT:    mov v5.d[1], xzr
-; CHECK-GI-CVT-NEXT:    stp q2, q3, [x8]
-; CHECK-GI-CVT-NEXT:    mov v6.d[1], xzr
-; CHECK-GI-CVT-NEXT:    mov v7.d[1], xzr
-; CHECK-GI-CVT-NEXT:    stp q4, q1, [x8, #32]
-; CHECK-GI-CVT-NEXT:    stp q0, q5, [x8, #64]
-; CHECK-GI-CVT-NEXT:    stp q6, q7, [x8, #96]
+; CHECK-GI-CVT-NEXT:    fcvt s0, h0
+; CHECK-GI-CVT-NEXT:    stp x11, xzr, [x8, #32]
+; CHECK-GI-CVT-NEXT:    fcvtzu x11, s2
+; CHECK-GI-CVT-NEXT:    stp x9, xzr, [x8, #48]
+; CHECK-GI-CVT-NEXT:    fcvtzu x9, s1
+; CHECK-GI-CVT-NEXT:    stp x10, xzr, [x8, #64]
+; CHECK-GI-CVT-NEXT:    fcvtzu x10, s0
+; CHECK-GI-CVT-NEXT:    stp x11, xzr, [x8, #80]
+; CHECK-GI-CVT-NEXT:    stp x9, xzr, [x8, #96]
+; CHECK-GI-CVT-NEXT:    stp x10, xzr, [x8, #112]
 ; CHECK-GI-CVT-NEXT:    ret
 ;
 ; CHECK-GI-FP16-LABEL: test_unsigned_v8f16_v8i128:
 ; CHECK-GI-FP16:       // %bb.0:
 ; CHECK-GI-FP16-NEXT:    mov h1, v0.h[1]
 ; CHECK-GI-FP16-NEXT:    mov h2, v0.h[2]
-; CHECK-GI-FP16-NEXT:    mov h3, v0.h[3]
-; CHECK-GI-FP16-NEXT:    mov h4, v0.h[4]
 ; CHECK-GI-FP16-NEXT:    fcvtzu x9, h0
-; CHECK-GI-FP16-NEXT:    mov h5, v0.h[5]
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[3]
 ; CHECK-GI-FP16-NEXT:    fcvtzu x10, h1
-; CHECK-GI-FP16-NEXT:    mov h1, v0.h[6]
+; CHECK-GI-FP16-NEXT:    mov h1, v0.h[4]
 ; CHECK-GI-FP16-NEXT:    fcvtzu x11, h2
+; CHECK-GI-FP16-NEXT:    stp x9, xzr, [x8]
+; CHECK-GI-FP16-NEXT:    mov h2, v0.h[5]
+; CHECK-GI-FP16-NEXT:    fcvtzu x9, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[6]
 ; CHECK-GI-FP16-NEXT:    mov h0, v0.h[7]
-; CHECK-GI-FP16-NEXT:    fcvtzu x12, h3
-; CHECK-GI-FP16-NEXT:    mov v2.d[0], x9
-; CHECK-GI-FP16-NEXT:    fcvtzu x9, h4
-; CHECK-GI-FP16-NEXT:    mov v3.d[0], x10
-; CHECK-GI-FP16-NEXT:    fcvtzu x10, h5
-; CHECK-GI-FP16-NEXT:    mov v4.d[0], x11
-; CHECK-GI-FP16-NEXT:    fcvtzu x11, h1
-; CHECK-GI-FP16-NEXT:    mov v1.d[0], x12
-; CHECK-GI-FP16-NEXT:    fcvtzu x12, h0
-; CHECK-GI-FP16-NEXT:    mov v0.d[0], x9
-; CHECK-GI-FP16-NEXT:    mov v2.d[1], xzr
-; CHECK-GI-FP16-NEXT:    mov v5.d[0], x10
-; CHECK-GI-FP16-NEXT:    mov v3.d[1], xzr
-; CHECK-GI-FP16-NEXT:    mov v4.d[1], xzr
-; CHECK-GI-FP16-NEXT:    mov v6.d[0], x11
-; CHECK-GI-FP16-NEXT:    mov v7.d[0], x12
-; CHECK-GI-FP16-NEXT:    mov v1.d[1], xzr
-; CHECK-GI-FP16-NEXT:    mov v0.d[1], xzr
-; CHECK-GI-FP16-NEXT:    mov v5.d[1], xzr
-; CHECK-GI-FP16-NEXT:    stp q2, q3, [x8]
-; CHECK-GI-FP16-NEXT:    mov v6.d[1], xzr
-; CHECK-GI-FP16-NEXT:    mov v7.d[1], xzr
-; CHECK-GI-FP16-NEXT:    stp q4, q1, [x8, #32]
-; CHECK-GI-FP16-NEXT:    stp q0, q5, [x8, #64]
-; CHECK-GI-FP16-NEXT:    stp q6, q7, [x8, #96]
+; CHECK-GI-FP16-NEXT:    stp x10, xzr, [x8, #16]
+; CHECK-GI-FP16-NEXT:    fcvtzu x10, h1
+; CHECK-GI-FP16-NEXT:    stp x11, xzr, [x8, #32]
+; CHECK-GI-FP16-NEXT:    fcvtzu x11, h2
+; CHECK-GI-FP16-NEXT:    stp x9, xzr, [x8, #48]
+; CHECK-GI-FP16-NEXT:    fcvtzu x9, h3
+; CHECK-GI-FP16-NEXT:    stp x10, xzr, [x8, #64]
+; CHECK-GI-FP16-NEXT:    fcvtzu x10, h0
+; CHECK-GI-FP16-NEXT:    stp x11, xzr, [x8, #80]
+; CHECK-GI-FP16-NEXT:    stp x9, xzr, [x8, #96]
+; CHECK-GI-FP16-NEXT:    stp x10, xzr, [x8, #112]
 ; CHECK-GI-FP16-NEXT:    ret
     %x = call <8 x i128> @llvm.fptoui.sat.v8f16.v8i128(<8 x half> %f)
     ret <8 x i128> %x
diff --git a/llvm/test/CodeGen/AArch64/i128_with_overflow.ll b/llvm/test/CodeGen/AArch64/i128_with_overflow.ll
index 3d90e094a5747..472ac0dbcacce 100644
--- a/llvm/test/CodeGen/AArch64/i128_with_overflow.ll
+++ b/llvm/test/CodeGen/AArch64/i128_with_overflow.ll
@@ -2,8 +2,7 @@
 ; RUN: llc -mtriple=aarch64 -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -o - %s 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:       warning: Instruction selection used fallback path for test_umul_i128
-; CHECK-GI-NEXT:  warning: Instruction selection used fallback path for test_smul_i128
+; CHECK-GI:  warning: Instruction selection used fallback path for test_smul_i128
 
 define i128 @test_uadd_i128(i128 noundef %x, i128 noundef %y) {
 ; CHECK-SD-LABEL: test_uadd_i128:
@@ -222,41 +221,87 @@ cleanup:
 }
 
 define i128 @test_umul_i128(i128 noundef %x, i128 noundef %y) {
-; CHECK-LABEL: test_umul_i128:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    orr x8, x1, x3
-; CHECK-NEXT:    cbz x8, .LBB4_2
-; CHECK-NEXT:  // %bb.1: // %overflow
-; CHECK-NEXT:    mul x9, x3, x0
-; CHECK-NEXT:    cmp x1, #0
-; CHECK-NEXT:    ccmp x3, #0, #4, ne
-; CHECK-NEXT:    umulh x10, x1, x2
-; CHECK-NEXT:    umulh x8, x3, x0
-; CHECK-NEXT:    madd x9, x1, x2, x9
-; CHECK-NEXT:    ccmp xzr, x10, #0, eq
-; CHECK-NEXT:    umulh x11, x0, x2
-; CHECK-NEXT:    ccmp xzr, x8, #0, eq
-; CHECK-NEXT:    mul x0, x0, x2
-; CHECK-NEXT:    cset w8, ne
-; CHECK-NEXT:    adds x1, x11, x9
-; CHECK-NEXT:    csinc w8, w8, wzr, lo
-; CHECK-NEXT:    cbnz w8, .LBB4_3
-; CHECK-NEXT:    b .LBB4_4
-; CHECK-NEXT:  .LBB4_2: // %overflow.no
-; CHECK-NEXT:    umulh x1, x0, x2
-; CHECK-NEXT:    mul x0, x0, x2
-; CHECK-NEXT:    cbz w8, .LBB4_4
-; CHECK-NEXT:  .LBB4_3: // %if.then
-; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    bl error
-; CHECK-NEXT:    // kill: def $w0 killed $w0 def $x0
-; CHECK-NEXT:    sxtw x0, w0
-; CHECK-NEXT:    asr x1, x0, #63
-; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
-; CHECK-NEXT:  .LBB4_4: // %cleanup
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_umul_i128:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    orr x8, x1, x3
+; CHECK-SD-NEXT:    cbz x8, .LBB4_2
+; CHECK-SD-NEXT:  // %bb.1: // %overflow
+; CHECK-SD-NEXT:    mul x9, x3, x0
+; CHECK-SD-NEXT:    cmp x1, #0
+; CHECK-SD-NEXT:    ccmp x3, #0, #4, ne
+; CHECK-SD-NEXT:    umulh x10, x1, x2
+; CHECK-SD-NEXT:    umulh x8, x3, x0
+; CHECK-SD-NEXT:    madd x9, x1, x2, x9
+; CHECK-SD-NEXT:    ccmp xzr, x10, #0, eq
+; CHECK-SD-NEXT:    umulh x11, x0, x2
+; CHECK-SD-NEXT:    ccmp xzr, x8, #0, eq
+; CHECK-SD-NEXT:    mul x0, x0, x2
+; CHECK-SD-NEXT:    cset w8, ne
+; CHECK-SD-NEXT:    adds x1, x11, x9
+; CHECK-SD-NEXT:    csinc w8, w8, wzr, lo
+; CHECK-SD-NEXT:    cbnz w8, .LBB4_3
+; CHECK-SD-NEXT:    b .LBB4_4
+; CHECK-SD-NEXT:  .LBB4_2: // %overflow.no
+; CHECK-SD-NEXT:    umulh x1, x0, x2
+; CHECK-SD-NEXT:    mul x0, x0, x2
+; CHECK-SD-NEXT:    cbz w8, .LBB4_4
+; CHECK-SD-NEXT:  .LBB4_3: // %if.then
+; CHECK-SD-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-SD-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-SD-NEXT:    .cfi_offset w30, -16
+; CHECK-SD-NEXT:    bl error
+; CHECK-SD-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-SD-NEXT:    sxtw x0, w0
+; CHECK-SD-NEXT:    asr x1, x0, #63
+; CHECK-SD-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-SD-NEXT:  .LBB4_4: // %cleanup
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_umul_i128:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    orr x8, x1, x3
+; CHECK-GI-NEXT:    cbz x8, .LBB4_2
+; CHECK-GI-NEXT:  // %bb.1: // %overflow
+; CHECK-GI-NEXT:    umulh x8, x1, x2
+; CHECK-GI-NEXT:    cmp x1, #0
+; CHECK-GI-NEXT:    cset w12, ne
+; CHECK-GI-NEXT:    cmp x3, #0
+; CHECK-GI-NEXT:    mul x9, x0, x3
+; CHECK-GI-NEXT:    cset w13, ne
+; CHECK-GI-NEXT:    and w12, w12, w13
+; CHECK-GI-NEXT:    umulh x10, x0, x3
+; CHECK-GI-NEXT:    cmp x8, #0
+; CHECK-GI-NEXT:    madd x9, x1, x2, x9
+; CHECK-GI-NEXT:    cset w8, ne
+; CHECK-GI-NEXT:    umulh x11, x0, x2
+; CHECK-GI-NEXT:    cmp x10, #0
+; CHECK-GI-NEXT:    mul x0, x0, x2
+; CHECK-GI-NEXT:    cset w10, ne
+; CHECK-GI-NEXT:    orr w8, w8, w10
+; CHECK-GI-NEXT:    orr w8, w12, w8
+; CHECK-GI-NEXT:    adds x1, x11, x9
+; CHECK-GI-NEXT:    cset w9, hs
+; CHECK-GI-NEXT:    orr w8, w8, w9
+; CHECK-GI-NEXT:    tbnz w8, #0, .LBB4_3
+; CHECK-GI-NEXT:    b .LBB4_4
+; CHECK-GI-NEXT:  .LBB4_2: // %overflow.no
+; CHECK-GI-NEXT:    mov x8, x0
+; CHECK-GI-NEXT:    mul x0, x0, x2
+; CHECK-GI-NEXT:    umulh x1, x8, x2
+; CHECK-GI-NEXT:    mov w8, #0 // =0x0
+; CHECK-GI-NEXT:    tbz w8, #0, .LBB4_4
+; CHECK-GI-NEXT:  .LBB4_3: // %if.then
+; CHECK-GI-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-GI-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-GI-NEXT:    .cfi_offset w30, -16
+; CHECK-GI-NEXT:    bl error
+; CHECK-GI-NEXT:    // kill: def $w0 killed $w0 def $x0
+; CHECK-GI-NEXT:    asr w1, w0, #31
+; CHECK-GI-NEXT:    bfi x0, x1, #32, #32
+; CHECK-GI-NEXT:    bfi x1, x1, #32, #32
+; CHECK-GI-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-GI-NEXT:  .LBB4_4: // %cleanup
+; CHECK-GI-NEXT:    ret
 entry:
   %0 = tail call { i128, i1 } @llvm.umul.with.overflow.i128(i128 %x, i128 %y)
   %1 = extractvalue { i128, i1 } %0, 1
diff --git a/llvm/test/CodeGen/AArch64/insertextract.ll b/llvm/test/CodeGen/AArch64/insertextract.ll
index 9325c8c685560..c647399d37bbc 100644
--- a/llvm/test/CodeGen/AArch64/insertextract.ll
+++ b/llvm/test/CodeGen/AArch64/insertextract.ll
@@ -1390,20 +1390,18 @@ define <2 x i128> @insert_v2i128_c(<2 x i128> %a, i128 %b, i32 %c) {
 ; CHECK-GI-NEXT:    .cfi_offset w30, -8
 ; CHECK-GI-NEXT:    .cfi_offset w29, -16
 ; CHECK-GI-NEXT:    adds x8, x0, x0
-; CHECK-GI-NEXT:    mov v2.d[0], x4
+; CHECK-GI-NEXT:    mov w11, w6
+; CHECK-GI-NEXT:    mov x12, sp
 ; CHECK-GI-NEXT:    adc x9, x1, x1
-; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    adds x8, x2, x2
-; CHECK-GI-NEXT:    mov v1.d[0], x8
-; CHECK-GI-NEXT:    adc x8, x3, x3
-; CHECK-GI-NEXT:    mov v2.d[1], x5
-; CHECK-GI-NEXT:    mov v0.d[1], x9
-; CHECK-GI-NEXT:    mov x9, sp
-; CHECK-GI-NEXT:    mov v1.d[1], x8
-; CHECK-GI-NEXT:    mov w8, w6
-; CHECK-GI-NEXT:    and x8, x8, #0x1
-; CHECK-GI-NEXT:    stp q0, q1, [sp]
-; CHECK-GI-NEXT:    str q2, [x9, x8, lsl #4]
+; CHECK-GI-NEXT:    and x11, x11, #0x1
+; CHECK-GI-NEXT:    adds x10, x2, x2
+; CHECK-GI-NEXT:    stp x8, x9, [sp]
+; CHECK-GI-NEXT:    mov w8, #16 // =0x10
+; CHECK-GI-NEXT:    adc x9, x3, x3
+; CHECK-GI-NEXT:    umaddl x8, w11, w8, x12
+; CHECK-GI-NEXT:    str x10, [sp, #16]
+; CHECK-GI-NEXT:    stur x9, [x12, #24]
+; CHECK-GI-NEXT:    stp x4, x5, [x8]
 ; CHECK-GI-NEXT:    ldp q0, q1, [sp]
 ; CHECK-GI-NEXT:    mov d2, v0.d[1]
 ; CHECK-GI-NEXT:    mov d3, v1.d[1]
@@ -2887,18 +2885,16 @@ define i128 @extract_v2i128_c(<2 x i128> %a, i32 %c) {
 ; CHECK-GI-NEXT:    .cfi_offset w30, -8
 ; CHECK-GI-NEXT:    .cfi_offset w29, -16
 ; CHECK-GI-NEXT:    adds x8, x0, x0
+; CHECK-GI-NEXT:    mov x11, sp
 ; CHECK-GI-NEXT:    adc x9, x1, x1
-; CHECK-GI-NEXT:    mov v0.d[0], x8
-; CHECK-GI-NEXT:    adds x8, x2, x2
-; CHECK-GI-NEXT:    mov v1.d[0], x8
+; CHECK-GI-NEXT:    adds x10, x2, x2
+; CHECK-GI-NEXT:    stp x8, x9, [sp]
 ; CHECK-GI-NEXT:    adc x8, x3, x3
-; CHECK-GI-NEXT:    mov v0.d[1], x9
-; CHECK-GI-NEXT:    mov x9, sp
-; CHECK-GI-NEXT:    mov v1.d[1], x8
-; CHECK-GI-NEXT:    mov w8, w4
-; CHECK-GI-NEXT:    and x8, x8, #0x1
-; CHECK-GI-NEXT:    stp q0, q1, [sp]
-; CHECK-GI-NEXT:    ldr q0, [x9, x8, lsl #4]
+; CHECK-GI-NEXT:    mov w9, w4
+; CHECK-GI-NEXT:    str x10, [sp, #16]
+; CHECK-GI-NEXT:    stur x8, [x11, #24]
+; CHECK-GI-NEXT:    and x8, x9, #0x1
+; CHECK-GI-NEXT:    ldr q0, [x11, x8, lsl #4]
 ; CHECK-GI-NEXT:    mov d1, v0.d[1]
 ; CHECK-GI-NEXT:    fmov x0, d0
 ; CHECK-GI-NEXT:    fmov x1, d1
diff --git a/llvm/test/CodeGen/AArch64/store.ll b/llvm/test/CodeGen/AArch64/store.ll
index 1dc55fccc3dac..c16cd8348d9e3 100644
--- a/llvm/test/CodeGen/AArch64/store.ll
+++ b/llvm/test/CodeGen/AArch64/store.ll
@@ -339,11 +339,8 @@ define void @store_v2i128(<2 x i128> %a, ptr %p) {
 ;
 ; CHECK-GI-LABEL: store_v2i128:
 ; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    mov v0.d[0], x0
-; CHECK-GI-NEXT:    mov v1.d[0], x2
-; CHECK-GI-NEXT:    mov v0.d[1], x1
-; CHECK-GI-NEXT:    mov v1.d[1], x3
-; CHECK-GI-NEXT:    stp q0, q1, [x4]
+; CHECK-GI-NEXT:    stp x0, x1, [x4]
+; CHECK-GI-NEXT:    stp x2, x3, [x4, #16]
 ; CHECK-GI-NEXT:    ret
     store <2 x i128> %a, ptr %p
     ret void


