[llvm] [AArch64][GlobalISel] Add legalization for G_VECREDUCE_SEQ_FADD. (PR #76238)

David Green via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 22 05:53:26 PST 2023


https://github.com/davemgreen created https://github.com/llvm/llvm-project/pull/76238

And G_VECREDUCE_SEQ_FMUL at the same time. They require the elements of the vector operand to be accumulated in order, so they just need to be scalarized.

Some of the operands are not yet simplified as much as they could be, because constant operands are not canonicalized post-legalization.

>From fd900fc792ff2d494cc138bae3c1491b7c80599d Mon Sep 17 00:00:00 2001
From: David Green <david.green at arm.com>
Date: Fri, 22 Dec 2023 13:52:44 +0000
Subject: [PATCH] [AArch64][GlobalISel] Add legalization for
 G_VECREDUCE_SEQ_FADD.

And G_VECREDUCE_SEQ_FMUL at the same time. They require the elements of the
vector operand to be accumulated in order, so they just need to be scalarized.

Some of the operands are not yet simplified as much as they could be, because
constant operands are not canonicalized post-legalization.
---
 .../llvm/CodeGen/GlobalISel/LegalizerHelper.h |   3 +
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    |  33 ++
 .../AArch64/GISel/AArch64LegalizerInfo.cpp    |   4 +
 .../AArch64/GISel/AArch64RegisterBankInfo.cpp |   6 -
 .../GlobalISel/legalizer-info-validation.mir  |   9 +-
 .../CodeGen/AArch64/vecreduce-fadd-strict.ll  | 406 +++++++++++++++---
 .../CodeGen/AArch64/vecreduce-fmul-strict.ll  | 315 ++++++++++++--
 7 files changed, 681 insertions(+), 95 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 711ba10247c34d..24204448785b24 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -347,6 +347,9 @@ class LegalizerHelper {
 
   LegalizeResult fewerElementsVectorReductions(MachineInstr &MI,
                                                unsigned TypeIdx, LLT NarrowTy);
+  LegalizeResult fewerElementsVectorSeqReductions(MachineInstr &MI,
+                                                  unsigned TypeIdx,
+                                                  LLT NarrowTy);
 
   LegalizeResult fewerElementsVectorShuffle(MachineInstr &MI, unsigned TypeIdx,
                                             LLT NarrowTy);
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 37e7153be5720e..09239e2b38af10 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -4515,6 +4515,9 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
     return fewerElementsVectorMultiEltType(GMI, NumElts, {2 /*imm*/});
   GISEL_VECREDUCE_CASES_NONSEQ
     return fewerElementsVectorReductions(MI, TypeIdx, NarrowTy);
+  case TargetOpcode::G_VECREDUCE_SEQ_FADD:
+  case TargetOpcode::G_VECREDUCE_SEQ_FMUL:
+    return fewerElementsVectorSeqReductions(MI, TypeIdx, NarrowTy);
   case G_SHUFFLE_VECTOR:
     return fewerElementsVectorShuffle(MI, TypeIdx, NarrowTy);
   default:
@@ -4746,6 +4749,36 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorReductions(
   return Legalized;
 }
 
+LegalizerHelper::LegalizeResult
+LegalizerHelper::fewerElementsVectorSeqReductions(MachineInstr &MI,
+                                                  unsigned TypeIdx,
+                                                  LLT NarrowTy) {
+  auto [DstReg, DstTy, ScalarReg, ScalarTy, SrcReg, SrcTy] =
+      MI.getFirst3RegLLTs();
+  if (!NarrowTy.isScalar() || TypeIdx != 2 || DstTy != ScalarTy ||
+      DstTy != NarrowTy)
+    return UnableToLegalize;
+
+  assert((MI.getOpcode() == TargetOpcode::G_VECREDUCE_SEQ_FADD ||
+          MI.getOpcode() == TargetOpcode::G_VECREDUCE_SEQ_FMUL) &&
+         "Unexpected vecreduce opcode");
+  unsigned ScalarOpc = MI.getOpcode() == TargetOpcode::G_VECREDUCE_SEQ_FADD
+                           ? TargetOpcode::G_FADD
+                           : TargetOpcode::G_FMUL;
+
+  SmallVector<Register> SplitSrcs;
+  unsigned NumParts = SrcTy.getNumElements();
+  extractParts(SrcReg, NarrowTy, NumParts, SplitSrcs);
+  Register Acc = ScalarReg;
+  for (unsigned i = 0; i < NumParts; i++)
+    Acc = MIRBuilder.buildInstr(ScalarOpc, {NarrowTy}, {Acc, SplitSrcs[i]})
+              .getReg(0);
+
+  MIRBuilder.buildCopy(DstReg, Acc);
+  MI.eraseFromParent();
+  return Legalized;
+}
+
 LegalizerHelper::LegalizeResult
 LegalizerHelper::tryNarrowPow2Reduction(MachineInstr &MI, Register SrcReg,
                                         LLT SrcTy, LLT NarrowTy,
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 8b909f53c84460..8f97e029ad50a4 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -985,6 +985,10 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
       .clampMaxNumElements(1, s16, 8)
       .lower();
 
+  getActionDefinitionsBuilder({G_VECREDUCE_SEQ_FADD, G_VECREDUCE_SEQ_FMUL})
+      .scalarize(2)
+      .lower();
+
   getActionDefinitionsBuilder(G_VECREDUCE_ADD)
       .legalFor({{s8, v16s8},
                  {s8, v8s8},
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index b8e5e7bbdaba77..5e7bb2ae06c94e 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -1071,12 +1071,6 @@ AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     // FPR bank.
     OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR};
     break;
-  case TargetOpcode::G_VECREDUCE_SEQ_FADD:
-  case TargetOpcode::G_VECREDUCE_SEQ_FMUL:
-    // These reductions also take a scalar accumulator input.
-    // Assign them FPR for now.
-    OpRegBankIdx = {PMI_FirstFPR, PMI_FirstFPR, PMI_FirstFPR};
-    break;
   case TargetOpcode::G_INTRINSIC:
   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
     // Check if we know that the intrinsic has any constraints on its register
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index 178db852e35b7e..cc85defa8965ec 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -727,11 +727,12 @@
 # DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_VECREDUCE_SEQ_FADD (opcode {{[0-9]+}}): 3 type indices, 0 imm indices
-# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_VECREDUCE_SEQ_FMUL (opcode {{[0-9]+}}): 3 type indices, 0 imm indices
-# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
+# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_VECREDUCE_FADD (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
 # DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fadd-strict.ll b/llvm/test/CodeGen/AArch64/vecreduce-fadd-strict.ll
index 63b5a97703e645..20237705cc6e5c 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fadd-strict.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fadd-strict.ll
@@ -1,13 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
 ; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
 
 define float @add_HalfS(<2 x float> %bin.rdx)  {
-; CHECK-LABEL: add_HalfS:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    faddp s0, v0.2s
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: add_HalfS:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    faddp s0, v0.2s
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: add_HalfS:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2s, #128, lsl #24
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov s2, v0.s[1]
+; CHECK-GI-NEXT:    fadd s0, s1, s0
+; CHECK-GI-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NEXT:    ret
   %r = call float @llvm.vector.reduce.fadd.f32.v2f32(float -0.0, <2 x float> %bin.rdx)
   ret float %r
 }
@@ -42,6 +53,46 @@ define half @add_HalfH(<4 x half> %bin.rdx)  {
 ; CHECK-SD-FP16-NEXT:    fadd h1, h2, h1
 ; CHECK-SD-FP16-NEXT:    fadd h0, h1, h0
 ; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NOFP16-LABEL: add_HalfH:
+; CHECK-GI-NOFP16:       // %bb.0:
+; CHECK-GI-NOFP16-NEXT:    mov w8, #32768 // =0x8000
+; CHECK-GI-NOFP16-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h0
+; CHECK-GI-NOFP16-NEXT:    fmov s1, w8
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NOFP16-NEXT:    mov h0, v0.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s1, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: add_HalfH:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI1_0
+; CHECK-GI-FP16-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT:    mov h2, v0.h[1]
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[2]
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI1_0]
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h0
+; CHECK-GI-FP16-NEXT:    mov h0, v0.h[3]
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h2
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h3
+; CHECK-GI-FP16-NEXT:    fadd h0, h1, h0
+; CHECK-GI-FP16-NEXT:    ret
   %r = call half @llvm.vector.reduce.fadd.f16.v4f16(half -0.0, <4 x half> %bin.rdx)
   ret half %r
 }
@@ -103,28 +154,115 @@ define half @add_H(<8 x half> %bin.rdx)  {
 ; CHECK-SD-FP16-NEXT:    fadd h1, h1, h2
 ; CHECK-SD-FP16-NEXT:    fadd h0, h1, h0
 ; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NOFP16-LABEL: add_H:
+; CHECK-GI-NOFP16:       // %bb.0:
+; CHECK-GI-NOFP16-NEXT:    mov w8, #32768 // =0x8000
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h0
+; CHECK-GI-NOFP16-NEXT:    fmov s1, w8
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[4]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[5]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[6]
+; CHECK-GI-NOFP16-NEXT:    mov h0, v0.h[7]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s1, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: add_H:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI2_0
+; CHECK-GI-FP16-NEXT:    mov h2, v0.h[1]
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[2]
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI2_0]
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h0
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h2
+; CHECK-GI-FP16-NEXT:    mov h2, v0.h[3]
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[4]
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h2
+; CHECK-GI-FP16-NEXT:    mov h2, v0.h[5]
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[6]
+; CHECK-GI-FP16-NEXT:    mov h0, v0.h[7]
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h2
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h3
+; CHECK-GI-FP16-NEXT:    fadd h0, h1, h0
+; CHECK-GI-FP16-NEXT:    ret
   %r = call half @llvm.vector.reduce.fadd.f16.v8f16(half -0.0, <8 x half> %bin.rdx)
   ret half %r
 }
 
 define float @add_S(<4 x float> %bin.rdx)  {
-; CHECK-LABEL: add_S:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov s1, v0.s[2]
-; CHECK-NEXT:    faddp s2, v0.2s
-; CHECK-NEXT:    mov s0, v0.s[3]
-; CHECK-NEXT:    fadd s1, s2, s1
-; CHECK-NEXT:    fadd s0, s1, s0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: add_S:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov s1, v0.s[2]
+; CHECK-SD-NEXT:    faddp s2, v0.2s
+; CHECK-SD-NEXT:    mov s0, v0.s[3]
+; CHECK-SD-NEXT:    fadd s1, s2, s1
+; CHECK-SD-NEXT:    fadd s0, s1, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: add_S:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2s, #128, lsl #24
+; CHECK-GI-NEXT:    mov s2, v0.s[1]
+; CHECK-GI-NEXT:    mov s3, v0.s[2]
+; CHECK-GI-NEXT:    fadd s1, s1, s0
+; CHECK-GI-NEXT:    mov s0, v0.s[3]
+; CHECK-GI-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NEXT:    fadd s1, s1, s3
+; CHECK-GI-NEXT:    fadd s0, s1, s0
+; CHECK-GI-NEXT:    ret
   %r = call float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %bin.rdx)
   ret float %r
 }
 
 define double @add_D(<2 x double> %bin.rdx)  {
-; CHECK-LABEL: add_D:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    faddp d0, v0.2d
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: add_D:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    faddp d0, v0.2d
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: add_D:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
+; CHECK-GI-NEXT:    mov d2, v0.d[1]
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fadd d0, d1, d0
+; CHECK-GI-NEXT:    fadd d0, d0, d2
+; CHECK-GI-NEXT:    ret
   %r = call double @llvm.vector.reduce.fadd.f64.v2f64(double -0.0, <2 x double> %bin.rdx)
   ret double %r
 }
@@ -239,56 +377,220 @@ define half @add_2H(<16 x half> %bin.rdx)  {
 ; CHECK-SD-FP16-NEXT:    fadd h0, h0, h3
 ; CHECK-SD-FP16-NEXT:    fadd h0, h0, h1
 ; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NOFP16-LABEL: add_2H:
+; CHECK-GI-NOFP16:       // %bb.0:
+; CHECK-GI-NOFP16-NEXT:    mov w8, #32768 // =0x8000
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h0
+; CHECK-GI-NOFP16-NEXT:    fmov s2, w8
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[2]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[4]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[5]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[6]
+; CHECK-GI-NOFP16-NEXT:    mov h0, v0.h[7]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s2, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h1
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[2]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[4]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[5]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[6]
+; CHECK-GI-NOFP16-NEXT:    mov h1, v1.h[7]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: add_2H:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI5_0
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[1]
+; CHECK-GI-FP16-NEXT:    mov h4, v0.h[2]
+; CHECK-GI-FP16-NEXT:    ldr h2, [x8, :lo12:.LCPI5_0]
+; CHECK-GI-FP16-NEXT:    fadd h2, h2, h0
+; CHECK-GI-FP16-NEXT:    fadd h2, h2, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[3]
+; CHECK-GI-FP16-NEXT:    fadd h2, h2, h4
+; CHECK-GI-FP16-NEXT:    mov h4, v0.h[4]
+; CHECK-GI-FP16-NEXT:    fadd h2, h2, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[5]
+; CHECK-GI-FP16-NEXT:    fadd h2, h2, h4
+; CHECK-GI-FP16-NEXT:    mov h4, v0.h[6]
+; CHECK-GI-FP16-NEXT:    mov h0, v0.h[7]
+; CHECK-GI-FP16-NEXT:    fadd h2, h2, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v1.h[2]
+; CHECK-GI-FP16-NEXT:    fadd h2, h2, h4
+; CHECK-GI-FP16-NEXT:    fadd h0, h2, h0
+; CHECK-GI-FP16-NEXT:    mov h2, v1.h[1]
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h2
+; CHECK-GI-FP16-NEXT:    mov h2, v1.h[3]
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v1.h[4]
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h2
+; CHECK-GI-FP16-NEXT:    mov h2, v1.h[5]
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v1.h[6]
+; CHECK-GI-FP16-NEXT:    mov h1, v1.h[7]
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h2
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h3
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h1
+; CHECK-GI-FP16-NEXT:    ret
   %r = call half @llvm.vector.reduce.fadd.f16.v16f16(half -0.0, <16 x half> %bin.rdx)
   ret half %r
 }
 
 define float @add_2S(<8 x float> %bin.rdx)  {
-; CHECK-LABEL: add_2S:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov s2, v0.s[2]
-; CHECK-NEXT:    faddp s3, v0.2s
-; CHECK-NEXT:    mov s0, v0.s[3]
-; CHECK-NEXT:    fadd s2, s3, s2
-; CHECK-NEXT:    mov s3, v1.s[2]
-; CHECK-NEXT:    fadd s0, s2, s0
-; CHECK-NEXT:    mov s2, v1.s[1]
-; CHECK-NEXT:    fadd s0, s0, s1
-; CHECK-NEXT:    mov s1, v1.s[3]
-; CHECK-NEXT:    fadd s0, s0, s2
-; CHECK-NEXT:    fadd s0, s0, s3
-; CHECK-NEXT:    fadd s0, s0, s1
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: add_2S:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov s2, v0.s[2]
+; CHECK-SD-NEXT:    faddp s3, v0.2s
+; CHECK-SD-NEXT:    mov s0, v0.s[3]
+; CHECK-SD-NEXT:    fadd s2, s3, s2
+; CHECK-SD-NEXT:    mov s3, v1.s[2]
+; CHECK-SD-NEXT:    fadd s0, s2, s0
+; CHECK-SD-NEXT:    mov s2, v1.s[1]
+; CHECK-SD-NEXT:    fadd s0, s0, s1
+; CHECK-SD-NEXT:    mov s1, v1.s[3]
+; CHECK-SD-NEXT:    fadd s0, s0, s2
+; CHECK-SD-NEXT:    fadd s0, s0, s3
+; CHECK-SD-NEXT:    fadd s0, s0, s1
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: add_2S:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v2.2s, #128, lsl #24
+; CHECK-GI-NEXT:    mov s3, v0.s[1]
+; CHECK-GI-NEXT:    mov s4, v0.s[2]
+; CHECK-GI-NEXT:    fadd s2, s2, s0
+; CHECK-GI-NEXT:    mov s0, v0.s[3]
+; CHECK-GI-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NEXT:    mov s3, v1.s[2]
+; CHECK-GI-NEXT:    fadd s2, s2, s4
+; CHECK-GI-NEXT:    fadd s0, s2, s0
+; CHECK-GI-NEXT:    mov s2, v1.s[1]
+; CHECK-GI-NEXT:    fadd s0, s0, s1
+; CHECK-GI-NEXT:    mov s1, v1.s[3]
+; CHECK-GI-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NEXT:    fadd s0, s0, s3
+; CHECK-GI-NEXT:    fadd s0, s0, s1
+; CHECK-GI-NEXT:    ret
   %r = call float @llvm.vector.reduce.fadd.f32.v8f32(float -0.0, <8 x float> %bin.rdx)
   ret float %r
 }
 
 define double @add_2D(<4 x double> %bin.rdx)  {
-; CHECK-LABEL: add_2D:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    faddp d0, v0.2d
-; CHECK-NEXT:    mov d2, v1.d[1]
-; CHECK-NEXT:    fadd d0, d0, d1
-; CHECK-NEXT:    fadd d0, d0, d2
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: add_2D:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    faddp d0, v0.2d
+; CHECK-SD-NEXT:    mov d2, v1.d[1]
+; CHECK-SD-NEXT:    fadd d0, d0, d1
+; CHECK-SD-NEXT:    fadd d0, d0, d2
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: add_2D:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
+; CHECK-GI-NEXT:    mov d3, v0.d[1]
+; CHECK-GI-NEXT:    fmov d2, x8
+; CHECK-GI-NEXT:    fadd d0, d2, d0
+; CHECK-GI-NEXT:    mov d2, v1.d[1]
+; CHECK-GI-NEXT:    fadd d0, d0, d3
+; CHECK-GI-NEXT:    fadd d0, d0, d1
+; CHECK-GI-NEXT:    fadd d0, d0, d2
+; CHECK-GI-NEXT:    ret
   %r = call double @llvm.vector.reduce.fadd.f64.v4f64(double -0.0, <4 x double> %bin.rdx)
   ret double %r
 }
 
 ; Added at least one test where the start value is not -0.0.
 define float @add_S_init_42(<4 x float> %bin.rdx)  {
-; CHECK-LABEL: add_S_init_42:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #1109917696 // =0x42280000
-; CHECK-NEXT:    mov s2, v0.s[1]
-; CHECK-NEXT:    mov s3, v0.s[2]
-; CHECK-NEXT:    fmov s1, w8
-; CHECK-NEXT:    fadd s1, s0, s1
-; CHECK-NEXT:    mov s0, v0.s[3]
-; CHECK-NEXT:    fadd s1, s1, s2
-; CHECK-NEXT:    fadd s1, s1, s3
-; CHECK-NEXT:    fadd s0, s1, s0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: add_S_init_42:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, #1109917696 // =0x42280000
+; CHECK-SD-NEXT:    mov s2, v0.s[1]
+; CHECK-SD-NEXT:    mov s3, v0.s[2]
+; CHECK-SD-NEXT:    fmov s1, w8
+; CHECK-SD-NEXT:    fadd s1, s0, s1
+; CHECK-SD-NEXT:    mov s0, v0.s[3]
+; CHECK-SD-NEXT:    fadd s1, s1, s2
+; CHECK-SD-NEXT:    fadd s1, s1, s3
+; CHECK-SD-NEXT:    fadd s0, s1, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: add_S_init_42:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #1109917696 // =0x42280000
+; CHECK-GI-NEXT:    mov s2, v0.s[1]
+; CHECK-GI-NEXT:    mov s3, v0.s[2]
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    fadd s1, s1, s0
+; CHECK-GI-NEXT:    mov s0, v0.s[3]
+; CHECK-GI-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NEXT:    fadd s1, s1, s3
+; CHECK-GI-NEXT:    fadd s0, s1, s0
+; CHECK-GI-NEXT:    ret
   %r = call float @llvm.vector.reduce.fadd.f32.v4f32(float 42.0, <4 x float> %bin.rdx)
   ret float %r
 }
@@ -303,4 +605,4 @@ declare float @llvm.vector.reduce.fadd.f32.v8f32(float, <8 x float>)
 declare double @llvm.vector.reduce.fadd.f64.v2f64(double, <2 x double>)
 declare double @llvm.vector.reduce.fadd.f64.v4f64(double, <4 x double>)
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-SD: {{.*}}
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmul-strict.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmul-strict.ll
index 68cd3496a923ab..32ce4d6eb96756 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fmul-strict.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmul-strict.ll
@@ -1,13 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
 ; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
 
 define float @mul_HalfS(<2 x float> %bin.rdx)  {
-; CHECK-LABEL: mul_HalfS:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    fmul s0, s0, v0.s[1]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: mul_HalfS:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    fmul s0, s0, v0.s[1]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: mul_HalfS:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov s1, #1.00000000
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov s2, v0.s[1]
+; CHECK-GI-NEXT:    fmul s0, s1, s0
+; CHECK-GI-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NEXT:    ret
   %r = call float @llvm.vector.reduce.fmul.f32.v2f32(float 1.0, <2 x float> %bin.rdx)
   ret float %r
 }
@@ -40,6 +51,45 @@ define half @mul_HalfH(<4 x half> %bin.rdx)  {
 ; CHECK-SD-FP16-NEXT:    fmul h1, h1, v0.h[2]
 ; CHECK-SD-FP16-NEXT:    fmul h0, h1, v0.h[3]
 ; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NOFP16-LABEL: mul_HalfH:
+; CHECK-GI-NOFP16:       // %bb.0:
+; CHECK-GI-NOFP16-NEXT:    mov w8, #15360 // =0x3c00
+; CHECK-GI-NOFP16-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h0
+; CHECK-GI-NOFP16-NEXT:    fmov s1, w8
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NOFP16-NEXT:    mov h0, v0.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s1, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: mul_HalfH:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    fmov h1, #1.00000000
+; CHECK-GI-FP16-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT:    mov h2, v0.h[1]
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[2]
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, h0
+; CHECK-GI-FP16-NEXT:    mov h0, v0.h[3]
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, h2
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, h3
+; CHECK-GI-FP16-NEXT:    fmul h0, h1, h0
+; CHECK-GI-FP16-NEXT:    ret
   %r = call half @llvm.vector.reduce.fmul.f16.v4f16(half 1.0, <4 x half> %bin.rdx)
   ret half %r
 }
@@ -95,26 +145,100 @@ define half @mul_H(<8 x half> %bin.rdx)  {
 ; CHECK-SD-FP16-NEXT:    fmul h1, h1, v0.h[6]
 ; CHECK-SD-FP16-NEXT:    fmul h0, h1, v0.h[7]
 ; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NOFP16-LABEL: mul_H:
+; CHECK-GI-NOFP16:       // %bb.0:
+; CHECK-GI-NOFP16-NEXT:    mov w8, #15360 // =0x3c00
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h0
+; CHECK-GI-NOFP16-NEXT:    fmov s1, w8
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[4]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[5]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[6]
+; CHECK-GI-NOFP16-NEXT:    mov h0, v0.h[7]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s1, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: mul_H:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    fmov h1, #1.00000000
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, h0
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, v0.h[1]
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, v0.h[2]
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, v0.h[3]
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, v0.h[4]
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, v0.h[5]
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, v0.h[6]
+; CHECK-GI-FP16-NEXT:    fmul h0, h1, v0.h[7]
+; CHECK-GI-FP16-NEXT:    ret
   %r = call half @llvm.vector.reduce.fmul.f16.v8f16(half 1.0, <8 x half> %bin.rdx)
   ret half %r
 }
 
 define float @mul_S(<4 x float> %bin.rdx)  {
-; CHECK-LABEL: mul_S:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmul s1, s0, v0.s[1]
-; CHECK-NEXT:    fmul s1, s1, v0.s[2]
-; CHECK-NEXT:    fmul s0, s1, v0.s[3]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: mul_S:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmul s1, s0, v0.s[1]
+; CHECK-SD-NEXT:    fmul s1, s1, v0.s[2]
+; CHECK-SD-NEXT:    fmul s0, s1, v0.s[3]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: mul_S:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov s1, #1.00000000
+; CHECK-GI-NEXT:    fmul s1, s1, s0
+; CHECK-GI-NEXT:    fmul s1, s1, v0.s[1]
+; CHECK-GI-NEXT:    fmul s1, s1, v0.s[2]
+; CHECK-GI-NEXT:    fmul s0, s1, v0.s[3]
+; CHECK-GI-NEXT:    ret
   %r = call float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %bin.rdx)
   ret float %r
 }
 
 define double @mul_D(<2 x double> %bin.rdx)  {
-; CHECK-LABEL: mul_D:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmul d0, d0, v0.d[1]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: mul_D:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmul d0, d0, v0.d[1]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: mul_D:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov d1, #1.00000000
+; CHECK-GI-NEXT:    fmul d1, d1, d0
+; CHECK-GI-NEXT:    fmul d0, d1, v0.d[1]
+; CHECK-GI-NEXT:    ret
   %r = call double @llvm.vector.reduce.fmul.f64.v2f64(double 1.0, <2 x double> %bin.rdx)
   ret double %r
 }
@@ -216,32 +340,159 @@ define half @mul_2H(<16 x half> %bin.rdx)  {
 ; CHECK-SD-FP16-NEXT:    fmul h0, h0, v1.h[6]
 ; CHECK-SD-FP16-NEXT:    fmul h0, h0, v1.h[7]
 ; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NOFP16-LABEL: mul_2H:
+; CHECK-GI-NOFP16:       // %bb.0:
+; CHECK-GI-NOFP16-NEXT:    mov w8, #15360 // =0x3c00
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h0
+; CHECK-GI-NOFP16-NEXT:    fmov s2, w8
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[2]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[4]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[5]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[6]
+; CHECK-GI-NOFP16-NEXT:    mov h0, v0.h[7]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s2, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h1
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[2]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[4]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[5]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[6]
+; CHECK-GI-NOFP16-NEXT:    mov h1, v1.h[7]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: mul_2H:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    fmov h2, #1.00000000
+; CHECK-GI-FP16-NEXT:    fmul h2, h2, h0
+; CHECK-GI-FP16-NEXT:    fmul h2, h2, v0.h[1]
+; CHECK-GI-FP16-NEXT:    fmul h2, h2, v0.h[2]
+; CHECK-GI-FP16-NEXT:    fmul h2, h2, v0.h[3]
+; CHECK-GI-FP16-NEXT:    fmul h2, h2, v0.h[4]
+; CHECK-GI-FP16-NEXT:    fmul h2, h2, v0.h[5]
+; CHECK-GI-FP16-NEXT:    fmul h2, h2, v0.h[6]
+; CHECK-GI-FP16-NEXT:    fmul h0, h2, v0.h[7]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, v1.h[1]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, v1.h[2]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, v1.h[3]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, v1.h[4]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, v1.h[5]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, v1.h[6]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, v1.h[7]
+; CHECK-GI-FP16-NEXT:    ret
   %r = call half @llvm.vector.reduce.fmul.f16.v16f16(half 1.0, <16 x half> %bin.rdx)
   ret half %r
 }
 
 define float @mul_2S(<8 x float> %bin.rdx)  {
-; CHECK-LABEL: mul_2S:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmul s2, s0, v0.s[1]
-; CHECK-NEXT:    fmul s2, s2, v0.s[2]
-; CHECK-NEXT:    fmul s0, s2, v0.s[3]
-; CHECK-NEXT:    fmul s0, s0, s1
-; CHECK-NEXT:    fmul s0, s0, v1.s[1]
-; CHECK-NEXT:    fmul s0, s0, v1.s[2]
-; CHECK-NEXT:    fmul s0, s0, v1.s[3]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: mul_2S:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmul s2, s0, v0.s[1]
+; CHECK-SD-NEXT:    fmul s2, s2, v0.s[2]
+; CHECK-SD-NEXT:    fmul s0, s2, v0.s[3]
+; CHECK-SD-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NEXT:    fmul s0, s0, v1.s[1]
+; CHECK-SD-NEXT:    fmul s0, s0, v1.s[2]
+; CHECK-SD-NEXT:    fmul s0, s0, v1.s[3]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: mul_2S:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov s2, #1.00000000
+; CHECK-GI-NEXT:    fmul s2, s2, s0
+; CHECK-GI-NEXT:    fmul s2, s2, v0.s[1]
+; CHECK-GI-NEXT:    fmul s2, s2, v0.s[2]
+; CHECK-GI-NEXT:    fmul s0, s2, v0.s[3]
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fmul s0, s0, v1.s[1]
+; CHECK-GI-NEXT:    fmul s0, s0, v1.s[2]
+; CHECK-GI-NEXT:    fmul s0, s0, v1.s[3]
+; CHECK-GI-NEXT:    ret
   %r = call float @llvm.vector.reduce.fmul.f32.v8f32(float 1.0, <8 x float> %bin.rdx)
   ret float %r
 }
 
 define double @mul_2D(<4 x double> %bin.rdx)  {
-; CHECK-LABEL: mul_2D:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmul d0, d0, v0.d[1]
-; CHECK-NEXT:    fmul d0, d0, d1
-; CHECK-NEXT:    fmul d0, d0, v1.d[1]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: mul_2D:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmul d0, d0, v0.d[1]
+; CHECK-SD-NEXT:    fmul d0, d0, d1
+; CHECK-SD-NEXT:    fmul d0, d0, v1.d[1]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: mul_2D:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov d2, #1.00000000
+; CHECK-GI-NEXT:    fmul d2, d2, d0
+; CHECK-GI-NEXT:    fmul d0, d2, v0.d[1]
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fmul d0, d0, v1.d[1]
+; CHECK-GI-NEXT:    ret
   %r = call double @llvm.vector.reduce.fmul.f64.v4f64(double 1.0, <4 x double> %bin.rdx)
   ret double %r
 }
@@ -270,5 +521,3 @@ declare float @llvm.vector.reduce.fmul.f32.v4f32(float, <4 x float>)
 declare float @llvm.vector.reduce.fmul.f32.v8f32(float, <8 x float>)
 declare double @llvm.vector.reduce.fmul.f64.v2f64(double, <2 x double>)
 declare double @llvm.vector.reduce.fmul.f64.v4f64(double, <4 x double>)
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-SD: {{.*}}



More information about the llvm-commits mailing list