[llvm] 77b124c - [AArch64][GlobalISel] Add legalization for G_VECREDUCE_SEQ_FADD. (#76238)

via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 5 00:11:48 PST 2024


Author: David Green
Date: 2024-01-05T08:11:44Z
New Revision: 77b124cc574bb427b066fa8c56df7d37a27feb8f

URL: https://github.com/llvm/llvm-project/commit/77b124cc574bb427b066fa8c56df7d37a27feb8f
DIFF: https://github.com/llvm/llvm-project/commit/77b124cc574bb427b066fa8c56df7d37a27feb8f.diff

LOG: [AArch64][GlobalISel] Add legalization for G_VECREDUCE_SEQ_FADD. (#76238)

And G_VECREDUCE_SEQ_FMUL at the same time. They require the elements of
the vector operand to be accumulated in order, so just need to be
scalarized.

Some of the operands are not simplified as much as they can quite yet
due to not canonicalizing constant operands post-legalization.

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
    llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
    llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
    llvm/test/CodeGen/AArch64/vecreduce-fadd-strict.ll
    llvm/test/CodeGen/AArch64/vecreduce-fmul-strict.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 916e6dda399197..586679fa295431 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -350,6 +350,9 @@ class LegalizerHelper {
 
   LegalizeResult fewerElementsVectorReductions(MachineInstr &MI,
                                                unsigned TypeIdx, LLT NarrowTy);
+  LegalizeResult fewerElementsVectorSeqReductions(MachineInstr &MI,
+                                                  unsigned TypeIdx,
+                                                  LLT NarrowTy);
 
   LegalizeResult fewerElementsVectorShuffle(MachineInstr &MI, unsigned TypeIdx,
                                             LLT NarrowTy);

diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 3de8fee1ac4e02..def7f6ebeb011c 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -4718,6 +4718,9 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
     return fewerElementsVectorMultiEltType(GMI, NumElts, {2 /*imm*/});
   GISEL_VECREDUCE_CASES_NONSEQ
     return fewerElementsVectorReductions(MI, TypeIdx, NarrowTy);
+  case TargetOpcode::G_VECREDUCE_SEQ_FADD:
+  case TargetOpcode::G_VECREDUCE_SEQ_FMUL:
+    return fewerElementsVectorSeqReductions(MI, TypeIdx, NarrowTy);
   case G_SHUFFLE_VECTOR:
     return fewerElementsVectorShuffle(MI, TypeIdx, NarrowTy);
   case G_FPOWI:
@@ -4951,6 +4954,36 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorReductions(
   return Legalized;
 }
 
+LegalizerHelper::LegalizeResult
+LegalizerHelper::fewerElementsVectorSeqReductions(MachineInstr &MI,
+                                                  unsigned int TypeIdx,
+                                                  LLT NarrowTy) {
+  auto [DstReg, DstTy, ScalarReg, ScalarTy, SrcReg, SrcTy] =
+      MI.getFirst3RegLLTs();
+  if (!NarrowTy.isScalar() || TypeIdx != 2 || DstTy != ScalarTy ||
+      DstTy != NarrowTy)
+    return UnableToLegalize;
+
+  assert((MI.getOpcode() == TargetOpcode::G_VECREDUCE_SEQ_FADD ||
+          MI.getOpcode() == TargetOpcode::G_VECREDUCE_SEQ_FMUL) &&
+         "Unexpected vecreduce opcode");
+  unsigned ScalarOpc = MI.getOpcode() == TargetOpcode::G_VECREDUCE_SEQ_FADD
+                           ? TargetOpcode::G_FADD
+                           : TargetOpcode::G_FMUL;
+
+  SmallVector<Register> SplitSrcs;
+  unsigned NumParts = SrcTy.getNumElements();
+  extractParts(SrcReg, NarrowTy, NumParts, SplitSrcs);
+  Register Acc = ScalarReg;
+  for (unsigned i = 0; i < NumParts; i++)
+    Acc = MIRBuilder.buildInstr(ScalarOpc, {NarrowTy}, {Acc, SplitSrcs[i]})
+              .getReg(0);
+
+  MIRBuilder.buildCopy(DstReg, Acc);
+  MI.eraseFromParent();
+  return Legalized;
+}
+
 LegalizerHelper::LegalizeResult
 LegalizerHelper::tryNarrowPow2Reduction(MachineInstr &MI, Register SrcReg,
                                         LLT SrcTy, LLT NarrowTy,

diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 4ba73736294fcd..470742cdc30e6f 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -1024,6 +1024,10 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
       .scalarize(1)
       .lower();
 
+  getActionDefinitionsBuilder({G_VECREDUCE_SEQ_FADD, G_VECREDUCE_SEQ_FMUL})
+      .scalarize(2)
+      .lower();
+
   getActionDefinitionsBuilder(G_VECREDUCE_ADD)
       .legalFor({{s8, v16s8},
                  {s8, v8s8},

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index df1c5ff040430a..2adf9763a96b65 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -726,11 +726,12 @@
 # DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_VECREDUCE_SEQ_FADD (opcode {{[0-9]+}}): 3 type indices, 0 imm indices
-# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_VECREDUCE_SEQ_FMUL (opcode {{[0-9]+}}): 3 type indices, 0 imm indices
-# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
+# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_VECREDUCE_FADD (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
 # DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fadd-strict.ll b/llvm/test/CodeGen/AArch64/vecreduce-fadd-strict.ll
index 63b5a97703e645..20237705cc6e5c 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fadd-strict.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fadd-strict.ll
@@ -1,13 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
 ; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
 
 define float @add_HalfS(<2 x float> %bin.rdx)  {
-; CHECK-LABEL: add_HalfS:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    faddp s0, v0.2s
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: add_HalfS:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    faddp s0, v0.2s
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: add_HalfS:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2s, #128, lsl #24
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov s2, v0.s[1]
+; CHECK-GI-NEXT:    fadd s0, s1, s0
+; CHECK-GI-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NEXT:    ret
   %r = call float @llvm.vector.reduce.fadd.f32.v2f32(float -0.0, <2 x float> %bin.rdx)
   ret float %r
 }
@@ -42,6 +53,46 @@ define half @add_HalfH(<4 x half> %bin.rdx)  {
 ; CHECK-SD-FP16-NEXT:    fadd h1, h2, h1
 ; CHECK-SD-FP16-NEXT:    fadd h0, h1, h0
 ; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NOFP16-LABEL: add_HalfH:
+; CHECK-GI-NOFP16:       // %bb.0:
+; CHECK-GI-NOFP16-NEXT:    mov w8, #32768 // =0x8000
+; CHECK-GI-NOFP16-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h0
+; CHECK-GI-NOFP16-NEXT:    fmov s1, w8
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NOFP16-NEXT:    mov h0, v0.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s1, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: add_HalfH:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI1_0
+; CHECK-GI-FP16-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT:    mov h2, v0.h[1]
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[2]
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI1_0]
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h0
+; CHECK-GI-FP16-NEXT:    mov h0, v0.h[3]
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h2
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h3
+; CHECK-GI-FP16-NEXT:    fadd h0, h1, h0
+; CHECK-GI-FP16-NEXT:    ret
   %r = call half @llvm.vector.reduce.fadd.f16.v4f16(half -0.0, <4 x half> %bin.rdx)
   ret half %r
 }
@@ -103,28 +154,115 @@ define half @add_H(<8 x half> %bin.rdx)  {
 ; CHECK-SD-FP16-NEXT:    fadd h1, h1, h2
 ; CHECK-SD-FP16-NEXT:    fadd h0, h1, h0
 ; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NOFP16-LABEL: add_H:
+; CHECK-GI-NOFP16:       // %bb.0:
+; CHECK-GI-NOFP16-NEXT:    mov w8, #32768 // =0x8000
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h0
+; CHECK-GI-NOFP16-NEXT:    fmov s1, w8
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[4]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[5]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[6]
+; CHECK-GI-NOFP16-NEXT:    mov h0, v0.h[7]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s1, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: add_H:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI2_0
+; CHECK-GI-FP16-NEXT:    mov h2, v0.h[1]
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[2]
+; CHECK-GI-FP16-NEXT:    ldr h1, [x8, :lo12:.LCPI2_0]
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h0
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h2
+; CHECK-GI-FP16-NEXT:    mov h2, v0.h[3]
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[4]
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h2
+; CHECK-GI-FP16-NEXT:    mov h2, v0.h[5]
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[6]
+; CHECK-GI-FP16-NEXT:    mov h0, v0.h[7]
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h2
+; CHECK-GI-FP16-NEXT:    fadd h1, h1, h3
+; CHECK-GI-FP16-NEXT:    fadd h0, h1, h0
+; CHECK-GI-FP16-NEXT:    ret
   %r = call half @llvm.vector.reduce.fadd.f16.v8f16(half -0.0, <8 x half> %bin.rdx)
   ret half %r
 }
 
 define float @add_S(<4 x float> %bin.rdx)  {
-; CHECK-LABEL: add_S:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov s1, v0.s[2]
-; CHECK-NEXT:    faddp s2, v0.2s
-; CHECK-NEXT:    mov s0, v0.s[3]
-; CHECK-NEXT:    fadd s1, s2, s1
-; CHECK-NEXT:    fadd s0, s1, s0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: add_S:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov s1, v0.s[2]
+; CHECK-SD-NEXT:    faddp s2, v0.2s
+; CHECK-SD-NEXT:    mov s0, v0.s[3]
+; CHECK-SD-NEXT:    fadd s1, s2, s1
+; CHECK-SD-NEXT:    fadd s0, s1, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: add_S:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v1.2s, #128, lsl #24
+; CHECK-GI-NEXT:    mov s2, v0.s[1]
+; CHECK-GI-NEXT:    mov s3, v0.s[2]
+; CHECK-GI-NEXT:    fadd s1, s1, s0
+; CHECK-GI-NEXT:    mov s0, v0.s[3]
+; CHECK-GI-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NEXT:    fadd s1, s1, s3
+; CHECK-GI-NEXT:    fadd s0, s1, s0
+; CHECK-GI-NEXT:    ret
   %r = call float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %bin.rdx)
   ret float %r
 }
 
 define double @add_D(<2 x double> %bin.rdx)  {
-; CHECK-LABEL: add_D:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    faddp d0, v0.2d
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: add_D:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    faddp d0, v0.2d
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: add_D:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
+; CHECK-GI-NEXT:    mov d2, v0.d[1]
+; CHECK-GI-NEXT:    fmov d1, x8
+; CHECK-GI-NEXT:    fadd d0, d1, d0
+; CHECK-GI-NEXT:    fadd d0, d0, d2
+; CHECK-GI-NEXT:    ret
   %r = call double @llvm.vector.reduce.fadd.f64.v2f64(double -0.0, <2 x double> %bin.rdx)
   ret double %r
 }
@@ -239,56 +377,220 @@ define half @add_2H(<16 x half> %bin.rdx)  {
 ; CHECK-SD-FP16-NEXT:    fadd h0, h0, h3
 ; CHECK-SD-FP16-NEXT:    fadd h0, h0, h1
 ; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NOFP16-LABEL: add_2H:
+; CHECK-GI-NOFP16:       // %bb.0:
+; CHECK-GI-NOFP16-NEXT:    mov w8, #32768 // =0x8000
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h0
+; CHECK-GI-NOFP16-NEXT:    fmov s2, w8
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[2]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[4]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[5]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[6]
+; CHECK-GI-NOFP16-NEXT:    mov h0, v0.h[7]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s2, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h1
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[2]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[4]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[5]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[6]
+; CHECK-GI-NOFP16-NEXT:    mov h1, v1.h[7]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fadd s0, s0, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: add_2H:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    adrp x8, .LCPI5_0
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[1]
+; CHECK-GI-FP16-NEXT:    mov h4, v0.h[2]
+; CHECK-GI-FP16-NEXT:    ldr h2, [x8, :lo12:.LCPI5_0]
+; CHECK-GI-FP16-NEXT:    fadd h2, h2, h0
+; CHECK-GI-FP16-NEXT:    fadd h2, h2, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[3]
+; CHECK-GI-FP16-NEXT:    fadd h2, h2, h4
+; CHECK-GI-FP16-NEXT:    mov h4, v0.h[4]
+; CHECK-GI-FP16-NEXT:    fadd h2, h2, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[5]
+; CHECK-GI-FP16-NEXT:    fadd h2, h2, h4
+; CHECK-GI-FP16-NEXT:    mov h4, v0.h[6]
+; CHECK-GI-FP16-NEXT:    mov h0, v0.h[7]
+; CHECK-GI-FP16-NEXT:    fadd h2, h2, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v1.h[2]
+; CHECK-GI-FP16-NEXT:    fadd h2, h2, h4
+; CHECK-GI-FP16-NEXT:    fadd h0, h2, h0
+; CHECK-GI-FP16-NEXT:    mov h2, v1.h[1]
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h2
+; CHECK-GI-FP16-NEXT:    mov h2, v1.h[3]
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v1.h[4]
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h2
+; CHECK-GI-FP16-NEXT:    mov h2, v1.h[5]
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h3
+; CHECK-GI-FP16-NEXT:    mov h3, v1.h[6]
+; CHECK-GI-FP16-NEXT:    mov h1, v1.h[7]
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h2
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h3
+; CHECK-GI-FP16-NEXT:    fadd h0, h0, h1
+; CHECK-GI-FP16-NEXT:    ret
   %r = call half @llvm.vector.reduce.fadd.f16.v16f16(half -0.0, <16 x half> %bin.rdx)
   ret half %r
 }
 
 define float @add_2S(<8 x float> %bin.rdx)  {
-; CHECK-LABEL: add_2S:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov s2, v0.s[2]
-; CHECK-NEXT:    faddp s3, v0.2s
-; CHECK-NEXT:    mov s0, v0.s[3]
-; CHECK-NEXT:    fadd s2, s3, s2
-; CHECK-NEXT:    mov s3, v1.s[2]
-; CHECK-NEXT:    fadd s0, s2, s0
-; CHECK-NEXT:    mov s2, v1.s[1]
-; CHECK-NEXT:    fadd s0, s0, s1
-; CHECK-NEXT:    mov s1, v1.s[3]
-; CHECK-NEXT:    fadd s0, s0, s2
-; CHECK-NEXT:    fadd s0, s0, s3
-; CHECK-NEXT:    fadd s0, s0, s1
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: add_2S:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov s2, v0.s[2]
+; CHECK-SD-NEXT:    faddp s3, v0.2s
+; CHECK-SD-NEXT:    mov s0, v0.s[3]
+; CHECK-SD-NEXT:    fadd s2, s3, s2
+; CHECK-SD-NEXT:    mov s3, v1.s[2]
+; CHECK-SD-NEXT:    fadd s0, s2, s0
+; CHECK-SD-NEXT:    mov s2, v1.s[1]
+; CHECK-SD-NEXT:    fadd s0, s0, s1
+; CHECK-SD-NEXT:    mov s1, v1.s[3]
+; CHECK-SD-NEXT:    fadd s0, s0, s2
+; CHECK-SD-NEXT:    fadd s0, s0, s3
+; CHECK-SD-NEXT:    fadd s0, s0, s1
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: add_2S:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    movi v2.2s, #128, lsl #24
+; CHECK-GI-NEXT:    mov s3, v0.s[1]
+; CHECK-GI-NEXT:    mov s4, v0.s[2]
+; CHECK-GI-NEXT:    fadd s2, s2, s0
+; CHECK-GI-NEXT:    mov s0, v0.s[3]
+; CHECK-GI-NEXT:    fadd s2, s2, s3
+; CHECK-GI-NEXT:    mov s3, v1.s[2]
+; CHECK-GI-NEXT:    fadd s2, s2, s4
+; CHECK-GI-NEXT:    fadd s0, s2, s0
+; CHECK-GI-NEXT:    mov s2, v1.s[1]
+; CHECK-GI-NEXT:    fadd s0, s0, s1
+; CHECK-GI-NEXT:    mov s1, v1.s[3]
+; CHECK-GI-NEXT:    fadd s0, s0, s2
+; CHECK-GI-NEXT:    fadd s0, s0, s3
+; CHECK-GI-NEXT:    fadd s0, s0, s1
+; CHECK-GI-NEXT:    ret
   %r = call float @llvm.vector.reduce.fadd.f32.v8f32(float -0.0, <8 x float> %bin.rdx)
   ret float %r
 }
 
 define double @add_2D(<4 x double> %bin.rdx)  {
-; CHECK-LABEL: add_2D:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    faddp d0, v0.2d
-; CHECK-NEXT:    mov d2, v1.d[1]
-; CHECK-NEXT:    fadd d0, d0, d1
-; CHECK-NEXT:    fadd d0, d0, d2
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: add_2D:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    faddp d0, v0.2d
+; CHECK-SD-NEXT:    mov d2, v1.d[1]
+; CHECK-SD-NEXT:    fadd d0, d0, d1
+; CHECK-SD-NEXT:    fadd d0, d0, d2
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: add_2D:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
+; CHECK-GI-NEXT:    mov d3, v0.d[1]
+; CHECK-GI-NEXT:    fmov d2, x8
+; CHECK-GI-NEXT:    fadd d0, d2, d0
+; CHECK-GI-NEXT:    mov d2, v1.d[1]
+; CHECK-GI-NEXT:    fadd d0, d0, d3
+; CHECK-GI-NEXT:    fadd d0, d0, d1
+; CHECK-GI-NEXT:    fadd d0, d0, d2
+; CHECK-GI-NEXT:    ret
   %r = call double @llvm.vector.reduce.fadd.f64.v4f64(double -0.0, <4 x double> %bin.rdx)
   ret double %r
 }
 
 ; Added at least one test where the start value is not -0.0.
 define float @add_S_init_42(<4 x float> %bin.rdx)  {
-; CHECK-LABEL: add_S_init_42:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #1109917696 // =0x42280000
-; CHECK-NEXT:    mov s2, v0.s[1]
-; CHECK-NEXT:    mov s3, v0.s[2]
-; CHECK-NEXT:    fmov s1, w8
-; CHECK-NEXT:    fadd s1, s0, s1
-; CHECK-NEXT:    mov s0, v0.s[3]
-; CHECK-NEXT:    fadd s1, s1, s2
-; CHECK-NEXT:    fadd s1, s1, s3
-; CHECK-NEXT:    fadd s0, s1, s0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: add_S_init_42:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov w8, #1109917696 // =0x42280000
+; CHECK-SD-NEXT:    mov s2, v0.s[1]
+; CHECK-SD-NEXT:    mov s3, v0.s[2]
+; CHECK-SD-NEXT:    fmov s1, w8
+; CHECK-SD-NEXT:    fadd s1, s0, s1
+; CHECK-SD-NEXT:    mov s0, v0.s[3]
+; CHECK-SD-NEXT:    fadd s1, s1, s2
+; CHECK-SD-NEXT:    fadd s1, s1, s3
+; CHECK-SD-NEXT:    fadd s0, s1, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: add_S_init_42:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov w8, #1109917696 // =0x42280000
+; CHECK-GI-NEXT:    mov s2, v0.s[1]
+; CHECK-GI-NEXT:    mov s3, v0.s[2]
+; CHECK-GI-NEXT:    fmov s1, w8
+; CHECK-GI-NEXT:    fadd s1, s1, s0
+; CHECK-GI-NEXT:    mov s0, v0.s[3]
+; CHECK-GI-NEXT:    fadd s1, s1, s2
+; CHECK-GI-NEXT:    fadd s1, s1, s3
+; CHECK-GI-NEXT:    fadd s0, s1, s0
+; CHECK-GI-NEXT:    ret
   %r = call float @llvm.vector.reduce.fadd.f32.v4f32(float 42.0, <4 x float> %bin.rdx)
   ret float %r
 }
@@ -303,4 +605,4 @@ declare float @llvm.vector.reduce.fadd.f32.v8f32(float, <8 x float>)
 declare double @llvm.vector.reduce.fadd.f64.v2f64(double, <2 x double>)
 declare double @llvm.vector.reduce.fadd.f64.v4f64(double, <4 x double>)
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-SD: {{.*}}
+; CHECK: {{.*}}

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-fmul-strict.ll b/llvm/test/CodeGen/AArch64/vecreduce-fmul-strict.ll
index 68cd3496a923ab..32ce4d6eb96756 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-fmul-strict.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-fmul-strict.ll
@@ -1,13 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=aarch64-none-eabi -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-NOFP16
 ; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD,CHECK-SD-FP16
+; RUN: llc -mtriple=aarch64-none-eabi -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
+; RUN: llc -mtriple=aarch64-none-eabi -mattr=+fullfp16 -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
 
 define float @mul_HalfS(<2 x float> %bin.rdx)  {
-; CHECK-LABEL: mul_HalfS:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    fmul s0, s0, v0.s[1]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: mul_HalfS:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    fmul s0, s0, v0.s[1]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: mul_HalfS:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov s1, #1.00000000
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov s2, v0.s[1]
+; CHECK-GI-NEXT:    fmul s0, s1, s0
+; CHECK-GI-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NEXT:    ret
   %r = call float @llvm.vector.reduce.fmul.f32.v2f32(float 1.0, <2 x float> %bin.rdx)
   ret float %r
 }
@@ -40,6 +51,45 @@ define half @mul_HalfH(<4 x half> %bin.rdx)  {
 ; CHECK-SD-FP16-NEXT:    fmul h1, h1, v0.h[2]
 ; CHECK-SD-FP16-NEXT:    fmul h0, h1, v0.h[3]
 ; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NOFP16-LABEL: mul_HalfH:
+; CHECK-GI-NOFP16:       // %bb.0:
+; CHECK-GI-NOFP16-NEXT:    mov w8, #15360 // =0x3c00
+; CHECK-GI-NOFP16-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h0
+; CHECK-GI-NOFP16-NEXT:    fmov s1, w8
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NOFP16-NEXT:    mov h0, v0.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s1, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: mul_HalfH:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    fmov h1, #1.00000000
+; CHECK-GI-FP16-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-FP16-NEXT:    mov h2, v0.h[1]
+; CHECK-GI-FP16-NEXT:    mov h3, v0.h[2]
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, h0
+; CHECK-GI-FP16-NEXT:    mov h0, v0.h[3]
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, h2
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, h3
+; CHECK-GI-FP16-NEXT:    fmul h0, h1, h0
+; CHECK-GI-FP16-NEXT:    ret
   %r = call half @llvm.vector.reduce.fmul.f16.v4f16(half 1.0, <4 x half> %bin.rdx)
   ret half %r
 }
@@ -95,26 +145,100 @@ define half @mul_H(<8 x half> %bin.rdx)  {
 ; CHECK-SD-FP16-NEXT:    fmul h1, h1, v0.h[6]
 ; CHECK-SD-FP16-NEXT:    fmul h0, h1, v0.h[7]
 ; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NOFP16-LABEL: mul_H:
+; CHECK-GI-NOFP16:       // %bb.0:
+; CHECK-GI-NOFP16-NEXT:    mov w8, #15360 // =0x3c00
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h0
+; CHECK-GI-NOFP16-NEXT:    fmov s1, w8
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[4]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[5]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v0.h[6]
+; CHECK-GI-NOFP16-NEXT:    mov h0, v0.h[7]
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s1, s1, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt h1, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s1, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: mul_H:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    fmov h1, #1.00000000
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, h0
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, v0.h[1]
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, v0.h[2]
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, v0.h[3]
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, v0.h[4]
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, v0.h[5]
+; CHECK-GI-FP16-NEXT:    fmul h1, h1, v0.h[6]
+; CHECK-GI-FP16-NEXT:    fmul h0, h1, v0.h[7]
+; CHECK-GI-FP16-NEXT:    ret
   %r = call half @llvm.vector.reduce.fmul.f16.v8f16(half 1.0, <8 x half> %bin.rdx)
   ret half %r
 }
 
 define float @mul_S(<4 x float> %bin.rdx)  {
-; CHECK-LABEL: mul_S:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmul s1, s0, v0.s[1]
-; CHECK-NEXT:    fmul s1, s1, v0.s[2]
-; CHECK-NEXT:    fmul s0, s1, v0.s[3]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: mul_S:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmul s1, s0, v0.s[1]
+; CHECK-SD-NEXT:    fmul s1, s1, v0.s[2]
+; CHECK-SD-NEXT:    fmul s0, s1, v0.s[3]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: mul_S:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov s1, #1.00000000
+; CHECK-GI-NEXT:    fmul s1, s1, s0
+; CHECK-GI-NEXT:    fmul s1, s1, v0.s[1]
+; CHECK-GI-NEXT:    fmul s1, s1, v0.s[2]
+; CHECK-GI-NEXT:    fmul s0, s1, v0.s[3]
+; CHECK-GI-NEXT:    ret
   %r = call float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %bin.rdx)
   ret float %r
 }
 
 define double @mul_D(<2 x double> %bin.rdx)  {
-; CHECK-LABEL: mul_D:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmul d0, d0, v0.d[1]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: mul_D:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmul d0, d0, v0.d[1]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: mul_D:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov d1, #1.00000000
+; CHECK-GI-NEXT:    fmul d1, d1, d0
+; CHECK-GI-NEXT:    fmul d0, d1, v0.d[1]
+; CHECK-GI-NEXT:    ret
   %r = call double @llvm.vector.reduce.fmul.f64.v2f64(double 1.0, <2 x double> %bin.rdx)
   ret double %r
 }
@@ -216,32 +340,159 @@ define half @mul_2H(<16 x half> %bin.rdx)  {
 ; CHECK-SD-FP16-NEXT:    fmul h0, h0, v1.h[6]
 ; CHECK-SD-FP16-NEXT:    fmul h0, h0, v1.h[7]
 ; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NOFP16-LABEL: mul_2H:
+; CHECK-GI-NOFP16:       // %bb.0:
+; CHECK-GI-NOFP16-NEXT:    mov w8, #15360 // =0x3c00
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h0
+; CHECK-GI-NOFP16-NEXT:    fmov s2, w8
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[2]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[4]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[5]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    mov h3, v0.h[6]
+; CHECK-GI-NOFP16-NEXT:    mov h0, v0.h[7]
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s3, h3
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s2, s2, s3
+; CHECK-GI-NOFP16-NEXT:    fcvt h2, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s2, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h1
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[1]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[2]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[3]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[4]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[5]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    mov h2, v1.h[6]
+; CHECK-GI-NOFP16-NEXT:    mov h1, v1.h[7]
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s2, h2
+; CHECK-GI-NOFP16-NEXT:    fcvt s1, h1
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s2
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    fcvt s0, h0
+; CHECK-GI-NOFP16-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NOFP16-NEXT:    fcvt h0, s0
+; CHECK-GI-NOFP16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: mul_2H:
+; CHECK-GI-FP16:       // %bb.0:
+; CHECK-GI-FP16-NEXT:    fmov h2, #1.00000000
+; CHECK-GI-FP16-NEXT:    fmul h2, h2, h0
+; CHECK-GI-FP16-NEXT:    fmul h2, h2, v0.h[1]
+; CHECK-GI-FP16-NEXT:    fmul h2, h2, v0.h[2]
+; CHECK-GI-FP16-NEXT:    fmul h2, h2, v0.h[3]
+; CHECK-GI-FP16-NEXT:    fmul h2, h2, v0.h[4]
+; CHECK-GI-FP16-NEXT:    fmul h2, h2, v0.h[5]
+; CHECK-GI-FP16-NEXT:    fmul h2, h2, v0.h[6]
+; CHECK-GI-FP16-NEXT:    fmul h0, h2, v0.h[7]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, h1
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, v1.h[1]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, v1.h[2]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, v1.h[3]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, v1.h[4]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, v1.h[5]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, v1.h[6]
+; CHECK-GI-FP16-NEXT:    fmul h0, h0, v1.h[7]
+; CHECK-GI-FP16-NEXT:    ret
   %r = call half @llvm.vector.reduce.fmul.f16.v16f16(half 1.0, <16 x half> %bin.rdx)
   ret half %r
 }
 
 define float @mul_2S(<8 x float> %bin.rdx)  {
-; CHECK-LABEL: mul_2S:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmul s2, s0, v0.s[1]
-; CHECK-NEXT:    fmul s2, s2, v0.s[2]
-; CHECK-NEXT:    fmul s0, s2, v0.s[3]
-; CHECK-NEXT:    fmul s0, s0, s1
-; CHECK-NEXT:    fmul s0, s0, v1.s[1]
-; CHECK-NEXT:    fmul s0, s0, v1.s[2]
-; CHECK-NEXT:    fmul s0, s0, v1.s[3]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: mul_2S:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmul s2, s0, v0.s[1]
+; CHECK-SD-NEXT:    fmul s2, s2, v0.s[2]
+; CHECK-SD-NEXT:    fmul s0, s2, v0.s[3]
+; CHECK-SD-NEXT:    fmul s0, s0, s1
+; CHECK-SD-NEXT:    fmul s0, s0, v1.s[1]
+; CHECK-SD-NEXT:    fmul s0, s0, v1.s[2]
+; CHECK-SD-NEXT:    fmul s0, s0, v1.s[3]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: mul_2S:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov s2, #1.00000000
+; CHECK-GI-NEXT:    fmul s2, s2, s0
+; CHECK-GI-NEXT:    fmul s2, s2, v0.s[1]
+; CHECK-GI-NEXT:    fmul s2, s2, v0.s[2]
+; CHECK-GI-NEXT:    fmul s0, s2, v0.s[3]
+; CHECK-GI-NEXT:    fmul s0, s0, s1
+; CHECK-GI-NEXT:    fmul s0, s0, v1.s[1]
+; CHECK-GI-NEXT:    fmul s0, s0, v1.s[2]
+; CHECK-GI-NEXT:    fmul s0, s0, v1.s[3]
+; CHECK-GI-NEXT:    ret
   %r = call float @llvm.vector.reduce.fmul.f32.v8f32(float 1.0, <8 x float> %bin.rdx)
   ret float %r
 }
 
 define double @mul_2D(<4 x double> %bin.rdx)  {
-; CHECK-LABEL: mul_2D:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmul d0, d0, v0.d[1]
-; CHECK-NEXT:    fmul d0, d0, d1
-; CHECK-NEXT:    fmul d0, d0, v1.d[1]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: mul_2D:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmul d0, d0, v0.d[1]
+; CHECK-SD-NEXT:    fmul d0, d0, d1
+; CHECK-SD-NEXT:    fmul d0, d0, v1.d[1]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: mul_2D:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov d2, #1.00000000
+; CHECK-GI-NEXT:    fmul d2, d2, d0
+; CHECK-GI-NEXT:    fmul d0, d2, v0.d[1]
+; CHECK-GI-NEXT:    fmul d0, d0, d1
+; CHECK-GI-NEXT:    fmul d0, d0, v1.d[1]
+; CHECK-GI-NEXT:    ret
   %r = call double @llvm.vector.reduce.fmul.f64.v4f64(double 1.0, <4 x double> %bin.rdx)
   ret double %r
 }
@@ -270,5 +521,3 @@ declare float @llvm.vector.reduce.fmul.f32.v4f32(float, <4 x float>)
 declare float @llvm.vector.reduce.fmul.f32.v8f32(float, <8 x float>)
 declare double @llvm.vector.reduce.fmul.f64.v2f64(double, <2 x double>)
 declare double @llvm.vector.reduce.fmul.f64.v4f64(double, <4 x double>)
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-SD: {{.*}}


        


More information about the llvm-commits mailing list