[llvm] [AArch64][GlobalISel] Legalize G_VECREDUCE_{MIN/MAX} (PR #69461)

via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 2 06:10:05 PDT 2023


https://github.com/chuongg3 updated https://github.com/llvm/llvm-project/pull/69461

>From d69ff64bb3470aea71b6a5d6fbe3037077181f0d Mon Sep 17 00:00:00 2001
From: Tuan Chuong Goh <chuong.goh at arm.com>
Date: Mon, 9 Oct 2023 09:55:14 +0100
Subject: [PATCH 1/3] [AArch64][GlobalISel] Legalize G_VECREDUCE_{MIN/MAX}

Legalizes G_VECREDUCE_{MIN/MAX} and selects instructions for
vecreduce_{min/max}
---
 llvm/lib/Target/AArch64/AArch64InstrGISel.td  |    5 +
 llvm/lib/Target/AArch64/AArch64InstrInfo.td   |   36 +
 .../AArch64/GISel/AArch64LegalizerInfo.cpp    |   15 +
 .../GlobalISel/legalizer-info-validation.mir  |   19 +-
 llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll  | 1869 ++++++++++++++++-
 5 files changed, 1854 insertions(+), 90 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrGISel.td b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
index 27338bd24393325..a3e8b1fff32eee9 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrGISel.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
@@ -274,6 +274,11 @@ def : GINodeEquiv<G_EXTRACT_VECTOR_ELT, vector_extract>;
 
 def : GINodeEquiv<G_PREFETCH, AArch64Prefetch>;
 
+def : GINodeEquiv<G_VECREDUCE_UMIN, vecreduce_umin>;
+def : GINodeEquiv<G_VECREDUCE_UMAX, vecreduce_umax>;
+def : GINodeEquiv<G_VECREDUCE_SMIN, vecreduce_smin>;
+def : GINodeEquiv<G_VECREDUCE_SMAX, vecreduce_smax>;
+
 // These are patterns that we only use for GlobalISel via the importer.
 def : Pat<(f32 (fadd (vector_extract (v2f32 FPR64:$Rn), (i64 0)),
                      (vector_extract (v2f32 FPR64:$Rn), (i64 1)))),
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index ee42612c0fcdd2a..caed23c39c14408 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -6663,6 +6663,42 @@ defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
 def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
           (UMINPv2i32 V64:$Rn, V64:$Rn)>;
 
+// For vecreduce_{opc}
+multiclass SIMDAcrossLanesVecReductionIntrinsic<string baseOpc,
+                                            SDPatternOperator opNode> {
+def : Pat<(i8 (opNode (v8i8 FPR64:$Rn))),
+          (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) FPR64:$Rn)>;
+
+def : Pat<(i8 (opNode (v16i8 FPR128:$Rn))),
+          (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) FPR128:$Rn)>;
+
+def : Pat<(i16 (opNode (v4i16 FPR64:$Rn))),
+          (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) FPR64:$Rn)>;
+
+def : Pat<(i16 (opNode (v8i16 FPR128:$Rn))),
+          (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) FPR128:$Rn)>;
+
+def : Pat<(i32 (opNode (v4i32 V128:$Rn))), 
+          (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn)>;
+
+}
+
+defm : SIMDAcrossLanesVecReductionIntrinsic<"UMINV", vecreduce_umin>;
+def : Pat<(i32 (vecreduce_umin (v2i32 V64:$Rn))), 
+          (i32 (EXTRACT_SUBREG (UMINPv2i32 V64:$Rn, V64:$Rn), ssub))>;
+
+defm : SIMDAcrossLanesVecReductionIntrinsic<"UMAXV", vecreduce_umax>;
+def : Pat<(i32 (vecreduce_umax (v2i32 V64:$Rn))), 
+          (i32 (EXTRACT_SUBREG (UMAXPv2i32 V64:$Rn, V64:$Rn), ssub))>;
+
+defm : SIMDAcrossLanesVecReductionIntrinsic<"SMINV", vecreduce_smin>;
+def : Pat<(i32 (vecreduce_smin (v2i32 V64:$Rn))), 
+          (i32 (EXTRACT_SUBREG (SMINPv2i32 V64:$Rn, V64:$Rn), ssub))>;
+
+defm : SIMDAcrossLanesVecReductionIntrinsic<"SMAXV", vecreduce_smax>;
+def : Pat<(i32 (vecreduce_smax (v2i32 V64:$Rn))), 
+          (i32 (EXTRACT_SUBREG (SMAXPv2i32 V64:$Rn, V64:$Rn), ssub))>;
+
 multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
   def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
         (i32 (SMOVvi16to32
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 598a195d4fb1016..38f1c3f1dacf972 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -967,6 +967,21 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
       .scalarize(1)
       .lower();
 
+  getActionDefinitionsBuilder(
+      {G_VECREDUCE_SMIN, G_VECREDUCE_SMAX, G_VECREDUCE_UMIN, G_VECREDUCE_UMAX})
+      .legalFor({{s8, v8s8},
+                 {s8, v16s8},
+                 {s16, v4s16},
+                 {s16, v8s16},
+                 {s32, v2s32},
+                 {s32, v4s32}})
+      .clampMaxNumElements(1, s64, 2)
+      .clampMaxNumElements(1, s32, 4)
+      .clampMaxNumElements(1, s16, 8)
+      .clampMaxNumElements(1, s8, 16)
+      .scalarize(1)
+      .lower();
+
   getActionDefinitionsBuilder(
       {G_VECREDUCE_OR, G_VECREDUCE_AND, G_VECREDUCE_XOR})
       // Try to break down into smaller vectors as long as they're at least 64
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index d5f7507ec5dd767..3cfaa927860a877 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -769,17 +769,20 @@
 # DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_VECREDUCE_SMAX (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
-# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
+# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_VECREDUCE_SMIN (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
-# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_VECREDUCE_UMAX (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
-# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
+# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_VECREDUCE_UMIN (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
-# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
-# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
+# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
 # DEBUG-NEXT: G_SBFX (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
 # DEBUG-NEXT: .. the first uncovered type index: 2, OK
 # DEBUG-NEXT: .. the first uncovered imm index: 0, OK
diff --git a/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll b/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
index f5d7d330b45c449..df35b4ecb3d6623 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-minmaxv.ll
@@ -1,228 +1,1933 @@
-; RUN: llc < %s -mtriple=aarch64-linux--gnu -aarch64-neon-syntax=generic | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
 
-declare i8 @llvm.vector.reduce.smax.v16i8(<16 x i8>)
-declare i16 @llvm.vector.reduce.smax.v8i16(<8 x i16>)
-declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>)
-declare i8 @llvm.vector.reduce.umax.v16i8(<16 x i8>)
-declare i16 @llvm.vector.reduce.umax.v8i16(<8 x i16>)
-declare i32 @llvm.vector.reduce.umax.v4i32(<4 x i32>)
+; CHECK-GI:         warning: Instruction selection used fallback path for sminv_v3i64
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for smaxv_v3i64
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for uminv_v3i64
+; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for umaxv_v3i64
 
+declare i8 @llvm.vector.reduce.smin.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.smin.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.smin.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.smin.v8i8(<8 x i8>)
 declare i8 @llvm.vector.reduce.smin.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.smin.v32i8(<32 x i8>)
+declare i16 @llvm.vector.reduce.smin.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.smin.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.smin.v4i16(<4 x i16>)
 declare i16 @llvm.vector.reduce.smin.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.smin.v16i16(<16 x i16>)
+declare i32 @llvm.vector.reduce.smin.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.smin.v3i32(<3 x i32>)
 declare i32 @llvm.vector.reduce.smin.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.smin.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.smin.v16i32(<16 x i32>)
+declare i64 @llvm.vector.reduce.smin.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.smin.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.smin.v4i64(<4 x i64>)
+declare i128 @llvm.vector.reduce.smin.v2i128(<2 x i128>)
+declare i8 @llvm.vector.reduce.smax.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.smax.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.smax.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.smax.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.smax.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.smax.v32i8(<32 x i8>)
+declare i16 @llvm.vector.reduce.smax.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.smax.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.smax.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.smax.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.smax.v16i16(<16 x i16>)
+declare i32 @llvm.vector.reduce.smax.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.smax.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.smax.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.smax.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.smax.v16i32(<16 x i32>)
+declare i64 @llvm.vector.reduce.smax.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.smax.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.smax.v4i64(<4 x i64>)
+declare i128 @llvm.vector.reduce.smax.v2i128(<2 x i128>)
+declare i8 @llvm.vector.reduce.umin.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.umin.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.umin.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.umin.v8i8(<8 x i8>)
 declare i8 @llvm.vector.reduce.umin.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.umin.v32i8(<32 x i8>)
+declare i16 @llvm.vector.reduce.umin.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.umin.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.umin.v4i16(<4 x i16>)
 declare i16 @llvm.vector.reduce.umin.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.umin.v16i16(<16 x i16>)
+declare i32 @llvm.vector.reduce.umin.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.umin.v3i32(<3 x i32>)
 declare i32 @llvm.vector.reduce.umin.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.umin.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.umin.v16i32(<16 x i32>)
+declare i64 @llvm.vector.reduce.umin.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.umin.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.umin.v4i64(<4 x i64>)
+declare i128 @llvm.vector.reduce.umin.v2i128(<2 x i128>)
+declare i8 @llvm.vector.reduce.umax.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.umax.v3i8(<3 x i8>)
+declare i8 @llvm.vector.reduce.umax.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.umax.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.umax.v16i8(<16 x i8>)
+declare i8 @llvm.vector.reduce.umax.v32i8(<32 x i8>)
+declare i16 @llvm.vector.reduce.umax.v2i16(<2 x i16>)
+declare i16 @llvm.vector.reduce.umax.v3i16(<3 x i16>)
+declare i16 @llvm.vector.reduce.umax.v4i16(<4 x i16>)
+declare i16 @llvm.vector.reduce.umax.v8i16(<8 x i16>)
+declare i16 @llvm.vector.reduce.umax.v16i16(<16 x i16>)
+declare i32 @llvm.vector.reduce.umax.v2i32(<2 x i32>)
+declare i32 @llvm.vector.reduce.umax.v3i32(<3 x i32>)
+declare i32 @llvm.vector.reduce.umax.v4i32(<4 x i32>)
+declare i32 @llvm.vector.reduce.umax.v8i32(<8 x i32>)
+declare i32 @llvm.vector.reduce.umax.v16i32(<16 x i32>)
+declare i64 @llvm.vector.reduce.umax.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.umax.v3i64(<3 x i64>)
+declare i64 @llvm.vector.reduce.umax.v4i64(<4 x i64>)
+declare i128 @llvm.vector.reduce.umax.v2i128(<2 x i128>)
 
 declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>)
 declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>)
 
-; CHECK-LABEL: smax_B
-; CHECK: smaxv {{b[0-9]+}}, {{v[0-9]+}}.16b
 define i8 @smax_B(ptr nocapture readonly %arr)  {
+; CHECK-LABEL: smax_B:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    smaxv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %arr.load = load <16 x i8>, ptr %arr
   %r = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %arr.load)
   ret i8 %r
 }
 
-; CHECK-LABEL: smax_H
-; CHECK: smaxv {{h[0-9]+}}, {{v[0-9]+}}.8h
 define i16 @smax_H(ptr nocapture readonly %arr) {
+; CHECK-LABEL: smax_H:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    smaxv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %arr.load = load <8 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %arr.load)
   ret i16 %r
 }
 
-; CHECK-LABEL: smax_S
-; CHECK: smaxv {{s[0-9]+}}, {{v[0-9]+}}.4s
 define i32 @smax_S(ptr nocapture readonly %arr)  {
+; CHECK-LABEL: smax_S:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    smaxv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %arr.load = load <4 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %arr.load)
   ret i32 %r
 }
 
-; CHECK-LABEL: umax_B
-; CHECK: umaxv {{b[0-9]+}}, {{v[0-9]+}}.16b
 define i8 @umax_B(ptr nocapture readonly %arr)  {
+; CHECK-LABEL: umax_B:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    umaxv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %arr.load = load <16 x i8>, ptr %arr
   %r = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %arr.load)
   ret i8 %r
 }
 
-; CHECK-LABEL: umax_H
-; CHECK: umaxv {{h[0-9]+}}, {{v[0-9]+}}.8h
 define i16 @umax_H(ptr nocapture readonly %arr)  {
+; CHECK-LABEL: umax_H:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    umaxv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %arr.load = load <8 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %arr.load)
   ret i16 %r
 }
 
-; CHECK-LABEL: umax_S
-; CHECK: umaxv {{s[0-9]+}}, {{v[0-9]+}}.4s
 define i32 @umax_S(ptr nocapture readonly %arr) {
+; CHECK-LABEL: umax_S:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    umaxv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %arr.load = load <4 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %arr.load)
   ret i32 %r
 }
 
-; CHECK-LABEL: smin_B
-; CHECK: sminv {{b[0-9]+}}, {{v[0-9]+}}.16b
 define i8 @smin_B(ptr nocapture readonly %arr) {
+; CHECK-LABEL: smin_B:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sminv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %arr.load = load <16 x i8>, ptr %arr
   %r = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %arr.load)
   ret i8 %r
 }
 
-; CHECK-LABEL: smin_H
-; CHECK: sminv {{h[0-9]+}}, {{v[0-9]+}}.8h
 define i16 @smin_H(ptr nocapture readonly %arr) {
+; CHECK-LABEL: smin_H:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sminv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %arr.load = load <8 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %arr.load)
   ret i16 %r
 }
 
-; CHECK-LABEL: smin_S
-; CHECK: sminv {{s[0-9]+}}, {{v[0-9]+}}.4s
 define i32 @smin_S(ptr nocapture readonly %arr) {
+; CHECK-LABEL: smin_S:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    sminv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %arr.load = load <4 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %arr.load)
   ret i32 %r
 }
 
-; CHECK-LABEL: umin_B
-; CHECK: uminv {{b[0-9]+}}, {{v[0-9]+}}.16b
 define i8 @umin_B(ptr nocapture readonly %arr)  {
+; CHECK-LABEL: umin_B:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uminv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %arr.load = load <16 x i8>, ptr %arr
   %r = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %arr.load)
   ret i8 %r
 }
 
-; CHECK-LABEL: umin_H
-; CHECK: uminv {{h[0-9]+}}, {{v[0-9]+}}.8h
 define i16 @umin_H(ptr nocapture readonly %arr)  {
+; CHECK-LABEL: umin_H:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uminv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %arr.load = load <8 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %arr.load)
   ret i16 %r
 }
 
-; CHECK-LABEL: umin_S
-; CHECK: uminv {{s[0-9]+}}, {{v[0-9]+}}.4s
 define i32 @umin_S(ptr nocapture readonly %arr) {
+; CHECK-LABEL: umin_S:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    uminv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
   %arr.load = load <4 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %arr.load)
   ret i32 %r
 }
 
-; CHECK-LABEL: fmaxnm_S
-; CHECK: fmaxnmv
 define float @fmaxnm_S(ptr nocapture readonly %arr) {
+; CHECK-LABEL: fmaxnm_S:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    fmaxnmv s0, v0.4s
+; CHECK-NEXT:    ret
   %arr.load  = load <4 x float>, ptr %arr
   %r = call nnan float @llvm.vector.reduce.fmax.v4f32(<4 x float> %arr.load)
   ret float %r
 }
 
-; CHECK-LABEL: fminnm_S
-; CHECK: fminnmv
 define float @fminnm_S(ptr nocapture readonly %arr) {
+; CHECK-LABEL: fminnm_S:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    fminnmv s0, v0.4s
+; CHECK-NEXT:    ret
   %arr.load  = load <4 x float>, ptr %arr
   %r = call nnan float @llvm.vector.reduce.fmin.v4f32(<4 x float> %arr.load)
   ret float %r
 }
 
-declare i16 @llvm.vector.reduce.umax.v16i16(<16 x i16>)
-
 define i16 @oversized_umax_256(ptr nocapture readonly %arr)  {
-; CHECK-LABEL: oversized_umax_256
-; CHECK: umax [[V0:v[0-9]+]].8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-; CHECK: umaxv {{h[0-9]+}}, [[V0]]
+; CHECK-SD-LABEL: oversized_umax_256:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ldp q1, q0, [x0]
+; CHECK-SD-NEXT:    umax v0.8h, v1.8h, v0.8h
+; CHECK-SD-NEXT:    umaxv h0, v0.8h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: oversized_umax_256:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldp q0, q1, [x0]
+; CHECK-GI-NEXT:    umax v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT:    umaxv h0, v0.8h
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
   %arr.load = load <16 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %arr.load)
   ret i16 %r
 }
 
-declare i32 @llvm.vector.reduce.umax.v16i32(<16 x i32>)
-
 define i32 @oversized_umax_512(ptr nocapture readonly %arr)  {
-; CHECK-LABEL: oversized_umax_512
-; CHECK: umax v
-; CHECK-NEXT: umax v
-; CHECK-NEXT: umax [[V0:v[0-9]+]].4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-; CHECK-NEXT: umaxv {{s[0-9]+}}, [[V0]]
+; CHECK-SD-LABEL: oversized_umax_512:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-SD-NEXT:    ldp q2, q3, [x0]
+; CHECK-SD-NEXT:    umax v1.4s, v3.4s, v1.4s
+; CHECK-SD-NEXT:    umax v0.4s, v2.4s, v0.4s
+; CHECK-SD-NEXT:    umax v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT:    umaxv s0, v0.4s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: oversized_umax_512:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldp q0, q1, [x0]
+; CHECK-GI-NEXT:    ldp q2, q3, [x0, #32]
+; CHECK-GI-NEXT:    umax v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    umax v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT:    umax v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    umaxv s0, v0.4s
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
   %arr.load = load <16 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %arr.load)
   ret i32 %r
 }
 
-declare i16 @llvm.vector.reduce.umin.v16i16(<16 x i16>)
-
 define i16 @oversized_umin_256(ptr nocapture readonly %arr)  {
-; CHECK-LABEL: oversized_umin_256
-; CHECK: umin [[V0:v[0-9]+]].8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-; CHECK: uminv {{h[0-9]+}}, [[V0]]
+; CHECK-SD-LABEL: oversized_umin_256:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ldp q1, q0, [x0]
+; CHECK-SD-NEXT:    umin v0.8h, v1.8h, v0.8h
+; CHECK-SD-NEXT:    uminv h0, v0.8h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: oversized_umin_256:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldp q0, q1, [x0]
+; CHECK-GI-NEXT:    umin v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT:    uminv h0, v0.8h
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
   %arr.load = load <16 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %arr.load)
   ret i16 %r
 }
 
-declare i32 @llvm.vector.reduce.umin.v16i32(<16 x i32>)
-
 define i32 @oversized_umin_512(ptr nocapture readonly %arr)  {
-; CHECK-LABEL: oversized_umin_512
-; CHECK: umin v
-; CHECK-NEXT: umin v
-; CHECK-NEXT: umin [[V0:v[0-9]+]].4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-; CHECK-NEXT: uminv {{s[0-9]+}}, [[V0]]
+; CHECK-SD-LABEL: oversized_umin_512:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-SD-NEXT:    ldp q2, q3, [x0]
+; CHECK-SD-NEXT:    umin v1.4s, v3.4s, v1.4s
+; CHECK-SD-NEXT:    umin v0.4s, v2.4s, v0.4s
+; CHECK-SD-NEXT:    umin v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT:    uminv s0, v0.4s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: oversized_umin_512:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldp q0, q1, [x0]
+; CHECK-GI-NEXT:    ldp q2, q3, [x0, #32]
+; CHECK-GI-NEXT:    umin v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    umin v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT:    umin v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    uminv s0, v0.4s
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
   %arr.load = load <16 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.umin.v16i32(<16 x i32> %arr.load)
   ret i32 %r
 }
 
-declare i16 @llvm.vector.reduce.smax.v16i16(<16 x i16>)
-
 define i16 @oversized_smax_256(ptr nocapture readonly %arr)  {
-; CHECK-LABEL: oversized_smax_256
-; CHECK: smax [[V0:v[0-9]+]].8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-; CHECK: smaxv {{h[0-9]+}}, [[V0]]
+; CHECK-SD-LABEL: oversized_smax_256:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ldp q1, q0, [x0]
+; CHECK-SD-NEXT:    smax v0.8h, v1.8h, v0.8h
+; CHECK-SD-NEXT:    smaxv h0, v0.8h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: oversized_smax_256:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldp q0, q1, [x0]
+; CHECK-GI-NEXT:    smax v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT:    smaxv h0, v0.8h
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
   %arr.load = load <16 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %arr.load)
   ret i16 %r
 }
 
-declare i32 @llvm.vector.reduce.smax.v16i32(<16 x i32>)
-
 define i32 @oversized_smax_512(ptr nocapture readonly %arr)  {
-; CHECK-LABEL: oversized_smax_512
-; CHECK: smax v
-; CHECK-NEXT: smax v
-; CHECK-NEXT: smax [[V0:v[0-9]+]].4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-; CHECK-NEXT: smaxv {{s[0-9]+}}, [[V0]]
+; CHECK-SD-LABEL: oversized_smax_512:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-SD-NEXT:    ldp q2, q3, [x0]
+; CHECK-SD-NEXT:    smax v1.4s, v3.4s, v1.4s
+; CHECK-SD-NEXT:    smax v0.4s, v2.4s, v0.4s
+; CHECK-SD-NEXT:    smax v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT:    smaxv s0, v0.4s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: oversized_smax_512:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldp q0, q1, [x0]
+; CHECK-GI-NEXT:    ldp q2, q3, [x0, #32]
+; CHECK-GI-NEXT:    smax v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    smax v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT:    smax v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    smaxv s0, v0.4s
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
   %arr.load = load <16 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.smax.v16i32(<16 x i32> %arr.load)
   ret i32 %r
 }
 
-declare i16 @llvm.vector.reduce.smin.v16i16(<16 x i16>)
-
 define i16 @oversized_smin_256(ptr nocapture readonly %arr)  {
-; CHECK-LABEL: oversized_smin_256
-; CHECK: smin [[V0:v[0-9]+]].8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
-; CHECK: sminv {{h[0-9]+}}, [[V0]]
+; CHECK-SD-LABEL: oversized_smin_256:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ldp q1, q0, [x0]
+; CHECK-SD-NEXT:    smin v0.8h, v1.8h, v0.8h
+; CHECK-SD-NEXT:    sminv h0, v0.8h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: oversized_smin_256:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldp q0, q1, [x0]
+; CHECK-GI-NEXT:    smin v0.8h, v0.8h, v1.8h
+; CHECK-GI-NEXT:    sminv h0, v0.8h
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
   %arr.load = load <16 x i16>, ptr %arr
   %r = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %arr.load)
   ret i16 %r
 }
 
-declare i32 @llvm.vector.reduce.smin.v16i32(<16 x i32>)
 
 define i32 @oversized_smin_512(ptr nocapture readonly %arr)  {
-; CHECK-LABEL: oversized_smin_512
-; CHECK: smin v
-; CHECK-NEXT: smin v
-; CHECK-NEXT: smin [[V0:v[0-9]+]].4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
-; CHECK-NEXT: sminv {{s[0-9]+}}, [[V0]]
+; CHECK-SD-LABEL: oversized_smin_512:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ldp q0, q1, [x0, #32]
+; CHECK-SD-NEXT:    ldp q2, q3, [x0]
+; CHECK-SD-NEXT:    smin v1.4s, v3.4s, v1.4s
+; CHECK-SD-NEXT:    smin v0.4s, v2.4s, v0.4s
+; CHECK-SD-NEXT:    smin v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT:    sminv s0, v0.4s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: oversized_smin_512:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    ldp q0, q1, [x0]
+; CHECK-GI-NEXT:    ldp q2, q3, [x0, #32]
+; CHECK-GI-NEXT:    smin v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    smin v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT:    smin v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    sminv s0, v0.4s
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
   %arr.load = load <16 x i32>, ptr %arr
   %r = call i32 @llvm.vector.reduce.smin.v16i32(<16 x i32> %arr.load)
   ret i32 %r
 }
+
+define i8 @sminv_v2i8(<2 x i8> %a) {
+; CHECK-SD-LABEL: sminv_v2i8:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    shl v0.2s, v0.2s, #24
+; CHECK-SD-NEXT:    sshr v0.2s, v0.2s, #24
+; CHECK-SD-NEXT:    sminp v0.2s, v0.2s, v0.2s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sminv_v2i8:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov s1, v0.s[1]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    sxtb w8, w8
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    cmp w8, w9, sxtb
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, lt
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.smin.v2i8(<2 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @sminv_v3i8(<3 x i8> %a) {
+; CHECK-SD-LABEL: sminv_v3i8:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    movi v0.4h, #127
+; CHECK-SD-NEXT:    mov v0.h[0], w0
+; CHECK-SD-NEXT:    mov v0.h[1], w1
+; CHECK-SD-NEXT:    mov v0.h[2], w2
+; CHECK-SD-NEXT:    shl v0.4h, v0.4h, #8
+; CHECK-SD-NEXT:    sshr v0.4h, v0.4h, #8
+; CHECK-SD-NEXT:    sminv h0, v0.4h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sminv_v3i8:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    sxtb w8, w0
+; CHECK-GI-NEXT:    cmp w8, w1, sxtb
+; CHECK-GI-NEXT:    csel w8, w0, w1, lt
+; CHECK-GI-NEXT:    sxtb w9, w8
+; CHECK-GI-NEXT:    cmp w9, w2, sxtb
+; CHECK-GI-NEXT:    csel w0, w8, w2, lt
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.smin.v3i8(<3 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @sminv_v4i8(<4 x i8> %a) {
+; CHECK-SD-LABEL: sminv_v4i8:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    shl v0.4h, v0.4h, #8
+; CHECK-SD-NEXT:    sshr v0.4h, v0.4h, #8
+; CHECK-SD-NEXT:    sminv h0, v0.4h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sminv_v4i8:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov h1, v0.h[1]
+; CHECK-GI-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    mov h3, v0.h[3]
+; CHECK-GI-NEXT:    sxtb w9, w8
+; CHECK-GI-NEXT:    fmov w10, s1
+; CHECK-GI-NEXT:    fmov w11, s2
+; CHECK-GI-NEXT:    cmp w9, w10, sxtb
+; CHECK-GI-NEXT:    sxtb w9, w11
+; CHECK-GI-NEXT:    csel w8, w8, w10, lt
+; CHECK-GI-NEXT:    fmov w10, s3
+; CHECK-GI-NEXT:    cmp w9, w10, sxtb
+; CHECK-GI-NEXT:    sxtb w9, w8
+; CHECK-GI-NEXT:    csel w10, w11, w10, lt
+; CHECK-GI-NEXT:    cmp w9, w10, sxtb
+; CHECK-GI-NEXT:    csel w0, w8, w10, lt
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.smin.v4i8(<4 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @sminv_v8i8(<8 x i8> %a) {
+; CHECK-LABEL: sminv_v8i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sminv b0, v0.8b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.smin.v8i8(<8 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @sminv_v16i8(<16 x i8> %a) {
+; CHECK-LABEL: sminv_v16i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sminv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.smin.v16i8(<16 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @sminv_v32i8(<32 x i8> %a) {
+; CHECK-LABEL: sminv_v32i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smin v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    sminv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.smin.v32i8(<32 x i8> %a)
+  ret i8 %arg1
+}
+
+define i16 @sminv_v2i16(<2 x i16> %a) {
+; CHECK-SD-LABEL: sminv_v2i16:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    shl v0.2s, v0.2s, #16
+; CHECK-SD-NEXT:    sshr v0.2s, v0.2s, #16
+; CHECK-SD-NEXT:    sminp v0.2s, v0.2s, v0.2s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sminv_v2i16:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov s1, v0.s[1]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    sxth w8, w8
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    cmp w8, w9, sxth
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, lt
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.smin.v2i16(<2 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @sminv_v3i16(<3 x i16> %a) {
+; CHECK-SD-LABEL: sminv_v3i16:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    mov w8, #32767 // =0x7fff
+; CHECK-SD-NEXT:    mov v0.h[3], w8
+; CHECK-SD-NEXT:    sminv h0, v0.4h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sminv_v3i16:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov h1, v0.h[1]
+; CHECK-GI-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    fmov w9, s0
+; CHECK-GI-NEXT:    sxth w8, w8
+; CHECK-GI-NEXT:    fmov w10, s1
+; CHECK-GI-NEXT:    fmov w11, s2
+; CHECK-GI-NEXT:    fmov w12, s1
+; CHECK-GI-NEXT:    cmp w8, w10, sxth
+; CHECK-GI-NEXT:    sxth w8, w11
+; CHECK-GI-NEXT:    fmov w10, s2
+; CHECK-GI-NEXT:    csel w9, w9, w12, lt
+; CHECK-GI-NEXT:    cmp w8, w9, sxth
+; CHECK-GI-NEXT:    csel w0, w9, w10, gt
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.smin.v3i16(<3 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @sminv_v4i16(<4 x i16> %a) {
+; CHECK-LABEL: sminv_v4i16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sminv h0, v0.4h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.smin.v4i16(<4 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @sminv_v8i16(<8 x i16> %a) {
+; CHECK-LABEL: sminv_v8i16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sminv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.smin.v8i16(<8 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @sminv_v16i16(<16 x i16> %a) {
+; CHECK-LABEL: sminv_v16i16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smin v0.8h, v0.8h, v1.8h
+; CHECK-NEXT:    sminv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %a)
+  ret i16 %arg1
+}
+
+define i32 @sminv_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: sminv_v2i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sminp v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.smin.v2i32(<2 x i32> %a)
+  ret i32 %arg1
+}
+
+define i32 @sminv_v3i32(<3 x i32> %a) {
+; CHECK-SD-LABEL: sminv_v3i32:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    mov w8, #2147483647 // =0x7fffffff
+; CHECK-SD-NEXT:    mov v0.s[3], w8
+; CHECK-SD-NEXT:    sminv s0, v0.4s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sminv_v3i32:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    mov s1, v0.s[1]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    mov s2, v0.s[2]
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    fmov w9, s2
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, lt
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    fcsel s0, s0, s2, lt
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.smin.v3i32(<3 x i32> %a)
+  ret i32 %arg1
+}
+
+define i32 @sminv_v4i32(<4 x i32> %a) {
+; CHECK-LABEL: sminv_v4i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sminv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %a)
+  ret i32 %arg1
+}
+
+define i32 @sminv_v8i32(<8 x i32> %a) {
+; CHECK-LABEL: sminv_v8i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smin v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    sminv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.smin.v8i32(<8 x i32> %a)
+  ret i32 %arg1
+}
+
+define i64 @sminv_v2i64(<2 x i64> %a) {
+; CHECK-SD-LABEL: sminv_v2i64:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    cmgt d2, d1, d0
+; CHECK-SD-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT:    fmov x0, d0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sminv_v2i64:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    fmov x9, d1
+; CHECK-GI-NEXT:    cmp x8, x9
+; CHECK-GI-NEXT:    fcsel d0, d0, d1, lt
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> %a)
+  ret i64 %arg1
+}
+
+define i64 @sminv_v3i64(<3 x i64> %a) {
+; CHECK-LABEL: sminv_v3i64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov x8, #9223372036854775807 // =0x7fffffffffffffff
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    mov v2.d[1], x8
+; CHECK-NEXT:    cmgt v1.2d, v2.2d, v0.2d
+; CHECK-NEXT:    bif v0.16b, v2.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    cmgt d2, d1, d0
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i64 @llvm.vector.reduce.smin.v3i64(<3 x i64> %a)
+  ret i64 %arg1
+}
+
+define i64 @sminv_v4i64(<4 x i64> %a) {
+; CHECK-SD-LABEL: sminv_v4i64:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    cmgt v2.2d, v1.2d, v0.2d
+; CHECK-SD-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    cmgt d2, d1, d0
+; CHECK-SD-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT:    fmov x0, d0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sminv_v4i64:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    cmgt v2.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    fmov x9, d1
+; CHECK-GI-NEXT:    cmp x8, x9
+; CHECK-GI-NEXT:    fcsel d0, d0, d1, lt
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i64 @llvm.vector.reduce.smin.v4i64(<4 x i64> %a)
+  ret i64 %arg1
+}
+
+define i128 @sminv_v2i128(<2 x i128> %a) {
+; CHECK-SD-LABEL: sminv_v2i128:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    cmp x0, x2
+; CHECK-SD-NEXT:    sbcs xzr, x1, x3
+; CHECK-SD-NEXT:    csel x0, x0, x2, lt
+; CHECK-SD-NEXT:    csel x1, x1, x3, lt
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sminv_v2i128:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    cmp x1, x3
+; CHECK-GI-NEXT:    cset w8, lt
+; CHECK-GI-NEXT:    cmp x0, x2
+; CHECK-GI-NEXT:    cset w9, lo
+; CHECK-GI-NEXT:    cmp x1, x3
+; CHECK-GI-NEXT:    csel w8, w9, w8, eq
+; CHECK-GI-NEXT:    tst w8, #0x1
+; CHECK-GI-NEXT:    csel x0, x0, x2, ne
+; CHECK-GI-NEXT:    csel x1, x1, x3, ne
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i128 @llvm.vector.reduce.smin.v2i128(<2 x i128> %a)
+  ret i128 %arg1
+}
+
+define i8 @smaxv_v2i8(<2 x i8> %a) {
+; CHECK-SD-LABEL: smaxv_v2i8:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    shl v0.2s, v0.2s, #24
+; CHECK-SD-NEXT:    sshr v0.2s, v0.2s, #24
+; CHECK-SD-NEXT:    smaxp v0.2s, v0.2s, v0.2s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: smaxv_v2i8:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov s1, v0.s[1]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    sxtb w8, w8
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    cmp w8, w9, sxtb
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, gt
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.smax.v2i8(<2 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @smaxv_v3i8(<3 x i8> %a) {
+; CHECK-SD-LABEL: smaxv_v3i8:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    movi v0.4h, #128
+; CHECK-SD-NEXT:    mov v0.h[0], w0
+; CHECK-SD-NEXT:    mov v0.h[1], w1
+; CHECK-SD-NEXT:    mov v0.h[2], w2
+; CHECK-SD-NEXT:    shl v0.4h, v0.4h, #8
+; CHECK-SD-NEXT:    sshr v0.4h, v0.4h, #8
+; CHECK-SD-NEXT:    smaxv h0, v0.4h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: smaxv_v3i8:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    sxtb w8, w0
+; CHECK-GI-NEXT:    cmp w8, w1, sxtb
+; CHECK-GI-NEXT:    csel w8, w0, w1, gt
+; CHECK-GI-NEXT:    sxtb w9, w8
+; CHECK-GI-NEXT:    cmp w9, w2, sxtb
+; CHECK-GI-NEXT:    csel w0, w8, w2, gt
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.smax.v3i8(<3 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @smaxv_v4i8(<4 x i8> %a) {
+; CHECK-SD-LABEL: smaxv_v4i8:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    shl v0.4h, v0.4h, #8
+; CHECK-SD-NEXT:    sshr v0.4h, v0.4h, #8
+; CHECK-SD-NEXT:    smaxv h0, v0.4h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: smaxv_v4i8:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov h1, v0.h[1]
+; CHECK-GI-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    mov h3, v0.h[3]
+; CHECK-GI-NEXT:    sxtb w9, w8
+; CHECK-GI-NEXT:    fmov w10, s1
+; CHECK-GI-NEXT:    fmov w11, s2
+; CHECK-GI-NEXT:    cmp w9, w10, sxtb
+; CHECK-GI-NEXT:    sxtb w9, w11
+; CHECK-GI-NEXT:    csel w8, w8, w10, gt
+; CHECK-GI-NEXT:    fmov w10, s3
+; CHECK-GI-NEXT:    cmp w9, w10, sxtb
+; CHECK-GI-NEXT:    sxtb w9, w8
+; CHECK-GI-NEXT:    csel w10, w11, w10, gt
+; CHECK-GI-NEXT:    cmp w9, w10, sxtb
+; CHECK-GI-NEXT:    csel w0, w8, w10, gt
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.smax.v4i8(<4 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @smaxv_v8i8(<8 x i8> %a) {
+; CHECK-LABEL: smaxv_v8i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smaxv b0, v0.8b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.smax.v8i8(<8 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @smaxv_v16i8(<16 x i8> %a) {
+; CHECK-LABEL: smaxv_v16i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smaxv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.smax.v16i8(<16 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @smaxv_v32i8(<32 x i8> %a) {
+; CHECK-LABEL: smaxv_v32i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smax v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    smaxv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.smax.v32i8(<32 x i8> %a)
+  ret i8 %arg1
+}
+
+define i16 @smaxv_v2i16(<2 x i16> %a) {
+; CHECK-SD-LABEL: smaxv_v2i16:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    shl v0.2s, v0.2s, #16
+; CHECK-SD-NEXT:    sshr v0.2s, v0.2s, #16
+; CHECK-SD-NEXT:    smaxp v0.2s, v0.2s, v0.2s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: smaxv_v2i16:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov s1, v0.s[1]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    sxth w8, w8
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    cmp w8, w9, sxth
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, gt
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.smax.v2i16(<2 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @smaxv_v3i16(<3 x i16> %a) {
+; CHECK-SD-LABEL: smaxv_v3i16:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    mov w8, #-32768 // =0xffff8000
+; CHECK-SD-NEXT:    mov v0.h[3], w8
+; CHECK-SD-NEXT:    smaxv h0, v0.4h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: smaxv_v3i16:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov h1, v0.h[1]
+; CHECK-GI-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    fmov w9, s0
+; CHECK-GI-NEXT:    sxth w8, w8
+; CHECK-GI-NEXT:    fmov w10, s1
+; CHECK-GI-NEXT:    fmov w11, s2
+; CHECK-GI-NEXT:    fmov w12, s1
+; CHECK-GI-NEXT:    cmp w8, w10, sxth
+; CHECK-GI-NEXT:    sxth w8, w11
+; CHECK-GI-NEXT:    fmov w10, s2
+; CHECK-GI-NEXT:    csel w9, w9, w12, gt
+; CHECK-GI-NEXT:    cmp w8, w9, sxth
+; CHECK-GI-NEXT:    csel w0, w9, w10, lt
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.smax.v3i16(<3 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @smaxv_v4i16(<4 x i16> %a) {
+; CHECK-LABEL: smaxv_v4i16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smaxv h0, v0.4h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.smax.v4i16(<4 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @smaxv_v8i16(<8 x i16> %a) {
+; CHECK-LABEL: smaxv_v8i16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smaxv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.smax.v8i16(<8 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @smaxv_v16i16(<16 x i16> %a) {
+; CHECK-LABEL: smaxv_v16i16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smax v0.8h, v0.8h, v1.8h
+; CHECK-NEXT:    smaxv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.smax.v16i16(<16 x i16> %a)
+  ret i16 %arg1
+}
+
+define i32 @smaxv_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: smaxv_v2i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smaxp v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.smax.v2i32(<2 x i32> %a)
+  ret i32 %arg1
+}
+
+define i32 @smaxv_v3i32(<3 x i32> %a) {
+; CHECK-SD-LABEL: smaxv_v3i32:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    mov w8, #-2147483648 // =0x80000000
+; CHECK-SD-NEXT:    mov v0.s[3], w8
+; CHECK-SD-NEXT:    smaxv s0, v0.4s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: smaxv_v3i32:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    mov s1, v0.s[1]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    mov s2, v0.s[2]
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    fmov w9, s2
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, gt
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    fcsel s0, s0, s2, gt
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.smax.v3i32(<3 x i32> %a)
+  ret i32 %arg1
+}
+
+define i32 @smaxv_v4i32(<4 x i32> %a) {
+; CHECK-LABEL: smaxv_v4i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smaxv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %a)
+  ret i32 %arg1
+}
+
+define i32 @smaxv_v8i32(<8 x i32> %a) {
+; CHECK-LABEL: smaxv_v8i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    smax v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    smaxv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.smax.v8i32(<8 x i32> %a)
+  ret i32 %arg1
+}
+
+define i64 @smaxv_v2i64(<2 x i64> %a) {
+; CHECK-SD-LABEL: smaxv_v2i64:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    cmgt d2, d0, d1
+; CHECK-SD-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT:    fmov x0, d0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: smaxv_v2i64:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    fmov x9, d1
+; CHECK-GI-NEXT:    cmp x8, x9
+; CHECK-GI-NEXT:    fcsel d0, d0, d1, gt
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> %a)
+  ret i64 %arg1
+}
+
+define i64 @smaxv_v3i64(<3 x i64> %a) {
+; CHECK-LABEL: smaxv_v3i64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov x8, #-9223372036854775808 // =0x8000000000000000
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    mov v2.d[1], x8
+; CHECK-NEXT:    cmgt v1.2d, v0.2d, v2.2d
+; CHECK-NEXT:    bif v0.16b, v2.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    cmgt d2, d0, d1
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i64 @llvm.vector.reduce.smax.v3i64(<3 x i64> %a)
+  ret i64 %arg1
+}
+
+define i64 @smaxv_v4i64(<4 x i64> %a) {
+; CHECK-SD-LABEL: smaxv_v4i64:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    cmgt v2.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    cmgt d2, d0, d1
+; CHECK-SD-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT:    fmov x0, d0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: smaxv_v4i64:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    cmgt v2.2d, v0.2d, v1.2d
+; CHECK-GI-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    fmov x9, d1
+; CHECK-GI-NEXT:    cmp x8, x9
+; CHECK-GI-NEXT:    fcsel d0, d0, d1, gt
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i64 @llvm.vector.reduce.smax.v4i64(<4 x i64> %a)
+  ret i64 %arg1
+}
+
+define i128 @smaxv_v2i128(<2 x i128> %a) {
+; CHECK-SD-LABEL: smaxv_v2i128:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    cmp x2, x0
+; CHECK-SD-NEXT:    sbcs xzr, x3, x1
+; CHECK-SD-NEXT:    csel x0, x0, x2, lt
+; CHECK-SD-NEXT:    csel x1, x1, x3, lt
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: smaxv_v2i128:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    cmp x1, x3
+; CHECK-GI-NEXT:    cset w8, gt
+; CHECK-GI-NEXT:    cmp x0, x2
+; CHECK-GI-NEXT:    cset w9, hi
+; CHECK-GI-NEXT:    cmp x1, x3
+; CHECK-GI-NEXT:    csel w8, w9, w8, eq
+; CHECK-GI-NEXT:    tst w8, #0x1
+; CHECK-GI-NEXT:    csel x0, x0, x2, ne
+; CHECK-GI-NEXT:    csel x1, x1, x3, ne
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i128 @llvm.vector.reduce.smax.v2i128(<2 x i128> %a)
+  ret i128 %arg1
+}
+
+define i8 @uminv_v2i8(<2 x i8> %a) {
+; CHECK-SD-LABEL: uminv_v2i8:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    movi d1, #0x0000ff000000ff
+; CHECK-SD-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT:    uminp v0.2s, v0.2s, v0.2s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: uminv_v2i8:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov s1, v0.s[1]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    and w8, w8, #0xff
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    cmp w8, w9, uxtb
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, lo
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.umin.v2i8(<2 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @uminv_v3i8(<3 x i8> %a) {
+; CHECK-SD-LABEL: uminv_v3i8:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    movi d0, #0xff00ff00ff00ff
+; CHECK-SD-NEXT:    mov v0.h[0], w0
+; CHECK-SD-NEXT:    mov v0.h[1], w1
+; CHECK-SD-NEXT:    mov v0.h[2], w2
+; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT:    uminv h0, v0.4h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: uminv_v3i8:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    and w8, w0, #0xff
+; CHECK-GI-NEXT:    cmp w8, w1, uxtb
+; CHECK-GI-NEXT:    csel w8, w0, w1, lo
+; CHECK-GI-NEXT:    and w9, w8, #0xff
+; CHECK-GI-NEXT:    cmp w9, w2, uxtb
+; CHECK-GI-NEXT:    csel w0, w8, w2, lo
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.umin.v3i8(<3 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @uminv_v4i8(<4 x i8> %a) {
+; CHECK-SD-LABEL: uminv_v4i8:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT:    uminv h0, v0.4h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: uminv_v4i8:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov h1, v0.h[1]
+; CHECK-GI-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NEXT:    mov h3, v0.h[3]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    fmov w10, s1
+; CHECK-GI-NEXT:    fmov w11, s2
+; CHECK-GI-NEXT:    fmov w12, s3
+; CHECK-GI-NEXT:    and w9, w8, #0xff
+; CHECK-GI-NEXT:    cmp w9, w10, uxtb
+; CHECK-GI-NEXT:    and w9, w11, #0xff
+; CHECK-GI-NEXT:    csel w8, w8, w10, lo
+; CHECK-GI-NEXT:    cmp w9, w12, uxtb
+; CHECK-GI-NEXT:    csel w9, w11, w12, lo
+; CHECK-GI-NEXT:    and w10, w8, #0xff
+; CHECK-GI-NEXT:    cmp w10, w9, uxtb
+; CHECK-GI-NEXT:    csel w0, w8, w9, lo
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.umin.v4i8(<4 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @uminv_v8i8(<8 x i8> %a) {
+; CHECK-LABEL: uminv_v8i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    uminv b0, v0.8b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.umin.v8i8(<8 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @uminv_v16i8(<16 x i8> %a) {
+; CHECK-LABEL: uminv_v16i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    uminv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.umin.v16i8(<16 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @uminv_v32i8(<32 x i8> %a) {
+; CHECK-LABEL: uminv_v32i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umin v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    uminv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.umin.v32i8(<32 x i8> %a)
+  ret i8 %arg1
+}
+
+define i16 @uminv_v2i16(<2 x i16> %a) {
+; CHECK-SD-LABEL: uminv_v2i16:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    movi d1, #0x00ffff0000ffff
+; CHECK-SD-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT:    uminp v0.2s, v0.2s, v0.2s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: uminv_v2i16:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov s1, v0.s[1]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    and w8, w8, #0xffff
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    cmp w8, w9, uxth
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, lo
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.umin.v2i16(<2 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @uminv_v3i16(<3 x i16> %a) {
+; CHECK-SD-LABEL: uminv_v3i16:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    mov w8, #-1 // =0xffffffff
+; CHECK-SD-NEXT:    mov v0.h[3], w8
+; CHECK-SD-NEXT:    uminv h0, v0.4h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: uminv_v3i16:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov h1, v0.h[1]
+; CHECK-GI-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    fmov w9, s0
+; CHECK-GI-NEXT:    uxth w8, w8
+; CHECK-GI-NEXT:    fmov w10, s1
+; CHECK-GI-NEXT:    fmov w11, s2
+; CHECK-GI-NEXT:    fmov w12, s1
+; CHECK-GI-NEXT:    cmp w8, w10, uxth
+; CHECK-GI-NEXT:    uxth w8, w11
+; CHECK-GI-NEXT:    fmov w10, s2
+; CHECK-GI-NEXT:    csel w9, w9, w12, lo
+; CHECK-GI-NEXT:    cmp w8, w9, uxth
+; CHECK-GI-NEXT:    csel w0, w9, w10, hi
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.umin.v3i16(<3 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @uminv_v4i16(<4 x i16> %a) {
+; CHECK-LABEL: uminv_v4i16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    uminv h0, v0.4h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.umin.v4i16(<4 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @uminv_v8i16(<8 x i16> %a) {
+; CHECK-LABEL: uminv_v8i16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    uminv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.umin.v8i16(<8 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @uminv_v16i16(<16 x i16> %a) {
+; CHECK-LABEL: uminv_v16i16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umin v0.8h, v0.8h, v1.8h
+; CHECK-NEXT:    uminv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.umin.v16i16(<16 x i16> %a)
+  ret i16 %arg1
+}
+
+define i32 @uminv_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: uminv_v2i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    uminp v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.umin.v2i32(<2 x i32> %a)
+  ret i32 %arg1
+}
+
+define i32 @uminv_v3i32(<3 x i32> %a) {
+; CHECK-SD-LABEL: uminv_v3i32:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    mov w8, #-1 // =0xffffffff
+; CHECK-SD-NEXT:    mov v0.s[3], w8
+; CHECK-SD-NEXT:    uminv s0, v0.4s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: uminv_v3i32:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    mov s1, v0.s[1]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    mov s2, v0.s[2]
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    fmov w9, s2
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, lo
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    fcsel s0, s0, s2, lo
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.umin.v3i32(<3 x i32> %a)
+  ret i32 %arg1
+}
+
+define i32 @uminv_v4i32(<4 x i32> %a) {
+; CHECK-LABEL: uminv_v4i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    uminv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %a)
+  ret i32 %arg1
+}
+
+define i32 @uminv_v8i32(<8 x i32> %a) {
+; CHECK-LABEL: uminv_v8i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umin v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    uminv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.umin.v8i32(<8 x i32> %a)
+  ret i32 %arg1
+}
+
+define i64 @uminv_v2i64(<2 x i64> %a) {
+; CHECK-SD-LABEL: uminv_v2i64:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    cmhi d2, d1, d0
+; CHECK-SD-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT:    fmov x0, d0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: uminv_v2i64:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    fmov x9, d1
+; CHECK-GI-NEXT:    cmp x8, x9
+; CHECK-GI-NEXT:    fcsel d0, d0, d1, lo
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> %a)
+  ret i64 %arg1
+}
+
+define i64 @uminv_v3i64(<3 x i64> %a) {
+; CHECK-LABEL: uminv_v3i64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov x8, #-1 // =0xffffffffffffffff
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    mov v2.d[1], x8
+; CHECK-NEXT:    cmhi v1.2d, v2.2d, v0.2d
+; CHECK-NEXT:    bif v0.16b, v2.16b, v1.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    cmhi d2, d1, d0
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i64 @llvm.vector.reduce.umin.v3i64(<3 x i64> %a)
+  ret i64 %arg1
+}
+
+define i64 @uminv_v4i64(<4 x i64> %a) {
+; CHECK-SD-LABEL: uminv_v4i64:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    cmhi v2.2d, v1.2d, v0.2d
+; CHECK-SD-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    cmhi d2, d1, d0
+; CHECK-SD-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT:    fmov x0, d0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: uminv_v4i64:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    cmhi v2.2d, v1.2d, v0.2d
+; CHECK-GI-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    fmov x9, d1
+; CHECK-GI-NEXT:    cmp x8, x9
+; CHECK-GI-NEXT:    fcsel d0, d0, d1, lo
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i64 @llvm.vector.reduce.umin.v4i64(<4 x i64> %a)
+  ret i64 %arg1
+}
+
+define i128 @uminv_v2i128(<2 x i128> %a) {
+; CHECK-SD-LABEL: uminv_v2i128:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    cmp x0, x2
+; CHECK-SD-NEXT:    sbcs xzr, x1, x3
+; CHECK-SD-NEXT:    csel x0, x0, x2, lo
+; CHECK-SD-NEXT:    csel x1, x1, x3, lo
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: uminv_v2i128:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    cmp x1, x3
+; CHECK-GI-NEXT:    cset w8, lo
+; CHECK-GI-NEXT:    cmp x0, x2
+; CHECK-GI-NEXT:    cset w9, lo
+; CHECK-GI-NEXT:    cmp x1, x3
+; CHECK-GI-NEXT:    csel w8, w9, w8, eq
+; CHECK-GI-NEXT:    tst w8, #0x1
+; CHECK-GI-NEXT:    csel x0, x0, x2, ne
+; CHECK-GI-NEXT:    csel x1, x1, x3, ne
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i128 @llvm.vector.reduce.umin.v2i128(<2 x i128> %a)
+  ret i128 %arg1
+}
+
+define i8 @umaxv_v2i8(<2 x i8> %a) {
+; CHECK-SD-LABEL: umaxv_v2i8:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    movi d1, #0x0000ff000000ff
+; CHECK-SD-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT:    umaxp v0.2s, v0.2s, v0.2s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: umaxv_v2i8:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov s1, v0.s[1]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    and w8, w8, #0xff
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    cmp w8, w9, uxtb
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, hi
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.umax.v2i8(<2 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @umaxv_v3i8(<3 x i8> %a) {
+; CHECK-SD-LABEL: umaxv_v3i8:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-SD-NEXT:    mov v0.h[0], w0
+; CHECK-SD-NEXT:    mov v0.h[1], w1
+; CHECK-SD-NEXT:    mov v0.h[2], w2
+; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT:    umaxv h0, v0.4h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: umaxv_v3i8:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    and w8, w0, #0xff
+; CHECK-GI-NEXT:    cmp w8, w1, uxtb
+; CHECK-GI-NEXT:    csel w8, w0, w1, hi
+; CHECK-GI-NEXT:    and w9, w8, #0xff
+; CHECK-GI-NEXT:    cmp w9, w2, uxtb
+; CHECK-GI-NEXT:    csel w0, w8, w2, hi
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.umax.v3i8(<3 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @umaxv_v4i8(<4 x i8> %a) {
+; CHECK-SD-LABEL: umaxv_v4i8:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT:    umaxv h0, v0.4h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: umaxv_v4i8:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov h1, v0.h[1]
+; CHECK-GI-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NEXT:    mov h3, v0.h[3]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    fmov w10, s1
+; CHECK-GI-NEXT:    fmov w11, s2
+; CHECK-GI-NEXT:    fmov w12, s3
+; CHECK-GI-NEXT:    and w9, w8, #0xff
+; CHECK-GI-NEXT:    cmp w9, w10, uxtb
+; CHECK-GI-NEXT:    and w9, w11, #0xff
+; CHECK-GI-NEXT:    csel w8, w8, w10, hi
+; CHECK-GI-NEXT:    cmp w9, w12, uxtb
+; CHECK-GI-NEXT:    csel w9, w11, w12, hi
+; CHECK-GI-NEXT:    and w10, w8, #0xff
+; CHECK-GI-NEXT:    cmp w10, w9, uxtb
+; CHECK-GI-NEXT:    csel w0, w8, w9, hi
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.umax.v4i8(<4 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @umaxv_v8i8(<8 x i8> %a) {
+; CHECK-LABEL: umaxv_v8i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umaxv b0, v0.8b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.umax.v8i8(<8 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @umaxv_v16i8(<16 x i8> %a) {
+; CHECK-LABEL: umaxv_v16i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umaxv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.umax.v16i8(<16 x i8> %a)
+  ret i8 %arg1
+}
+
+define i8 @umaxv_v32i8(<32 x i8> %a) {
+; CHECK-LABEL: umaxv_v32i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umax v0.16b, v0.16b, v1.16b
+; CHECK-NEXT:    umaxv b0, v0.16b
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i8 @llvm.vector.reduce.umax.v32i8(<32 x i8> %a)
+  ret i8 %arg1
+}
+
+define i16 @umaxv_v2i16(<2 x i16> %a) {
+; CHECK-SD-LABEL: umaxv_v2i16:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    movi d1, #0x00ffff0000ffff
+; CHECK-SD-NEXT:    and v0.8b, v0.8b, v1.8b
+; CHECK-SD-NEXT:    umaxp v0.2s, v0.2s, v0.2s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: umaxv_v2i16:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov s1, v0.s[1]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    and w8, w8, #0xffff
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    cmp w8, w9, uxth
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, hi
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.umax.v2i16(<2 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @umaxv_v3i16(<3 x i16> %a) {
+; CHECK-SD-LABEL: umaxv_v3i16:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    mov v0.h[3], wzr
+; CHECK-SD-NEXT:    umaxv h0, v0.4h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: umaxv_v3i16:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov h1, v0.h[1]
+; CHECK-GI-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    fmov w9, s0
+; CHECK-GI-NEXT:    uxth w8, w8
+; CHECK-GI-NEXT:    fmov w10, s1
+; CHECK-GI-NEXT:    fmov w11, s2
+; CHECK-GI-NEXT:    fmov w12, s1
+; CHECK-GI-NEXT:    cmp w8, w10, uxth
+; CHECK-GI-NEXT:    uxth w8, w11
+; CHECK-GI-NEXT:    fmov w10, s2
+; CHECK-GI-NEXT:    csel w9, w9, w12, hi
+; CHECK-GI-NEXT:    cmp w8, w9, uxth
+; CHECK-GI-NEXT:    csel w0, w9, w10, lo
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.umax.v3i16(<3 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @umaxv_v4i16(<4 x i16> %a) {
+; CHECK-LABEL: umaxv_v4i16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umaxv h0, v0.4h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.umax.v4i16(<4 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @umaxv_v8i16(<8 x i16> %a) {
+; CHECK-LABEL: umaxv_v8i16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umaxv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.umax.v8i16(<8 x i16> %a)
+  ret i16 %arg1
+}
+
+define i16 @umaxv_v16i16(<16 x i16> %a) {
+; CHECK-LABEL: umaxv_v16i16:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umax v0.8h, v0.8h, v1.8h
+; CHECK-NEXT:    umaxv h0, v0.8h
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i16 @llvm.vector.reduce.umax.v16i16(<16 x i16> %a)
+  ret i16 %arg1
+}
+
+define i32 @umaxv_v2i32(<2 x i32> %a) {
+; CHECK-LABEL: umaxv_v2i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umaxp v0.2s, v0.2s, v0.2s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> %a)
+  ret i32 %arg1
+}
+
+define i32 @umaxv_v3i32(<3 x i32> %a) {
+; CHECK-SD-LABEL: umaxv_v3i32:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    mov v0.s[3], wzr
+; CHECK-SD-NEXT:    umaxv s0, v0.4s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: umaxv_v3i32:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    mov s1, v0.s[1]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    mov s2, v0.s[2]
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    fmov w9, s2
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, hi
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    fcsel s0, s0, s2, hi
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.umax.v3i32(<3 x i32> %a)
+  ret i32 %arg1
+}
+
+define i32 @umaxv_v4i32(<4 x i32> %a) {
+; CHECK-LABEL: umaxv_v4i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umaxv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %a)
+  ret i32 %arg1
+}
+
+define i32 @umaxv_v8i32(<8 x i32> %a) {
+; CHECK-LABEL: umaxv_v8i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    umax v0.4s, v0.4s, v1.4s
+; CHECK-NEXT:    umaxv s0, v0.4s
+; CHECK-NEXT:    fmov w0, s0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i32 @llvm.vector.reduce.umax.v8i32(<8 x i32> %a)
+  ret i32 %arg1
+}
+
+define i64 @umaxv_v2i64(<2 x i64> %a) {
+; CHECK-SD-LABEL: umaxv_v2i64:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    cmhi d2, d0, d1
+; CHECK-SD-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT:    fmov x0, d0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: umaxv_v2i64:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    fmov x9, d1
+; CHECK-GI-NEXT:    cmp x8, x9
+; CHECK-GI-NEXT:    fcsel d0, d0, d1, hi
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %a)
+  ret i64 %arg1
+}
+
+define i64 @umaxv_v3i64(<3 x i64> %a) {
+; CHECK-LABEL: umaxv_v3i64:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    // kill: def $d2 killed $d2 def $q2
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    mov v3.16b, v0.16b
+; CHECK-NEXT:    mov v4.16b, v2.16b
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-NEXT:    mov v3.d[1], v1.d[0]
+; CHECK-NEXT:    mov v4.d[1], xzr
+; CHECK-NEXT:    cmhi v3.2d, v3.2d, v4.2d
+; CHECK-NEXT:    ext v4.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT:    bif v0.8b, v2.8b, v3.8b
+; CHECK-NEXT:    and v1.8b, v1.8b, v4.8b
+; CHECK-NEXT:    cmhi d2, d0, d1
+; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-NEXT:    fmov x0, d0
+; CHECK-NEXT:    ret
+entry:
+  %arg1 = call i64 @llvm.vector.reduce.umax.v3i64(<3 x i64> %a)
+  ret i64 %arg1
+}
+
+define i64 @umaxv_v4i64(<4 x i64> %a) {
+; CHECK-SD-LABEL: umaxv_v4i64:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    cmhi v2.2d, v0.2d, v1.2d
+; CHECK-SD-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    cmhi d2, d0, d1
+; CHECK-SD-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT:    fmov x0, d0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: umaxv_v4i64:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    cmhi v2.2d, v0.2d, v1.2d
+; CHECK-GI-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    fmov x9, d1
+; CHECK-GI-NEXT:    cmp x8, x9
+; CHECK-GI-NEXT:    fcsel d0, d0, d1, hi
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i64 @llvm.vector.reduce.umax.v4i64(<4 x i64> %a)
+  ret i64 %arg1
+}
+
+define i128 @umaxv_v2i128(<2 x i128> %a) {
+; CHECK-SD-LABEL: umaxv_v2i128:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    cmp x2, x0
+; CHECK-SD-NEXT:    sbcs xzr, x3, x1
+; CHECK-SD-NEXT:    csel x0, x0, x2, lo
+; CHECK-SD-NEXT:    csel x1, x1, x3, lo
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: umaxv_v2i128:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    cmp x1, x3
+; CHECK-GI-NEXT:    cset w8, hi
+; CHECK-GI-NEXT:    cmp x0, x2
+; CHECK-GI-NEXT:    cset w9, hi
+; CHECK-GI-NEXT:    cmp x1, x3
+; CHECK-GI-NEXT:    csel w8, w9, w8, eq
+; CHECK-GI-NEXT:    tst w8, #0x1
+; CHECK-GI-NEXT:    csel x0, x0, x2, ne
+; CHECK-GI-NEXT:    csel x1, x1, x3, ne
+; CHECK-GI-NEXT:    ret
+entry:
+  %arg1 = call i128 @llvm.vector.reduce.umax.v2i128(<2 x i128> %a)
+  ret i128 %arg1
+}
\ No newline at end of file

>From 98d4c391047b07a62f9660d256ba8aacab4b45cd Mon Sep 17 00:00:00 2001
From: Tuan Chuong Goh <chuong.goh at arm.com>
Date: Mon, 30 Oct 2023 10:39:37 +0000
Subject: [PATCH 2/3] fixup! [AArch64][GlobalISel] Legalize
 G_VECREDUCE_{MIN/MAX}

---
 .../AArch64/vecreduce-umax-legalization.ll    | 346 ++++++++++++++----
 1 file changed, 265 insertions(+), 81 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll
index a688d912e0310f8..53aefaf3d33600b 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-umax-legalization.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefix=CHECK
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon -global-isel -global-isel-abort=2 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
 declare i1 @llvm.vector.reduce.umax.v1i1(<1 x i1> %a)
 declare i8 @llvm.vector.reduce.umax.v1i8(<1 x i8> %a)
@@ -28,21 +29,33 @@ define i1 @test_v1i1(<1 x i1> %a) nounwind {
 }
 
 define i8 @test_v1i8(<1 x i8> %a) nounwind {
-; CHECK-LABEL: test_v1i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w0, v0.b[0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_v1i8:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    umov w0, v0.b[0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_v1i8:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-GI-NEXT:    ret
   %b = call i8 @llvm.vector.reduce.umax.v1i8(<1 x i8> %a)
   ret i8 %b
 }
 
 define i16 @test_v1i16(<1 x i16> %a) nounwind {
-; CHECK-LABEL: test_v1i16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    umov w0, v0.h[0]
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_v1i16:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    umov w0, v0.h[0]
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_v1i16:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-GI-NEXT:    ret
   %b = call i16 @llvm.vector.reduce.umax.v1i16(<1 x i16> %a)
   ret i16 %b
 }
@@ -56,21 +69,32 @@ define i24 @test_v1i24(<1 x i24> %a) nounwind {
 }
 
 define i32 @test_v1i32(<1 x i32> %a) nounwind {
-; CHECK-LABEL: test_v1i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    fmov w0, s0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_v1i32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_v1i32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    // kill: def $w0 killed $w0 killed $x0
+; CHECK-GI-NEXT:    ret
   %b = call i32 @llvm.vector.reduce.umax.v1i32(<1 x i32> %a)
   ret i32 %b
 }
 
 define i64 @test_v1i64(<1 x i64> %a) nounwind {
-; CHECK-LABEL: test_v1i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_v1i64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    fmov x0, d0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_v1i64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    ret
   %b = call i64 @llvm.vector.reduce.umax.v1i64(<1 x i64> %a)
   ret i64 %b
 }
@@ -85,101 +109,261 @@ define i128 @test_v1i128(<1 x i128> %a) nounwind {
 
 ; No i64 vector support for UMAX.
 define i64 @test_v2i64(<2 x i64> %a) nounwind {
-; CHECK-LABEL: test_v2i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT:    cmhi d2, d0, d1
-; CHECK-NEXT:    bif v0.8b, v1.8b, v2.8b
-; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_v2i64:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-SD-NEXT:    cmhi d2, d0, d1
+; CHECK-SD-NEXT:    bif v0.8b, v1.8b, v2.8b
+; CHECK-SD-NEXT:    fmov x0, d0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_v2i64:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov d1, v0.d[1]
+; CHECK-GI-NEXT:    fmov x8, d0
+; CHECK-GI-NEXT:    fmov x9, d1
+; CHECK-GI-NEXT:    cmp x8, x9
+; CHECK-GI-NEXT:    fcsel d0, d0, d1, hi
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    ret
   %b = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %a)
   ret i64 %b
 }
 
 define i8 @test_v3i8(<3 x i8> %a) nounwind {
-; CHECK-LABEL: test_v3i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    movi v0.2d, #0000000000000000
-; CHECK-NEXT:    mov v0.h[0], w0
-; CHECK-NEXT:    mov v0.h[1], w1
-; CHECK-NEXT:    mov v0.h[2], w2
-; CHECK-NEXT:    bic v0.4h, #255, lsl #8
-; CHECK-NEXT:    umaxv h0, v0.4h
-; CHECK-NEXT:    fmov w0, s0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_v3i8:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    movi v0.2d, #0000000000000000
+; CHECK-SD-NEXT:    mov v0.h[0], w0
+; CHECK-SD-NEXT:    mov v0.h[1], w1
+; CHECK-SD-NEXT:    mov v0.h[2], w2
+; CHECK-SD-NEXT:    bic v0.4h, #255, lsl #8
+; CHECK-SD-NEXT:    umaxv h0, v0.4h
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_v3i8:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    and w8, w0, #0xff
+; CHECK-GI-NEXT:    cmp w8, w1, uxtb
+; CHECK-GI-NEXT:    csel w8, w0, w1, hi
+; CHECK-GI-NEXT:    and w9, w8, #0xff
+; CHECK-GI-NEXT:    cmp w9, w2, uxtb
+; CHECK-GI-NEXT:    csel w0, w8, w2, hi
+; CHECK-GI-NEXT:    ret
   %b = call i8 @llvm.vector.reduce.umax.v3i8(<3 x i8> %a)
   ret i8 %b
 }
 
 define i8 @test_v9i8(<9 x i8> %a) nounwind {
-; CHECK-LABEL: test_v9i8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    adrp x8, .LCPI9_0
-; CHECK-NEXT:    ldr q1, [x8, :lo12:.LCPI9_0]
-; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
-; CHECK-NEXT:    umaxv b0, v0.16b
-; CHECK-NEXT:    fmov w0, s0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_v9i8:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    adrp x8, .LCPI9_0
+; CHECK-SD-NEXT:    ldr q1, [x8, :lo12:.LCPI9_0]
+; CHECK-SD-NEXT:    and v0.16b, v0.16b, v1.16b
+; CHECK-SD-NEXT:    umaxv b0, v0.16b
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_v9i8:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov b1, v0.b[1]
+; CHECK-GI-NEXT:    mov b2, v0.b[2]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    mov b3, v0.b[3]
+; CHECK-GI-NEXT:    mov b4, v0.b[4]
+; CHECK-GI-NEXT:    fmov w9, s0
+; CHECK-GI-NEXT:    uxtb w8, w8
+; CHECK-GI-NEXT:    fmov w10, s1
+; CHECK-GI-NEXT:    fmov w11, s2
+; CHECK-GI-NEXT:    fmov w12, s1
+; CHECK-GI-NEXT:    mov b1, v0.b[5]
+; CHECK-GI-NEXT:    mov b2, v0.b[6]
+; CHECK-GI-NEXT:    cmp w8, w10, uxtb
+; CHECK-GI-NEXT:    fmov w10, s3
+; CHECK-GI-NEXT:    uxtb w8, w11
+; CHECK-GI-NEXT:    csel w9, w9, w12, hi
+; CHECK-GI-NEXT:    cmp w8, w9, uxtb
+; CHECK-GI-NEXT:    uxtb w8, w10
+; CHECK-GI-NEXT:    fmov w10, s4
+; CHECK-GI-NEXT:    csel w9, w9, w11, lo
+; CHECK-GI-NEXT:    fmov w11, s3
+; CHECK-GI-NEXT:    mov b3, v0.b[7]
+; CHECK-GI-NEXT:    mov b0, v0.b[8]
+; CHECK-GI-NEXT:    cmp w8, w9, uxtb
+; CHECK-GI-NEXT:    uxtb w8, w10
+; CHECK-GI-NEXT:    fmov w10, s1
+; CHECK-GI-NEXT:    csel w9, w9, w11, lo
+; CHECK-GI-NEXT:    fmov w11, s4
+; CHECK-GI-NEXT:    cmp w8, w9, uxtb
+; CHECK-GI-NEXT:    uxtb w8, w10
+; CHECK-GI-NEXT:    fmov w10, s2
+; CHECK-GI-NEXT:    csel w9, w9, w11, lo
+; CHECK-GI-NEXT:    fmov w11, s1
+; CHECK-GI-NEXT:    cmp w8, w9, uxtb
+; CHECK-GI-NEXT:    uxtb w8, w10
+; CHECK-GI-NEXT:    fmov w10, s3
+; CHECK-GI-NEXT:    csel w9, w9, w11, lo
+; CHECK-GI-NEXT:    fmov w11, s2
+; CHECK-GI-NEXT:    cmp w8, w9, uxtb
+; CHECK-GI-NEXT:    uxtb w8, w10
+; CHECK-GI-NEXT:    fmov w10, s0
+; CHECK-GI-NEXT:    csel w9, w9, w11, lo
+; CHECK-GI-NEXT:    fmov w11, s3
+; CHECK-GI-NEXT:    cmp w8, w9, uxtb
+; CHECK-GI-NEXT:    uxtb w8, w10
+; CHECK-GI-NEXT:    csel w9, w9, w11, lo
+; CHECK-GI-NEXT:    cmp w8, w9, uxtb
+; CHECK-GI-NEXT:    csel w0, w9, w10, lo
+; CHECK-GI-NEXT:    ret
   %b = call i8 @llvm.vector.reduce.umax.v9i8(<9 x i8> %a)
   ret i8 %b
 }
 
 define i32 @test_v3i32(<3 x i32> %a) nounwind {
-; CHECK-LABEL: test_v3i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov v0.s[3], wzr
-; CHECK-NEXT:    umaxv s0, v0.4s
-; CHECK-NEXT:    fmov w0, s0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_v3i32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    mov v0.s[3], wzr
+; CHECK-SD-NEXT:    umaxv s0, v0.4s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_v3i32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov s1, v0.s[1]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    mov s2, v0.s[2]
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    fmov w9, s2
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, hi
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    fcsel s0, s0, s2, hi
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
   %b = call i32 @llvm.vector.reduce.umax.v3i32(<3 x i32> %a)
   ret i32 %b
 }
 
 define i1 @test_v4i1(<4 x i1> %a) nounwind {
-; CHECK-LABEL: test_v4i1:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    shl v0.4h, v0.4h, #15
-; CHECK-NEXT:    cmlt v0.4h, v0.4h, #0
-; CHECK-NEXT:    umaxv h0, v0.4h
-; CHECK-NEXT:    fmov w8, s0
-; CHECK-NEXT:    and w0, w8, #0x1
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_v4i1:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    shl v0.4h, v0.4h, #15
+; CHECK-SD-NEXT:    cmlt v0.4h, v0.4h, #0
+; CHECK-SD-NEXT:    umaxv h0, v0.4h
+; CHECK-SD-NEXT:    fmov w8, s0
+; CHECK-SD-NEXT:    and w0, w8, #0x1
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_v4i1:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    mov h1, v0.h[1]
+; CHECK-GI-NEXT:    mov h2, v0.h[2]
+; CHECK-GI-NEXT:    mov h3, v0.h[3]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    fmov w10, s1
+; CHECK-GI-NEXT:    fmov w12, s2
+; CHECK-GI-NEXT:    fmov w13, s3
+; CHECK-GI-NEXT:    and w9, w8, #0x1
+; CHECK-GI-NEXT:    and w11, w10, #0x1
+; CHECK-GI-NEXT:    cmp w9, w11
+; CHECK-GI-NEXT:    and w9, w12, #0x1
+; CHECK-GI-NEXT:    and w11, w13, #0x1
+; CHECK-GI-NEXT:    csel w8, w8, w10, hi
+; CHECK-GI-NEXT:    cmp w9, w11
+; CHECK-GI-NEXT:    csel w9, w12, w13, hi
+; CHECK-GI-NEXT:    and w10, w8, #0x1
+; CHECK-GI-NEXT:    and w11, w9, #0x1
+; CHECK-GI-NEXT:    cmp w10, w11
+; CHECK-GI-NEXT:    csel w8, w8, w9, hi
+; CHECK-GI-NEXT:    and w0, w8, #0x1
+; CHECK-GI-NEXT:    ret
   %b = call i1 @llvm.vector.reduce.umax.v4i1(<4 x i1> %a)
   ret i1 %b
 }
 
 define i24 @test_v4i24(<4 x i24> %a) nounwind {
-; CHECK-LABEL: test_v4i24:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    bic v0.4s, #255, lsl #24
-; CHECK-NEXT:    umaxv s0, v0.4s
-; CHECK-NEXT:    fmov w0, s0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_v4i24:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    bic v0.4s, #255, lsl #24
+; CHECK-SD-NEXT:    umaxv s0, v0.4s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_v4i24:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov s1, v0.s[1]
+; CHECK-GI-NEXT:    mov s2, v0.s[2]
+; CHECK-GI-NEXT:    mov s3, v0.s[3]
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    fmov w10, s2
+; CHECK-GI-NEXT:    fmov w11, s3
+; CHECK-GI-NEXT:    and w8, w8, #0xffffff
+; CHECK-GI-NEXT:    and w9, w9, #0xffffff
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    and w8, w10, #0xffffff
+; CHECK-GI-NEXT:    and w9, w11, #0xffffff
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, hi
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    fcsel s1, s2, s3, hi
+; CHECK-GI-NEXT:    fmov w8, s0
+; CHECK-GI-NEXT:    fmov w9, s1
+; CHECK-GI-NEXT:    and w8, w8, #0xffffff
+; CHECK-GI-NEXT:    and w9, w9, #0xffffff
+; CHECK-GI-NEXT:    cmp w8, w9
+; CHECK-GI-NEXT:    fcsel s0, s0, s1, hi
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
   %b = call i24 @llvm.vector.reduce.umax.v4i24(<4 x i24> %a)
   ret i24 %b
 }
 
 define i128 @test_v2i128(<2 x i128> %a) nounwind {
-; CHECK-LABEL: test_v2i128:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    cmp x2, x0
-; CHECK-NEXT:    sbcs xzr, x3, x1
-; CHECK-NEXT:    csel x0, x0, x2, lo
-; CHECK-NEXT:    csel x1, x1, x3, lo
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_v2i128:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    cmp x2, x0
+; CHECK-SD-NEXT:    sbcs xzr, x3, x1
+; CHECK-SD-NEXT:    csel x0, x0, x2, lo
+; CHECK-SD-NEXT:    csel x1, x1, x3, lo
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_v2i128:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    cmp x1, x3
+; CHECK-GI-NEXT:    cset w8, hi
+; CHECK-GI-NEXT:    cmp x0, x2
+; CHECK-GI-NEXT:    cset w9, hi
+; CHECK-GI-NEXT:    cmp x1, x3
+; CHECK-GI-NEXT:    csel w8, w9, w8, eq
+; CHECK-GI-NEXT:    tst w8, #0x1
+; CHECK-GI-NEXT:    csel x0, x0, x2, ne
+; CHECK-GI-NEXT:    csel x1, x1, x3, ne
+; CHECK-GI-NEXT:    ret
   %b = call i128 @llvm.vector.reduce.umax.v2i128(<2 x i128> %a)
   ret i128 %b
 }
 
 define i32 @test_v16i32(<16 x i32> %a) nounwind {
-; CHECK-LABEL: test_v16i32:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    umax v1.4s, v1.4s, v3.4s
-; CHECK-NEXT:    umax v0.4s, v0.4s, v2.4s
-; CHECK-NEXT:    umax v0.4s, v0.4s, v1.4s
-; CHECK-NEXT:    umaxv s0, v0.4s
-; CHECK-NEXT:    fmov w0, s0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_v16i32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    umax v1.4s, v1.4s, v3.4s
+; CHECK-SD-NEXT:    umax v0.4s, v0.4s, v2.4s
+; CHECK-SD-NEXT:    umax v0.4s, v0.4s, v1.4s
+; CHECK-SD-NEXT:    umaxv s0, v0.4s
+; CHECK-SD-NEXT:    fmov w0, s0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_v16i32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    umax v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    umax v1.4s, v2.4s, v3.4s
+; CHECK-GI-NEXT:    umax v0.4s, v0.4s, v1.4s
+; CHECK-GI-NEXT:    umaxv s0, v0.4s
+; CHECK-GI-NEXT:    fmov w0, s0
+; CHECK-GI-NEXT:    ret
   %b = call i32 @llvm.vector.reduce.umax.v16i32(<16 x i32> %a)
   ret i32 %b
 }

>From 69fd4dc6ec5431931677f38dfeb3877a517e4de3 Mon Sep 17 00:00:00 2001
From: Tuan Chuong Goh <chuong.goh at arm.com>
Date: Thu, 2 Nov 2023 13:09:35 +0000
Subject: [PATCH 3/3] fixup! [AArch64][GlobalISel] Legalize
 G_VECREDUCE_{MIN/MAX}

---
 llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td | 4 ++++
 llvm/lib/Target/AArch64/AArch64InstrGISel.td              | 5 -----
 llvm/lib/Target/AArch64/AArch64InstrInfo.td               | 7 ++++---
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
index f767100874e94e1..7adc1545915744d 100644
--- a/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
+++ b/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td
@@ -173,6 +173,10 @@ def : GINodeEquiv<G_VECREDUCE_FMAX, vecreduce_fmax>;
 def : GINodeEquiv<G_VECREDUCE_FMIN, vecreduce_fmin>;
 def : GINodeEquiv<G_VECREDUCE_FMAXIMUM, vecreduce_fmaximum>;
 def : GINodeEquiv<G_VECREDUCE_FMINIMUM, vecreduce_fminimum>;
+def : GINodeEquiv<G_VECREDUCE_UMIN, vecreduce_umin>;
+def : GINodeEquiv<G_VECREDUCE_UMAX, vecreduce_umax>;
+def : GINodeEquiv<G_VECREDUCE_SMIN, vecreduce_smin>;
+def : GINodeEquiv<G_VECREDUCE_SMAX, vecreduce_smax>;
 
 def : GINodeEquiv<G_STRICT_FADD, strict_fadd>;
 def : GINodeEquiv<G_STRICT_FSUB, strict_fsub>;
diff --git a/llvm/lib/Target/AArch64/AArch64InstrGISel.td b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
index a3e8b1fff32eee9..27338bd24393325 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrGISel.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrGISel.td
@@ -274,11 +274,6 @@ def : GINodeEquiv<G_EXTRACT_VECTOR_ELT, vector_extract>;
 
 def : GINodeEquiv<G_PREFETCH, AArch64Prefetch>;
 
-def : GINodeEquiv<G_VECREDUCE_UMIN, vecreduce_umin>;
-def : GINodeEquiv<G_VECREDUCE_UMAX, vecreduce_umax>;
-def : GINodeEquiv<G_VECREDUCE_SMIN, vecreduce_smin>;
-def : GINodeEquiv<G_VECREDUCE_SMAX, vecreduce_smax>;
-
 // These are patterns that we only use for GlobalISel via the importer.
 def : Pat<(f32 (fadd (vector_extract (v2f32 FPR64:$Rn), (i64 0)),
                      (vector_extract (v2f32 FPR64:$Rn), (i64 1)))),
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index caed23c39c14408..6f38c41c228ae0e 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -6663,9 +6663,10 @@ defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
 def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
           (UMINPv2i32 V64:$Rn, V64:$Rn)>;
 
-// For vecreduce_{opc}
+// For vecreduce_{opc} used by GlobalISel, not SDAG at the moment,
+// because GlobalISel allows us to specify the return register to be an FPR
 multiclass SIMDAcrossLanesVecReductionIntrinsic<string baseOpc,
-                                            SDPatternOperator opNode> {
+                                               SDPatternOperator opNode> {
 def : Pat<(i8 (opNode (v8i8 FPR64:$Rn))),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) FPR64:$Rn)>;
 
@@ -6680,9 +6681,9 @@ def : Pat<(i16 (opNode (v8i16 FPR128:$Rn))),
 
 def : Pat<(i32 (opNode (v4i32 V128:$Rn))), 
           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn)>;
-
 }
 
+// For v2i32 source type, the pairwise instruction can be used instead
 defm : SIMDAcrossLanesVecReductionIntrinsic<"UMINV", vecreduce_umin>;
 def : Pat<(i32 (vecreduce_umin (v2i32 V64:$Rn))), 
           (i32 (EXTRACT_SUBREG (UMINPv2i32 V64:$Rn, V64:$Rn), ssub))>;



More information about the llvm-commits mailing list