[llvm] 831ee6b - [AArch64][GlobalISel] Optimise lowering for some vector types for min/max

Irina Dobrescu via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 15 03:35:02 PDT 2021


Author: Irina Dobrescu
Date: 2021-07-15T11:34:32+01:00
New Revision: 831ee6b0c38bbb9ed1206a67a5a1df4e24066ea0

URL: https://github.com/llvm/llvm-project/commit/831ee6b0c38bbb9ed1206a67a5a1df4e24066ea0
DIFF: https://github.com/llvm/llvm-project/commit/831ee6b0c38bbb9ed1206a67a5a1df4e24066ea0.diff

LOG: [AArch64][GlobalISel] Optimise lowering for some vector types for min/max

Differential Revision: https://reviews.llvm.org/D105696

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
    llvm/test/CodeGen/AArch64/min-max.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 02eb3287363b..153bc59470cd 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -168,6 +168,12 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
 
   getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
       .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
+      .clampNumElements(0, v8s8, v16s8)
+      .clampNumElements(0, v4s16, v8s16)
+      .clampNumElements(0, v2s32, v4s32)
+      // FIXME: This shouldn't be needed as v2s64 types are going to
+      // be expanded anyway, but G_ICMP doesn't support splitting vectors yet
+      .clampNumElements(0, v2s64, v2s64)
       .lower();
 
   getActionDefinitionsBuilder(
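
For context, here is an annotated sketch of the rule added above — not the verbatim committed code, and the LLT helper names are assumed from the GlobalISel API of this period. `clampNumElements(0, Min, Max)` tells the legalizer to keep the element count of type index 0 within [Min, Max], so 256-bit vectors such as <32 x s8> are split into two legal 128-bit halves instead of being scalarised:

    // Minimal sketch of the min/max ruleset (boilerplate omitted).
    const LLT v8s8  = LLT::fixed_vector(8, 8);
    const LLT v16s8 = LLT::fixed_vector(16, 8);
    const LLT v4s16 = LLT::fixed_vector(4, 16);
    const LLT v8s16 = LLT::fixed_vector(8, 16);
    const LLT v2s32 = LLT::fixed_vector(2, 32);
    const LLT v4s32 = LLT::fixed_vector(4, 32);
    const LLT v2s64 = LLT::fixed_vector(2, 64);

    getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        // Native NEON min/max exists for these 64- and 128-bit vectors.
        .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})
        // Anything wider than the max type, e.g. <32 x s8>, is split
        // into <16 x s8> pieces (two NEON ops plus an stp in the tests).
        .clampNumElements(0, v8s8, v16s8)
        .clampNumElements(0, v4s16, v8s16)
        .clampNumElements(0, v2s32, v4s32)
        // v2s64 has no native min/max; clamping <4 x s64> to <2 x s64>
        // first works around G_ICMP not yet supporting vector splitting,
        // so each half can then be expanded below.
        .clampNumElements(0, v2s64, v2s64)
        // Whatever remains illegal (including v2s64) is expanded to
        // compare + select by the generic lowering.
        .lower();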

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
index 11f676f49529..e237d0d77861 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
@@ -42,6 +42,34 @@ body: |
     $q0 = COPY %smin
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v32s8_smin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v32s8_smin
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[SMIN:%[0-9]+]]:_(<16 x s8>) = G_SMIN [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[SMIN1:%[0-9]+]]:_(<16 x s8>) = G_SMIN [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[SMIN]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[SMIN1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16)
+    %vec:_(<32 x s8>) = G_IMPLICIT_DEF
+    %vec1:_(<32 x s8>) = G_IMPLICIT_DEF
+    %smin:_(<32 x s8>) = G_SMIN %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %smin(<32 x s8>), %1(p0) :: (store (<32 x s8>))
+
 ...
 ---
 name:            v4s16_smin
@@ -84,6 +112,34 @@ body: |
     $q0 = COPY %smin
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v16s16_smin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v16s16_smin
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[SMIN:%[0-9]+]]:_(<8 x s16>) = G_SMIN [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[SMIN1:%[0-9]+]]:_(<8 x s16>) = G_SMIN [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[SMIN]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[SMIN1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16)
+    %vec:_(<16 x s16>) = G_IMPLICIT_DEF
+    %vec1:_(<16 x s16>) = G_IMPLICIT_DEF
+    %smin:_(<16 x s16>) = G_SMIN %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %smin(<16 x s16>), %1(p0) :: (store (<16 x s16>))
+
 ...
 ---
 name:            v2s32_smin
@@ -126,6 +182,34 @@ body: |
     $q0 = COPY %smin
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v8s32_smin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v8s32_smin
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[SMIN:%[0-9]+]]:_(<4 x s32>) = G_SMIN [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[SMIN1:%[0-9]+]]:_(<4 x s32>) = G_SMIN [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[SMIN]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[SMIN1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
+    %vec:_(<8 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s32>) = G_IMPLICIT_DEF
+    %smin:_(<8 x s32>) = G_SMIN %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %smin(<8 x s32>), %1(p0) :: (store (<8 x s32>))
+
 ...
 ---
 name:            v2s64_smin
@@ -158,6 +242,50 @@ body: |
     $q0 = COPY %smin
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v4s64_smin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v4s64_smin
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(slt), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]]
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR]]
+    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]]
+    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(slt), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP1]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[ASHR1:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR1]]
+    ; CHECK: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]]
+    ; CHECK: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]]
+    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[OR]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>), align 32)
+    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s64)
+    ; CHECK: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
+    %vec:_(<4 x s64>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s64>) = G_IMPLICIT_DEF
+    %smin:_(<4 x s64>) = G_SMIN %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %smin(<4 x s64>), %1(p0) :: (store (<4 x s64>))
+
 ...
 ---
 name:            v8s8_umin
@@ -200,6 +328,34 @@ body: |
     $q0 = COPY %umin
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v32s8_umin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v32s8_umin
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[UMIN:%[0-9]+]]:_(<16 x s8>) = G_UMIN [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[UMIN1:%[0-9]+]]:_(<16 x s8>) = G_UMIN [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[UMIN]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[UMIN1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16)
+    %vec:_(<32 x s8>) = G_IMPLICIT_DEF
+    %vec1:_(<32 x s8>) = G_IMPLICIT_DEF
+    %umin:_(<32 x s8>) = G_UMIN %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %umin(<32 x s8>), %1(p0) :: (store (<32 x s8>))
+
 ...
 ---
 name:            v4s16_umin
@@ -242,6 +398,34 @@ body: |
     $q0 = COPY %umin
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v16s16_umin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v16s16_umin
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[UMIN:%[0-9]+]]:_(<8 x s16>) = G_UMIN [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[UMIN1:%[0-9]+]]:_(<8 x s16>) = G_UMIN [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[UMIN]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[UMIN1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16)
+    %vec:_(<16 x s16>) = G_IMPLICIT_DEF
+    %vec1:_(<16 x s16>) = G_IMPLICIT_DEF
+    %umin:_(<16 x s16>) = G_UMIN %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %umin(<16 x s16>), %1(p0) :: (store (<16 x s16>))
+
 ...
 ---
 name:            v2s32_umin
@@ -284,6 +468,34 @@ body: |
     $q0 = COPY %umin
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v8s32_umin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v8s32_umin
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[UMIN:%[0-9]+]]:_(<4 x s32>) = G_UMIN [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[UMIN1:%[0-9]+]]:_(<4 x s32>) = G_UMIN [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[UMIN]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[UMIN1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
+    %vec:_(<8 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s32>) = G_IMPLICIT_DEF
+    %umin:_(<8 x s32>) = G_UMIN %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %umin(<8 x s32>), %1(p0) :: (store (<8 x s32>))
+
 ...
 ---
 name:            v2s64_umin
@@ -316,6 +528,50 @@ body: |
     $q0 = COPY %umin
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v4s64_umin
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v4s64_umin
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ult), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]]
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR]]
+    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]]
+    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ult), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP1]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[ASHR1:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR1]]
+    ; CHECK: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]]
+    ; CHECK: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]]
+    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[OR]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>), align 32)
+    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s64)
+    ; CHECK: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
+    %vec:_(<4 x s64>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s64>) = G_IMPLICIT_DEF
+    %umin:_(<4 x s64>) = G_UMIN %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %umin(<4 x s64>), %1(p0) :: (store (<4 x s64>))
+
 ...
 ---
 name:            v8s8_smax
@@ -379,6 +635,34 @@ body: |
     $x0 = COPY %smax
     RET_ReallyLR implicit $x0
 
+...
+---
+name:            v32s8_smax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v32s8_smax
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[SMAX:%[0-9]+]]:_(<16 x s8>) = G_SMAX [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[SMAX1:%[0-9]+]]:_(<16 x s8>) = G_SMAX [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[SMAX]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[SMAX1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16)
+    %vec:_(<32 x s8>) = G_IMPLICIT_DEF
+    %vec1:_(<32 x s8>) = G_IMPLICIT_DEF
+    %smax:_(<32 x s8>) = G_SMAX %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %smax(<32 x s8>), %1(p0) :: (store (<32 x s8>))
+
 ...
 ---
 name:            v8s16_smax
@@ -400,6 +684,34 @@ body: |
     $q0 = COPY %smax
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v16s16_smax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v16s16_smax
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[SMAX:%[0-9]+]]:_(<8 x s16>) = G_SMAX [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[SMAX1:%[0-9]+]]:_(<8 x s16>) = G_SMAX [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[SMAX]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[SMAX1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16)
+    %vec:_(<16 x s16>) = G_IMPLICIT_DEF
+    %vec1:_(<16 x s16>) = G_IMPLICIT_DEF
+    %smax:_(<16 x s16>) = G_SMAX %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %smax(<16 x s16>), %1(p0) :: (store (<16 x s16>))
+
 ...
 ---
 name:            v2s32_smax
@@ -442,6 +754,34 @@ body: |
     $q0 = COPY %smax
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v8s32_smax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v8s32_smax
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[SMAX:%[0-9]+]]:_(<4 x s32>) = G_SMAX [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[SMAX1:%[0-9]+]]:_(<4 x s32>) = G_SMAX [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[SMAX]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[SMAX1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
+    %vec:_(<8 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s32>) = G_IMPLICIT_DEF
+    %smax:_(<8 x s32>) = G_SMAX %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %smax(<8 x s32>), %1(p0) :: (store (<8 x s32>))
+
 ...
 ---
 name:            v2s64_smax
@@ -474,6 +814,50 @@ body: |
     $q0 = COPY %smax
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v4s64_smax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v4s64_smax
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(sgt), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]]
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR]]
+    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]]
+    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(sgt), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP1]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[ASHR1:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR1]]
+    ; CHECK: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]]
+    ; CHECK: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]]
+    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[OR]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>), align 32)
+    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s64)
+    ; CHECK: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
+    %vec:_(<4 x s64>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s64>) = G_IMPLICIT_DEF
+    %smax:_(<4 x s64>) = G_SMAX %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %smax(<4 x s64>), %1(p0) :: (store (<4 x s64>))
+
 ...
 ---
 name:            v8s8_umax
@@ -516,6 +900,34 @@ body: |
     $q0 = COPY %umax
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v32s8_umax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v32s8_umax
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8), [[DEF]](s8)
+    ; CHECK: [[UMAX:%[0-9]+]]:_(<16 x s8>) = G_UMAX [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[UMAX1:%[0-9]+]]:_(<16 x s8>) = G_UMAX [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[UMAX]](<16 x s8>), [[COPY]](p0) :: (store (<16 x s8>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[UMAX1]](<16 x s8>), [[PTR_ADD]](p0) :: (store (<16 x s8>) into unknown-address + 16)
+    %vec:_(<32 x s8>) = G_IMPLICIT_DEF
+    %vec1:_(<32 x s8>) = G_IMPLICIT_DEF
+    %umax:_(<32 x s8>) = G_UMAX %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %umax(<32 x s8>), %1(p0) :: (store (<32 x s8>))
+
 ...
 ---
 name:            v4s16_umax
@@ -558,6 +970,34 @@ body: |
     $q0 = COPY %umax
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v16s16_umax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v16s16_umax
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<8 x s16>) = G_BUILD_VECTOR [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16), [[DEF]](s16)
+    ; CHECK: [[UMAX:%[0-9]+]]:_(<8 x s16>) = G_UMAX [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[UMAX1:%[0-9]+]]:_(<8 x s16>) = G_UMAX [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[UMAX]](<8 x s16>), [[COPY]](p0) :: (store (<8 x s16>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[UMAX1]](<8 x s16>), [[PTR_ADD]](p0) :: (store (<8 x s16>) into unknown-address + 16)
+    %vec:_(<16 x s16>) = G_IMPLICIT_DEF
+    %vec1:_(<16 x s16>) = G_IMPLICIT_DEF
+    %umax:_(<16 x s16>) = G_UMAX %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %umax(<16 x s16>), %1(p0) :: (store (<16 x s16>))
+
 ...
 ---
 name:            v2s32_umax
@@ -600,6 +1040,34 @@ body: |
     $q0 = COPY %umax
     RET_ReallyLR implicit $q0
 
+...
+---
+name:            v8s32_umax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v8s32_umax
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[DEF]](s32), [[DEF]](s32), [[DEF]](s32), [[DEF]](s32)
+    ; CHECK: [[UMAX:%[0-9]+]]:_(<4 x s32>) = G_UMAX [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
+    ; CHECK: [[UMAX1:%[0-9]+]]:_(<4 x s32>) = G_UMAX [[BUILD_VECTOR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[UMAX]](<4 x s32>), [[COPY]](p0) :: (store (<4 x s32>), align 32)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+    ; CHECK: G_STORE [[UMAX1]](<4 x s32>), [[PTR_ADD]](p0) :: (store (<4 x s32>) into unknown-address + 16)
+    %vec:_(<8 x s32>) = G_IMPLICIT_DEF
+    %vec1:_(<8 x s32>) = G_IMPLICIT_DEF
+    %umax:_(<8 x s32>) = G_UMAX %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %umax(<8 x s32>), %1(p0) :: (store (<8 x s32>))
+
 ...
 ---
 name:            v2s64_umax
@@ -633,4 +1101,48 @@ body: |
     RET_ReallyLR implicit $q0
 
 ...
+---
+name:            v4s64_umax
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $q0, $q1
+
+    ; CHECK-LABEL: name: v4s64_umax
+    ; CHECK: liveins: $x0, $q0, $q1
+    ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
+    ; CHECK: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ugt), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP]](<2 x s64>)
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[ASHR:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL]], [[BUILD_VECTOR]](<2 x s64>)
+    ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR]], [[BUILD_VECTOR1]]
+    ; CHECK: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR]]
+    ; CHECK: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]]
+    ; CHECK: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
+    ; CHECK: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ugt), [[DEF]](<2 x s64>), [[DEF]]
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY [[ICMP1]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
+    ; CHECK: [[SHL1:%[0-9]+]]:_(<2 x s64>) = G_SHL [[COPY1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[ASHR1:%[0-9]+]]:_(<2 x s64>) = G_ASHR [[SHL1]], [[BUILD_VECTOR2]](<2 x s64>)
+    ; CHECK: [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
+    ; CHECK: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ASHR1]], [[BUILD_VECTOR3]]
+    ; CHECK: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ASHR1]]
+    ; CHECK: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]]
+    ; CHECK: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]]
+    ; CHECK: [[COPY2:%[0-9]+]]:_(p0) = COPY $x0
+    ; CHECK: G_STORE [[OR]](<2 x s64>), [[COPY2]](p0) :: (store (<2 x s64>), align 32)
+    ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+    ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY2]], [[C2]](s64)
+    ; CHECK: G_STORE [[OR1]](<2 x s64>), [[PTR_ADD]](p0) :: (store (<2 x s64>) into unknown-address + 16)
+    %vec:_(<4 x s64>) = G_IMPLICIT_DEF
+    %vec1:_(<4 x s64>) = G_IMPLICIT_DEF
+    %umax:_(<4 x s64>) = G_UMAX %vec, %vec1
+    %1:_(p0) = COPY $x0
+    G_STORE %umax(<4 x s64>), %1(p0) :: (store (<4 x s64>))
+
+...
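+
+For the s64 cases above, the CHECK lines describe the generic compare-and-select
+expansion: G_ICMP produces 0/1 per lane, a shift left then arithmetic shift
+right by 63 sign-extends that bit into an all-ones/all-zeros mask, and
+and/and-not/or blend the two operands (the cmgt/shl/sshr/bif sequence in the
+.ll test below). A per-lane model in C++ — an illustrative sketch, not code
+from the patch:
+
+    #include <cstdint>
+
+    // Scalar model of one <2 x s64> lane of the G_SMAX expansion.
+    int64_t smax_lane(int64_t a, int64_t b) {
+      uint64_t cmp = static_cast<uint64_t>(a > b);      // G_ICMP(sgt): 0 or 1
+      // G_SHL + G_ASHR by 63: sign-extend bit 0 into a full-width mask
+      // (relies on two's-complement arithmetic shift, as on AArch64).
+      uint64_t mask = static_cast<uint64_t>(
+          static_cast<int64_t>(cmp << 63) >> 63);
+      uint64_t nmask = mask ^ ~0ULL;                    // G_XOR with -1
+      return static_cast<int64_t>(
+          (static_cast<uint64_t>(a) & mask) |           // G_AND
+          (static_cast<uint64_t>(b) & nmask));          // G_AND + G_OR
+    }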
 

diff  --git a/llvm/test/CodeGen/AArch64/min-max.ll b/llvm/test/CodeGen/AArch64/min-max.ll
index ec4998bbc023..1536a6d0cb9d 100644
--- a/llvm/test/CodeGen/AArch64/min-max.ll
+++ b/llvm/test/CodeGen/AArch64/min-max.ll
@@ -1,5 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-eabi %s -o - | FileCheck %s
+; RUN: llc -mtriple=aarch64-eabi %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-ISEL
+; RUN: llc -mtriple=aarch64-eabi -global-isel %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GLOBAL
+
 
 ; These tests just check that the plumbing is in place for @llvm.smax, @llvm.umax,
 ; @llvm.smin, @llvm.umin.
@@ -7,13 +9,20 @@
 declare i8 @llvm.smax.i8(i8 %a, i8 %b) readnone
 
 define i8 @smaxi8(i8 %a, i8 %b) {
-; CHECK-LABEL: smaxi8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sxtb w8, w1
-; CHECK-NEXT:    sxtb w9, w0
-; CHECK-NEXT:    cmp w9, w8
-; CHECK-NEXT:    csel w0, w9, w8, gt
-; CHECK-NEXT:    ret
+; CHECK-ISEL-LABEL: smaxi8:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    sxtb w8, w1
+; CHECK-ISEL-NEXT:    sxtb w9, w0
+; CHECK-ISEL-NEXT:    cmp w9, w8
+; CHECK-ISEL-NEXT:    csel w0, w9, w8, gt
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: smaxi8:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    sxtb w8, w0
+; CHECK-GLOBAL-NEXT:    cmp w8, w1, sxtb
+; CHECK-GLOBAL-NEXT:    csel w0, w0, w1, gt
+; CHECK-GLOBAL-NEXT:    ret
   %c = call i8 @llvm.smax.i8(i8 %a, i8 %b)
   ret i8 %c
 }
@@ -21,13 +30,20 @@ define i8 @smaxi8(i8 %a, i8 %b) {
 declare i16 @llvm.smax.i16(i16 %a, i16 %b) readnone
 
 define i16 @smaxi16(i16 %a, i16 %b) {
-; CHECK-LABEL: smaxi16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sxth w8, w1
-; CHECK-NEXT:    sxth w9, w0
-; CHECK-NEXT:    cmp w9, w8
-; CHECK-NEXT:    csel w0, w9, w8, gt
-; CHECK-NEXT:    ret
+; CHECK-ISEL-LABEL: smaxi16:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    sxth w8, w1
+; CHECK-ISEL-NEXT:    sxth w9, w0
+; CHECK-ISEL-NEXT:    cmp w9, w8
+; CHECK-ISEL-NEXT:    csel w0, w9, w8, gt
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: smaxi16:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    sxth w8, w0
+; CHECK-GLOBAL-NEXT:    cmp w8, w1, sxth
+; CHECK-GLOBAL-NEXT:    csel w0, w0, w1, gt
+; CHECK-GLOBAL-NEXT:    ret
   %c = call i16 @llvm.smax.i16(i16 %a, i16 %b)
   ret i16 %c
 }
@@ -78,6 +94,20 @@ define <16 x i8> @smax16i8(<16 x i8> %a, <16 x i8> %b) {
   ret <16 x i8> %c
 }
 
+declare <32 x i8> @llvm.smax.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
+
+define void @smax32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+; CHECK-LABEL: smax32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smax v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    smax v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <32 x i8> @llvm.smax.v32i8(<32 x i8> %a, <32 x i8> %b)
+  store <32 x i8> %c, <32 x i8>* %p
+  ret void
+}
+
 declare <4 x i16> @llvm.smax.v4i16(<4 x i16> %a, <4 x i16> %b) readnone
 
 define <4 x i16> @smax4i16(<4 x i16> %a, <4 x i16> %b) {
@@ -100,6 +130,20 @@ define <8 x i16> @smax8i16(<8 x i16> %a, <8 x i16> %b) {
   ret <8 x i16> %c
 }
 
+declare <16 x i16> @llvm.smax.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
+
+define void @smax16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+; CHECK-LABEL: smax16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smax v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    smax v1.8h, v1.8h, v3.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <16 x i16> @llvm.smax.v16i16(<16 x i16> %a, <16 x i16> %b)
+  store <16 x i16> %c, <16 x i16>* %p
+  ret void
+}
+
 declare <2 x i32> @llvm.smax.v2i32(<2 x i32> %a, <2 x i32> %b) readnone
 
 define <2 x i32> @smax2i32(<2 x i32> %a, <2 x i32> %b) {
@@ -122,19 +166,41 @@ define <4 x i32> @smax4i32(<4 x i32> %a, <4 x i32> %b) {
   ret <4 x i32> %c
 }
 
-declare <1 x i64> @llvm.smax.v1i64(<1 x i64> %a, <1 x i64> %b) readnone
+declare <8 x i32> @llvm.smax.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
 
-define <1 x i64> @smax1i64(<1 x i64> %a, <1 x i64> %b) {
-; CHECK-LABEL: smax1i64:
+define void @smax8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+; CHECK-LABEL: smax8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    fmov x9, d0
-; CHECK-NEXT:    cmp x9, x8
-; CHECK-NEXT:    csel x8, x9, x8, gt
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    smax v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    smax v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
+  %c = call <8 x i32>@llvm.smax.v8i32(<8 x i32> %a, <8 x i32> %b)
+  store <8 x i32> %c, <8 x i32>* %p
+  ret void
+}
+
+declare <1 x i64> @llvm.smax.v1i64(<1 x i64> %a, <1 x i64> %b) readnone
+
+define <1 x i64> @smax1i64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK-ISEL-LABEL: smax1i64:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-ISEL-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-ISEL-NEXT:    fmov x8, d1
+; CHECK-ISEL-NEXT:    fmov x9, d0
+; CHECK-ISEL-NEXT:    cmp x9, x8
+; CHECK-ISEL-NEXT:    csel x8, x9, x8, gt
+; CHECK-ISEL-NEXT:    fmov d0, x8
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: smax1i64:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    fmov x8, d0
+; CHECK-GLOBAL-NEXT:    fmov x9, d1
+; CHECK-GLOBAL-NEXT:    cmp x8, x9
+; CHECK-GLOBAL-NEXT:    fcsel d0, d0, d1, gt
+; CHECK-GLOBAL-NEXT:    ret
   %c = call <1 x i64> @llvm.smax.v1i64(<1 x i64> %a, <1 x i64> %b)
   ret <1 x i64> %c
 }
@@ -142,33 +208,93 @@ define <1 x i64> @smax1i64(<1 x i64> %a, <1 x i64> %b) {
 declare <2 x i64> @llvm.smax.v2i64(<2 x i64> %a, <2 x i64> %b) readnone
 
 define <2 x i64> @smax2i64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-LABEL: smax2i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, v1.d[1]
-; CHECK-NEXT:    mov x9, v0.d[1]
-; CHECK-NEXT:    fmov x10, d1
-; CHECK-NEXT:    fmov x11, d0
-; CHECK-NEXT:    cmp x9, x8
-; CHECK-NEXT:    csel x8, x9, x8, gt
-; CHECK-NEXT:    cmp x11, x10
-; CHECK-NEXT:    csel x9, x11, x10, gt
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    mov v0.d[1], x8
-; CHECK-NEXT:    ret
+; CHECK-ISEL-LABEL: smax2i64:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    mov x8, v1.d[1]
+; CHECK-ISEL-NEXT:    mov x9, v0.d[1]
+; CHECK-ISEL-NEXT:    fmov x10, d1
+; CHECK-ISEL-NEXT:    fmov x11, d0
+; CHECK-ISEL-NEXT:    cmp x9, x8
+; CHECK-ISEL-NEXT:    csel x8, x9, x8, gt
+; CHECK-ISEL-NEXT:    cmp x11, x10
+; CHECK-ISEL-NEXT:    csel x9, x11, x10, gt
+; CHECK-ISEL-NEXT:    fmov d0, x9
+; CHECK-ISEL-NEXT:    mov v0.d[1], x8
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: smax2i64:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    cmgt v2.2d, v0.2d, v1.2d
+; CHECK-GLOBAL-NEXT:    shl v2.2d, v2.2d, #63
+; CHECK-GLOBAL-NEXT:    sshr v2.2d, v2.2d, #63
+; CHECK-GLOBAL-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-GLOBAL-NEXT:    ret
   %c = call <2 x i64> @llvm.smax.v2i64(<2 x i64> %a, <2 x i64> %b)
   ret <2 x i64> %c
 }
 
+declare <4 x i64> @llvm.smax.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
+
+define void @smax4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+; CHECK-ISEL-LABEL: smax4i64:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    mov x8, v2.d[1]
+; CHECK-ISEL-NEXT:    mov x9, v0.d[1]
+; CHECK-ISEL-NEXT:    fmov x10, d2
+; CHECK-ISEL-NEXT:    fmov x11, d0
+; CHECK-ISEL-NEXT:    cmp x9, x8
+; CHECK-ISEL-NEXT:    csel x8, x9, x8, gt
+; CHECK-ISEL-NEXT:    cmp x11, x10
+; CHECK-ISEL-NEXT:    mov x9, v3.d[1]
+; CHECK-ISEL-NEXT:    csel x10, x11, x10, gt
+; CHECK-ISEL-NEXT:    mov x11, v1.d[1]
+; CHECK-ISEL-NEXT:    cmp x11, x9
+; CHECK-ISEL-NEXT:    fmov d0, x10
+; CHECK-ISEL-NEXT:    fmov x10, d3
+; CHECK-ISEL-NEXT:    csel x9, x11, x9, gt
+; CHECK-ISEL-NEXT:    fmov x11, d1
+; CHECK-ISEL-NEXT:    cmp x11, x10
+; CHECK-ISEL-NEXT:    csel x10, x11, x10, gt
+; CHECK-ISEL-NEXT:    fmov d1, x10
+; CHECK-ISEL-NEXT:    mov v0.d[1], x8
+; CHECK-ISEL-NEXT:    mov v1.d[1], x9
+; CHECK-ISEL-NEXT:    stp q0, q1, [x0]
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: smax4i64:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    cmgt v4.2d, v0.2d, v2.2d
+; CHECK-GLOBAL-NEXT:    cmgt v5.2d, v1.2d, v3.2d
+; CHECK-GLOBAL-NEXT:    shl v4.2d, v4.2d, #63
+; CHECK-GLOBAL-NEXT:    shl v5.2d, v5.2d, #63
+; CHECK-GLOBAL-NEXT:    sshr v4.2d, v4.2d, #63
+; CHECK-GLOBAL-NEXT:    sshr v5.2d, v5.2d, #63
+; CHECK-GLOBAL-NEXT:    bif v0.16b, v2.16b, v4.16b
+; CHECK-GLOBAL-NEXT:    bif v1.16b, v3.16b, v5.16b
+; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
+; CHECK-GLOBAL-NEXT:    ret
+  %c = call <4 x i64> @llvm.smax.v4i64(<4 x i64> %a, <4 x i64> %b)
+  store <4 x i64> %c, <4 x i64>* %p
+  ret void
+}
+
 declare i8 @llvm.umax.i8(i8 %a, i8 %b) readnone
 
 define i8 @umaxi8(i8 %a, i8 %b) {
-; CHECK-LABEL: umaxi8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    and w8, w1, #0xff
-; CHECK-NEXT:    and w9, w0, #0xff
-; CHECK-NEXT:    cmp w9, w8
-; CHECK-NEXT:    csel w0, w9, w8, hi
-; CHECK-NEXT:    ret
+; CHECK-ISEL-LABEL: umaxi8:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    and w8, w1, #0xff
+; CHECK-ISEL-NEXT:    and w9, w0, #0xff
+; CHECK-ISEL-NEXT:    cmp w9, w8
+; CHECK-ISEL-NEXT:    csel w0, w9, w8, hi
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: umaxi8:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    and w8, w0, #0xff
+; CHECK-GLOBAL-NEXT:    cmp w8, w1, uxtb
+; CHECK-GLOBAL-NEXT:    csel w0, w0, w1, hi
+; CHECK-GLOBAL-NEXT:    ret
   %c = call i8 @llvm.umax.i8(i8 %a, i8 %b)
   ret i8 %c
 }
@@ -176,13 +302,20 @@ define i8 @umaxi8(i8 %a, i8 %b) {
 declare i16 @llvm.umax.i16(i16 %a, i16 %b) readnone
 
 define i16 @umaxi16(i16 %a, i16 %b) {
-; CHECK-LABEL: umaxi16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    and w8, w1, #0xffff
-; CHECK-NEXT:    and w9, w0, #0xffff
-; CHECK-NEXT:    cmp w9, w8
-; CHECK-NEXT:    csel w0, w9, w8, hi
-; CHECK-NEXT:    ret
+; CHECK-ISEL-LABEL: umaxi16:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    and w8, w1, #0xffff
+; CHECK-ISEL-NEXT:    and w9, w0, #0xffff
+; CHECK-ISEL-NEXT:    cmp w9, w8
+; CHECK-ISEL-NEXT:    csel w0, w9, w8, hi
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: umaxi16:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    and w8, w0, #0xffff
+; CHECK-GLOBAL-NEXT:    cmp w8, w1, uxth
+; CHECK-GLOBAL-NEXT:    csel w0, w0, w1, hi
+; CHECK-GLOBAL-NEXT:    ret
   %c = call i16 @llvm.umax.i16(i16 %a, i16 %b)
   ret i16 %c
 }
@@ -233,6 +366,20 @@ define <16 x i8> @umax16i8(<16 x i8> %a, <16 x i8> %b) {
   ret <16 x i8> %c
 }
 
+declare <32 x i8> @llvm.umax.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
+
+define void @umax32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+; CHECK-LABEL: umax32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umax v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    umax v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <32 x i8> @llvm.umax.v32i8(<32 x i8> %a, <32 x i8> %b)
+  store <32 x i8> %c, <32 x i8>* %p
+  ret void
+}
+
 declare <4 x i16> @llvm.umax.v4i16(<4 x i16> %a, <4 x i16> %b) readnone
 
 define <4 x i16> @umax4i16(<4 x i16> %a, <4 x i16> %b) {
@@ -255,6 +402,20 @@ define <8 x i16> @umax8i16(<8 x i16> %a, <8 x i16> %b) {
   ret <8 x i16> %c
 }
 
+declare <16 x i16> @llvm.umax.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
+
+define void @umax16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+; CHECK-LABEL: umax16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umax v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    umax v1.8h, v1.8h, v3.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <16 x i16> @llvm.umax.v16i16(<16 x i16> %a, <16 x i16> %b)
+  store <16 x i16> %c, <16 x i16>* %p
+  ret void
+}
+
 declare <2 x i32> @llvm.umax.v2i32(<2 x i32> %a, <2 x i32> %b) readnone
 
 define <2 x i32> @umax2i32(<2 x i32> %a, <2 x i32> %b) {
@@ -277,19 +438,41 @@ define <4 x i32> @umax4i32(<4 x i32> %a, <4 x i32> %b) {
   ret <4 x i32> %c
 }
 
-declare <1 x i64> @llvm.umax.v1i64(<1 x i64> %a, <1 x i64> %b) readnone
+declare <8 x i32> @llvm.umax.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
 
-define <1 x i64> @umax1i64(<1 x i64> %a, <1 x i64> %b) {
-; CHECK-LABEL: umax1i64:
+define void @umax8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+; CHECK-LABEL: umax8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    fmov x9, d0
-; CHECK-NEXT:    cmp x9, x8
-; CHECK-NEXT:    csel x8, x9, x8, hi
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    umax v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    umax v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
+  %c = call <8 x i32>@llvm.umax.v8i32(<8 x i32> %a, <8 x i32> %b)
+  store <8 x i32> %c, <8 x i32>* %p
+  ret void
+}
+
+declare <1 x i64> @llvm.umax.v1i64(<1 x i64> %a, <1 x i64> %b) readnone
+
+define <1 x i64> @umax1i64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK-ISEL-LABEL: umax1i64:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-ISEL-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-ISEL-NEXT:    fmov x8, d1
+; CHECK-ISEL-NEXT:    fmov x9, d0
+; CHECK-ISEL-NEXT:    cmp x9, x8
+; CHECK-ISEL-NEXT:    csel x8, x9, x8, hi
+; CHECK-ISEL-NEXT:    fmov d0, x8
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: umax1i64:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    fmov x8, d0
+; CHECK-GLOBAL-NEXT:    fmov x9, d1
+; CHECK-GLOBAL-NEXT:    cmp x8, x9
+; CHECK-GLOBAL-NEXT:    fcsel d0, d0, d1, hi
+; CHECK-GLOBAL-NEXT:    ret
   %c = call <1 x i64> @llvm.umax.v1i64(<1 x i64> %a, <1 x i64> %b)
   ret <1 x i64> %c
 }
@@ -297,25 +480,69 @@ define <1 x i64> @umax1i64(<1 x i64> %a, <1 x i64> %b) {
 declare <2 x i64> @llvm.umax.v2i64(<2 x i64> %a, <2 x i64> %b) readnone
 
 define <2 x i64> @umax2i64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-LABEL: umax2i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uqsub v1.2d, v1.2d, v0.2d
-; CHECK-NEXT:    add v0.2d, v0.2d, v1.2d
-; CHECK-NEXT:    ret
+; CHECK-ISEL-LABEL: umax2i64:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    uqsub v1.2d, v1.2d, v0.2d
+; CHECK-ISEL-NEXT:    add v0.2d, v0.2d, v1.2d
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: umax2i64:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    cmhi v2.2d, v0.2d, v1.2d
+; CHECK-GLOBAL-NEXT:    shl v2.2d, v2.2d, #63
+; CHECK-GLOBAL-NEXT:    sshr v2.2d, v2.2d, #63
+; CHECK-GLOBAL-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-GLOBAL-NEXT:    ret
   %c = call <2 x i64> @llvm.umax.v2i64(<2 x i64> %a, <2 x i64> %b)
   ret <2 x i64> %c
 }
 
+declare <4 x i64> @llvm.umax.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
+
+define void @umax4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+; CHECK-ISEL-LABEL: umax4i64:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    uqsub v2.2d, v2.2d, v0.2d
+; CHECK-ISEL-NEXT:    uqsub v3.2d, v3.2d, v1.2d
+; CHECK-ISEL-NEXT:    add v0.2d, v0.2d, v2.2d
+; CHECK-ISEL-NEXT:    add v1.2d, v1.2d, v3.2d
+; CHECK-ISEL-NEXT:    stp q0, q1, [x0]
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: umax4i64:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    cmhi v4.2d, v0.2d, v2.2d
+; CHECK-GLOBAL-NEXT:    cmhi v5.2d, v1.2d, v3.2d
+; CHECK-GLOBAL-NEXT:    shl v4.2d, v4.2d, #63
+; CHECK-GLOBAL-NEXT:    shl v5.2d, v5.2d, #63
+; CHECK-GLOBAL-NEXT:    sshr v4.2d, v4.2d, #63
+; CHECK-GLOBAL-NEXT:    sshr v5.2d, v5.2d, #63
+; CHECK-GLOBAL-NEXT:    bif v0.16b, v2.16b, v4.16b
+; CHECK-GLOBAL-NEXT:    bif v1.16b, v3.16b, v5.16b
+; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
+; CHECK-GLOBAL-NEXT:    ret
+  %c = call <4 x i64> @llvm.umax.v4i64(<4 x i64> %a, <4 x i64> %b)
+  store <4 x i64> %c, <4 x i64>* %p
+  ret void
+}
+
 declare i8 @llvm.smin.i8(i8 %a, i8 %b) readnone
 
 define i8 @smini8(i8 %a, i8 %b) {
-; CHECK-LABEL: smini8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sxtb w8, w1
-; CHECK-NEXT:    sxtb w9, w0
-; CHECK-NEXT:    cmp w9, w8
-; CHECK-NEXT:    csel w0, w9, w8, lt
-; CHECK-NEXT:    ret
+; CHECK-ISEL-LABEL: smini8:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    sxtb w8, w1
+; CHECK-ISEL-NEXT:    sxtb w9, w0
+; CHECK-ISEL-NEXT:    cmp w9, w8
+; CHECK-ISEL-NEXT:    csel w0, w9, w8, lt
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: smini8:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    sxtb w8, w0
+; CHECK-GLOBAL-NEXT:    cmp w8, w1, sxtb
+; CHECK-GLOBAL-NEXT:    csel w0, w0, w1, lt
+; CHECK-GLOBAL-NEXT:    ret
   %c = call i8 @llvm.smin.i8(i8 %a, i8 %b)
   ret i8 %c
 }
@@ -323,13 +550,20 @@ define i8 @smini8(i8 %a, i8 %b) {
 declare i16 @llvm.smin.i16(i16 %a, i16 %b) readnone
 
 define i16 @smini16(i16 %a, i16 %b) {
-; CHECK-LABEL: smini16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    sxth w8, w1
-; CHECK-NEXT:    sxth w9, w0
-; CHECK-NEXT:    cmp w9, w8
-; CHECK-NEXT:    csel w0, w9, w8, lt
-; CHECK-NEXT:    ret
+; CHECK-ISEL-LABEL: smini16:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    sxth w8, w1
+; CHECK-ISEL-NEXT:    sxth w9, w0
+; CHECK-ISEL-NEXT:    cmp w9, w8
+; CHECK-ISEL-NEXT:    csel w0, w9, w8, lt
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: smini16:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    sxth w8, w0
+; CHECK-GLOBAL-NEXT:    cmp w8, w1, sxth
+; CHECK-GLOBAL-NEXT:    csel w0, w0, w1, lt
+; CHECK-GLOBAL-NEXT:    ret
   %c = call i16 @llvm.smin.i16(i16 %a, i16 %b)
   ret i16 %c
 }
@@ -380,6 +614,20 @@ define <16 x i8> @smin16i8(<16 x i8> %a, <16 x i8> %b) {
   ret <16 x i8> %c
 }
 
+declare <32 x i8> @llvm.smin.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
+
+define void @smin32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+; CHECK-LABEL: smin32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smin v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    smin v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <32 x i8> @llvm.smin.v32i8(<32 x i8> %a, <32 x i8> %b)
+  store <32 x i8> %c, <32 x i8>* %p
+  ret void
+}
+
 declare <4 x i16> @llvm.smin.v4i16(<4 x i16> %a, <4 x i16> %b) readnone
 
 define <4 x i16> @smin4i16(<4 x i16> %a, <4 x i16> %b) {
@@ -402,6 +650,20 @@ define <8 x i16> @smin8i16(<8 x i16> %a, <8 x i16> %b) {
   ret <8 x i16> %c
 }
 
+declare <16 x i16> @llvm.smin.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
+
+define void @smin16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+; CHECK-LABEL: smin16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    smin v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    smin v1.8h, v1.8h, v3.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <16 x i16> @llvm.smin.v16i16(<16 x i16> %a, <16 x i16> %b)
+  store <16 x i16> %c, <16 x i16>* %p
+  ret void
+}
+
 declare <2 x i32> @llvm.smin.v2i32(<2 x i32> %a, <2 x i32> %b) readnone
 
 define <2 x i32> @smin2i32(<2 x i32> %a, <2 x i32> %b) {
@@ -424,19 +686,41 @@ define <4 x i32> @smin4i32(<4 x i32> %a, <4 x i32> %b) {
   ret <4 x i32> %c
 }
 
-declare <1 x i64> @llvm.smin.v1i64(<1 x i64> %a, <1 x i64> %b) readnone
+declare <8 x i32> @llvm.smin.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
 
-define <1 x i64> @smin1i64(<1 x i64> %a, <1 x i64> %b) {
-; CHECK-LABEL: smin1i64:
+define void @smin8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+; CHECK-LABEL: smin8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    fmov x9, d0
-; CHECK-NEXT:    cmp x9, x8
-; CHECK-NEXT:    csel x8, x9, x8, lt
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    smin v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    smin v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
+  %c = call <8 x i32>@llvm.smin.v8i32(<8 x i32> %a, <8 x i32> %b)
+  store <8 x i32> %c, <8 x i32>* %p
+  ret void
+}
+
+declare <1 x i64> @llvm.smin.v1i64(<1 x i64> %a, <1 x i64> %b) readnone
+
+define <1 x i64> @smin1i64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK-ISEL-LABEL: smin1i64:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-ISEL-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-ISEL-NEXT:    fmov x8, d1
+; CHECK-ISEL-NEXT:    fmov x9, d0
+; CHECK-ISEL-NEXT:    cmp x9, x8
+; CHECK-ISEL-NEXT:    csel x8, x9, x8, lt
+; CHECK-ISEL-NEXT:    fmov d0, x8
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: smin1i64:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    fmov x8, d0
+; CHECK-GLOBAL-NEXT:    fmov x9, d1
+; CHECK-GLOBAL-NEXT:    cmp x8, x9
+; CHECK-GLOBAL-NEXT:    fcsel d0, d0, d1, lt
+; CHECK-GLOBAL-NEXT:    ret
   %c = call <1 x i64> @llvm.smin.v1i64(<1 x i64> %a, <1 x i64> %b)
   ret <1 x i64> %c
 }
@@ -444,33 +728,93 @@ define <1 x i64> @smin1i64(<1 x i64> %a, <1 x i64> %b) {
 declare <2 x i64> @llvm.smin.v2i64(<2 x i64> %a, <2 x i64> %b) readnone
 
 define <2 x i64> @smin2i64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-LABEL: smin2i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov x8, v1.d[1]
-; CHECK-NEXT:    mov x9, v0.d[1]
-; CHECK-NEXT:    fmov x10, d1
-; CHECK-NEXT:    fmov x11, d0
-; CHECK-NEXT:    cmp x9, x8
-; CHECK-NEXT:    csel x8, x9, x8, lt
-; CHECK-NEXT:    cmp x11, x10
-; CHECK-NEXT:    csel x9, x11, x10, lt
-; CHECK-NEXT:    fmov d0, x9
-; CHECK-NEXT:    mov v0.d[1], x8
-; CHECK-NEXT:    ret
+; CHECK-ISEL-LABEL: smin2i64:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    mov x8, v1.d[1]
+; CHECK-ISEL-NEXT:    mov x9, v0.d[1]
+; CHECK-ISEL-NEXT:    fmov x10, d1
+; CHECK-ISEL-NEXT:    fmov x11, d0
+; CHECK-ISEL-NEXT:    cmp x9, x8
+; CHECK-ISEL-NEXT:    csel x8, x9, x8, lt
+; CHECK-ISEL-NEXT:    cmp x11, x10
+; CHECK-ISEL-NEXT:    csel x9, x11, x10, lt
+; CHECK-ISEL-NEXT:    fmov d0, x9
+; CHECK-ISEL-NEXT:    mov v0.d[1], x8
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: smin2i64:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    cmgt v2.2d, v1.2d, v0.2d
+; CHECK-GLOBAL-NEXT:    shl v2.2d, v2.2d, #63
+; CHECK-GLOBAL-NEXT:    sshr v2.2d, v2.2d, #63
+; CHECK-GLOBAL-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-GLOBAL-NEXT:    ret
   %c = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %a, <2 x i64> %b)
   ret <2 x i64> %c
 }
 
+declare <4 x i64> @llvm.smin.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
+
+define void @smin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+; CHECK-ISEL-LABEL: smin4i64:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    mov x8, v2.d[1]
+; CHECK-ISEL-NEXT:    mov x9, v0.d[1]
+; CHECK-ISEL-NEXT:    fmov x10, d2
+; CHECK-ISEL-NEXT:    fmov x11, d0
+; CHECK-ISEL-NEXT:    cmp x9, x8
+; CHECK-ISEL-NEXT:    csel x8, x9, x8, lt
+; CHECK-ISEL-NEXT:    cmp x11, x10
+; CHECK-ISEL-NEXT:    mov x9, v3.d[1]
+; CHECK-ISEL-NEXT:    csel x10, x11, x10, lt
+; CHECK-ISEL-NEXT:    mov x11, v1.d[1]
+; CHECK-ISEL-NEXT:    cmp x11, x9
+; CHECK-ISEL-NEXT:    fmov d0, x10
+; CHECK-ISEL-NEXT:    fmov x10, d3
+; CHECK-ISEL-NEXT:    csel x9, x11, x9, lt
+; CHECK-ISEL-NEXT:    fmov x11, d1
+; CHECK-ISEL-NEXT:    cmp x11, x10
+; CHECK-ISEL-NEXT:    csel x10, x11, x10, lt
+; CHECK-ISEL-NEXT:    fmov d1, x10
+; CHECK-ISEL-NEXT:    mov v0.d[1], x8
+; CHECK-ISEL-NEXT:    mov v1.d[1], x9
+; CHECK-ISEL-NEXT:    stp q0, q1, [x0]
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: smin4i64:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    cmgt v4.2d, v2.2d, v0.2d
+; CHECK-GLOBAL-NEXT:    cmgt v5.2d, v3.2d, v1.2d
+; CHECK-GLOBAL-NEXT:    shl v4.2d, v4.2d, #63
+; CHECK-GLOBAL-NEXT:    shl v5.2d, v5.2d, #63
+; CHECK-GLOBAL-NEXT:    sshr v4.2d, v4.2d, #63
+; CHECK-GLOBAL-NEXT:    sshr v5.2d, v5.2d, #63
+; CHECK-GLOBAL-NEXT:    bif v0.16b, v2.16b, v4.16b
+; CHECK-GLOBAL-NEXT:    bif v1.16b, v3.16b, v5.16b
+; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
+; CHECK-GLOBAL-NEXT:    ret
+  %c = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %a, <4 x i64> %b)
+  store <4 x i64> %c, <4 x i64>* %p
+  ret void
+}
+
 declare i8 @llvm.umin.i8(i8 %a, i8 %b) readnone
 
 define i8 @umini8(i8 %a, i8 %b) {
-; CHECK-LABEL: umini8:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    and w8, w1, #0xff
-; CHECK-NEXT:    and w9, w0, #0xff
-; CHECK-NEXT:    cmp w9, w8
-; CHECK-NEXT:    csel w0, w9, w8, lo
-; CHECK-NEXT:    ret
+; CHECK-ISEL-LABEL: umini8:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    and w8, w1, #0xff
+; CHECK-ISEL-NEXT:    and w9, w0, #0xff
+; CHECK-ISEL-NEXT:    cmp w9, w8
+; CHECK-ISEL-NEXT:    csel w0, w9, w8, lo
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: umini8:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    and w8, w0, #0xff
+; CHECK-GLOBAL-NEXT:    cmp w8, w1, uxtb
+; CHECK-GLOBAL-NEXT:    csel w0, w0, w1, lo
+; CHECK-GLOBAL-NEXT:    ret
   %c = call i8 @llvm.umin.i8(i8 %a, i8 %b)
   ret i8 %c
 }
@@ -478,13 +822,20 @@ define i8 @umini8(i8 %a, i8 %b) {
 declare i16 @llvm.umin.i16(i16 %a, i16 %b) readnone
 
 define i16 @umini16(i16 %a, i16 %b) {
-; CHECK-LABEL: umini16:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    and w8, w1, #0xffff
-; CHECK-NEXT:    and w9, w0, #0xffff
-; CHECK-NEXT:    cmp w9, w8
-; CHECK-NEXT:    csel w0, w9, w8, lo
-; CHECK-NEXT:    ret
+; CHECK-ISEL-LABEL: umini16:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    and w8, w1, #0xffff
+; CHECK-ISEL-NEXT:    and w9, w0, #0xffff
+; CHECK-ISEL-NEXT:    cmp w9, w8
+; CHECK-ISEL-NEXT:    csel w0, w9, w8, lo
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: umini16:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    and w8, w0, #0xffff
+; CHECK-GLOBAL-NEXT:    cmp w8, w1, uxth
+; CHECK-GLOBAL-NEXT:    csel w0, w0, w1, lo
+; CHECK-GLOBAL-NEXT:    ret
   %c = call i16 @llvm.umin.i16(i16 %a, i16 %b)
   ret i16 %c
 }
@@ -535,6 +886,20 @@ define <16 x i8> @umin16i8(<16 x i8> %a, <16 x i8> %b) {
   ret <16 x i8> %c
 }
 
+declare <32 x i8> @llvm.umin.v32i8(<32 x i8> %a, <32 x i8> %b) readnone
+
+define void @umin32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %p) {
+; CHECK-LABEL: umin32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umin v0.16b, v0.16b, v2.16b
+; CHECK-NEXT:    umin v1.16b, v1.16b, v3.16b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <32 x i8> @llvm.umin.v32i8(<32 x i8> %a, <32 x i8> %b)
+  store <32 x i8> %c, <32 x i8>* %p
+  ret void
+}
+
 declare <4 x i16> @llvm.umin.v4i16(<4 x i16> %a, <4 x i16> %b) readnone
 
 define <4 x i16> @umin4i16(<4 x i16> %a, <4 x i16> %b) {
@@ -557,6 +922,20 @@ define <8 x i16> @umin8i16(<8 x i16> %a, <8 x i16> %b) {
   ret <8 x i16> %c
 }
 
+declare <16 x i16> @llvm.umin.v16i16(<16 x i16> %a, <16 x i16> %b) readnone
+
+define void @umin16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %p) {
+; CHECK-LABEL: umin16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    umin v0.8h, v0.8h, v2.8h
+; CHECK-NEXT:    umin v1.8h, v1.8h, v3.8h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %c = call <16 x i16> @llvm.umin.v16i16(<16 x i16> %a, <16 x i16> %b)
+  store <16 x i16> %c, <16 x i16>* %p
+  ret void
+}
+
 declare <2 x i32> @llvm.umin.v2i32(<2 x i32> %a, <2 x i32> %b) readnone
 
 define <2 x i32> @umin2i32(<2 x i32> %a, <2 x i32> %b) {
@@ -579,19 +958,41 @@ define <4 x i32> @umin4i32(<4 x i32> %a, <4 x i32> %b) {
   ret <4 x i32> %c
 }
 
-declare <1 x i64> @llvm.umin.v1i64(<1 x i64> %a, <1 x i64> %b) readnone
+declare <8 x i32> @llvm.umin.v8i32(<8 x i32> %a, <8 x i32> %b) readnone
 
-define <1 x i64> @umin1i64(<1 x i64> %a, <1 x i64> %b) {
-; CHECK-LABEL: umin1i64:
+define void @umin8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %p) {
+; CHECK-LABEL: umin8i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    // kill: def $d1 killed $d1 def $q1
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    fmov x9, d0
-; CHECK-NEXT:    cmp x9, x8
-; CHECK-NEXT:    csel x8, x9, x8, lo
-; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    umin v0.4s, v0.4s, v2.4s
+; CHECK-NEXT:    umin v1.4s, v1.4s, v3.4s
+; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
+  %c = call <8 x i32> @llvm.umin.v8i32(<8 x i32> %a, <8 x i32> %b)
+  store <8 x i32> %c, <8 x i32>* %p
+  ret void
+}
+
+declare <1 x i64> @llvm.umin.v1i64(<1 x i64> %a, <1 x i64> %b) readnone
+
+define <1 x i64> @umin1i64(<1 x i64> %a, <1 x i64> %b) {
+; CHECK-ISEL-LABEL: umin1i64:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    // kill: def $d1 killed $d1 def $q1
+; CHECK-ISEL-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-ISEL-NEXT:    fmov x8, d1
+; CHECK-ISEL-NEXT:    fmov x9, d0
+; CHECK-ISEL-NEXT:    cmp x9, x8
+; CHECK-ISEL-NEXT:    csel x8, x9, x8, lo
+; CHECK-ISEL-NEXT:    fmov d0, x8
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: umin1i64:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    fmov x8, d0
+; CHECK-GLOBAL-NEXT:    fmov x9, d1
+; CHECK-GLOBAL-NEXT:    cmp x8, x9
+; CHECK-GLOBAL-NEXT:    fcsel d0, d0, d1, lo
+; CHECK-GLOBAL-NEXT:    ret
   %c = call <1 x i64> @llvm.umin.v1i64(<1 x i64> %a, <1 x i64> %b)
   ret <1 x i64> %c
 }
@@ -599,11 +1000,48 @@ define <1 x i64> @umin1i64(<1 x i64> %a, <1 x i64> %b) {
 declare <2 x i64> @llvm.umin.v2i64(<2 x i64> %a, <2 x i64> %b) readnone
 
 define <2 x i64> @umin2i64(<2 x i64> %a, <2 x i64> %b) {
-; CHECK-LABEL: umin2i64:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    uqsub v1.2d, v0.2d, v1.2d
-; CHECK-NEXT:    sub v0.2d, v0.2d, v1.2d
-; CHECK-NEXT:    ret
+; CHECK-ISEL-LABEL: umin2i64:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    uqsub v1.2d, v0.2d, v1.2d
+; CHECK-ISEL-NEXT:    sub v0.2d, v0.2d, v1.2d
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: umin2i64:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    cmhi v2.2d, v1.2d, v0.2d
+; CHECK-GLOBAL-NEXT:    shl v2.2d, v2.2d, #63
+; CHECK-GLOBAL-NEXT:    sshr v2.2d, v2.2d, #63
+; CHECK-GLOBAL-NEXT:    bif v0.16b, v1.16b, v2.16b
+; CHECK-GLOBAL-NEXT:    ret
   %c = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %a, <2 x i64> %b)
   ret <2 x i64> %c
 }
+
+declare <4 x i64> @llvm.umin.v4i64(<4 x i64> %a, <4 x i64> %b) readnone
+
+define void @umin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
+; CHECK-ISEL-LABEL: umin4i64:
+; CHECK-ISEL:       // %bb.0:
+; CHECK-ISEL-NEXT:    uqsub v2.2d, v0.2d, v2.2d
+; CHECK-ISEL-NEXT:    uqsub v3.2d, v1.2d, v3.2d
+; CHECK-ISEL-NEXT:    sub v0.2d, v0.2d, v2.2d
+; CHECK-ISEL-NEXT:    sub v1.2d, v1.2d, v3.2d
+; CHECK-ISEL-NEXT:    stp q0, q1, [x0]
+; CHECK-ISEL-NEXT:    ret
+;
+; CHECK-GLOBAL-LABEL: umin4i64:
+; CHECK-GLOBAL:       // %bb.0:
+; CHECK-GLOBAL-NEXT:    cmhi v4.2d, v2.2d, v0.2d
+; CHECK-GLOBAL-NEXT:    cmhi v5.2d, v3.2d, v1.2d
+; CHECK-GLOBAL-NEXT:    shl v4.2d, v4.2d, #63
+; CHECK-GLOBAL-NEXT:    shl v5.2d, v5.2d, #63
+; CHECK-GLOBAL-NEXT:    sshr v4.2d, v4.2d, #63
+; CHECK-GLOBAL-NEXT:    sshr v5.2d, v5.2d, #63
+; CHECK-GLOBAL-NEXT:    bif v0.16b, v2.16b, v4.16b
+; CHECK-GLOBAL-NEXT:    bif v1.16b, v3.16b, v5.16b
+; CHECK-GLOBAL-NEXT:    stp q0, q1, [x0]
+; CHECK-GLOBAL-NEXT:    ret
+  %c = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %a, <4 x i64> %b)
+  store <4 x i64> %c, <4 x i64>* %p
+  ret void
+}
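
For reference, the CHECK-GLOBAL output above can be reproduced from a
standalone copy of one of the new tests. This is only a sketch: the file
name repro.ll is illustrative, and it assumes the test's RUN lines invoke
llc on AArch64 with -global-isel for the CHECK-GLOBAL prefix and without
it for CHECK-ISEL:

  ; repro.ll -- hypothetical standalone reproducer
  ;   llc -mtriple=aarch64 -global-isel -o - repro.ll   (CHECK-GLOBAL)
  ;   llc -mtriple=aarch64 -o - repro.ll                (CHECK-ISEL)
  declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)

  define void @smin4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %p) {
    %c = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %a, <4 x i64> %b)
    store <4 x i64> %c, <4 x i64>* %p
    ret void
  }

In the CHECK-GLOBAL output each <2 x i64> half is expanded to a compare
plus select, roughly equivalent to this IR:

  %m = icmp slt <2 x i64> %x, %y
  %r = select <2 x i1> %m, <2 x i64> %x, <2 x i64> %y

which maps to the cmgt + bif pair above; the intervening shl/sshr #63
sequence sign-extends bit 0 of each lane into a full lane mask for bif
(a no-op here, since cmgt already yields all-zero or all-one lanes, so
the result is unchanged).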