[llvm] e2ccf7f - [SVE] Lower fixed length VECREDUCE_[SMAX|SMIN] to Scalable

Cameron McInally via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 25 07:58:28 PDT 2020


Author: Cameron McInally
Date: 2020-09-25T09:58:17-05:00
New Revision: e2ccf7f178ddaf202171a96cd6f41daa7a74a09c

URL: https://github.com/llvm/llvm-project/commit/e2ccf7f178ddaf202171a96cd6f41daa7a74a09c
DIFF: https://github.com/llvm/llvm-project/commit/e2ccf7f178ddaf202171a96cd6f41daa7a74a09c.diff

LOG: [SVE] Lower fixed length VECREDUCE_[SMAX|SMIN] to Scalable

This patch is pretty similar to the VECREDUCE_ADD patch, with some minor tweaks.

The AArch64ISD::[SMAX|SMIN]V_PRED nodes return element-sized results. This requires an ANY_EXTEND for results narrower than 32 bits, since Legalization promotes those results.
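
That fixup is the tail of LowerFixedLengthReductionToSVE (see the hunk below).
As a standalone sketch, using a hypothetical helper name rather than the
patch's actual code, it amounts to:

    // Sketch only: widen or narrow the element-sized reduction result to the
    // scalar type the VECREDUCE node was legalized to. For i8/i16 reductions
    // this emits an ANY_EXTEND (the promoted high bits are undefined anyway);
    // for UADDV's i64 result it emits a TRUNCATE.
    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    static SDValue adjustReductionResult(SelectionDAG &DAG, const SDLoc &DL,
                                         SDValue ElementSizedRes,
                                         EVT LegalResVT) {
      if (ElementSizedRes.getValueType() == LegalResVT)
        return ElementSizedRes;
      return DAG.getAnyExtOrTrunc(ElementSizedRes, DL, LegalResVT);
    }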

There is no NEON i64 vector support for SMAXV|SMINV, so use SVE for those.

Differential Revision: https://reviews.llvm.org/D88259

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 444b36ecf8c1..fd2198b38a44 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1092,6 +1092,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::UMAX, MVT::v2i64, Custom);
       setOperationAction(ISD::UMIN, MVT::v1i64, Custom);
       setOperationAction(ISD::UMIN, MVT::v2i64, Custom);
+      setOperationAction(ISD::VECREDUCE_SMAX, MVT::v2i64, Custom);
+      setOperationAction(ISD::VECREDUCE_SMIN, MVT::v2i64, Custom);
     }
   }
 
@@ -1219,6 +1221,10 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
   setOperationAction(ISD::UMAX, VT, Custom);
   setOperationAction(ISD::UMIN, VT, Custom);
   setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
+  setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
+  setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
+  setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
+  setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
   setOperationAction(ISD::VSELECT, VT, Custom);
   setOperationAction(ISD::XOR, VT, Custom);
   setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
@@ -9650,18 +9656,27 @@ static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp,
 
 SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
                                               SelectionDAG &DAG) const {
-  SDValue VecOp = Op.getOperand(0);
+  SDValue Src = Op.getOperand(0);
+  EVT SrcVT = Src.getValueType();
 
   SDLoc dl(Op);
   switch (Op.getOpcode()) {
   case ISD::VECREDUCE_ADD:
-    if (useSVEForFixedLengthVectorVT(VecOp.getValueType()))
+    if (useSVEForFixedLengthVectorVT(SrcVT))
       return LowerFixedLengthReductionToSVE(AArch64ISD::UADDV_PRED, Op, DAG);
     return getReductionSDNode(AArch64ISD::UADDV, dl, Op, DAG);
-  case ISD::VECREDUCE_SMAX:
+  case ISD::VECREDUCE_SMAX: {
+    bool OverrideNEON = SrcVT.getVectorElementType() == MVT::i64;
+    if (useSVEForFixedLengthVectorVT(SrcVT, OverrideNEON))
+      return LowerFixedLengthReductionToSVE(AArch64ISD::SMAXV_PRED, Op, DAG);
     return getReductionSDNode(AArch64ISD::SMAXV, dl, Op, DAG);
-  case ISD::VECREDUCE_SMIN:
+  }
+  case ISD::VECREDUCE_SMIN: {
+    bool OverrideNEON = SrcVT.getVectorElementType() == MVT::i64;
+    if (useSVEForFixedLengthVectorVT(SrcVT, OverrideNEON))
+      return LowerFixedLengthReductionToSVE(AArch64ISD::SMINV_PRED, Op, DAG);
     return getReductionSDNode(AArch64ISD::SMINV, dl, Op, DAG);
+  }
   case ISD::VECREDUCE_UMAX:
     return getReductionSDNode(AArch64ISD::UMAXV, dl, Op, DAG);
   case ISD::VECREDUCE_UMIN:
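
For contrast with the new SVE path, the NEON path's getReductionSDNode (its
body sits just above this hunk and is not part of the diff) has roughly the
following shape; this is a sketch inferred from context, not copied from the
patch:

    // Sketch: the NEON path builds the target reduction node over the whole
    // vector, then extracts lane 0 in the original scalar type. Note there
    // is no predicate operand, unlike the *_PRED nodes used by the SVE path.
    static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp,
                                      SelectionDAG &DAG) {
      SDValue VecOp = ScalarOp.getOperand(0);
      auto Rdx = DAG.getNode(Op, DL, VecOp.getSimpleValueType(), VecOp);
      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarOp.getValueType(),
                         Rdx, DAG.getConstant(0, DL, MVT::i64));
    }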
@@ -16001,10 +16016,9 @@ SDValue AArch64TargetLowering::LowerFixedLengthReductionToSVE(unsigned Opcode,
   SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT,
                             Rdx, DAG.getConstant(0, DL, MVT::i64));
 
-  // This is needed for UADDV, since it returns an i64 result. The VEC_REDUCE
-  // nodes expect an element size result.
+  // The VEC_REDUCE nodes expect an element size result.
   if (ResVT != ScalarOp.getValueType())
-    Res = DAG.getNode(ISD::TRUNCATE, DL, ScalarOp.getValueType(), Res);
+    Res = DAG.getAnyExtOrTrunc(Res, DL, ScalarOp.getValueType());
 
   return Res;
 }
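
Only the tail of LowerFixedLengthReductionToSVE appears in the hunk above. The
overall shape of the function is roughly as follows, a sketch that assumes the
file's existing helpers getContainerForFixedLengthVector, getPredicateForVector
and convertToScalableVector:

    SDValue AArch64TargetLowering::LowerFixedLengthReductionToSVE(
        unsigned Opcode, SDValue ScalarOp, SelectionDAG &DAG) const {
      SDLoc DL(ScalarOp);
      SDValue VecOp = ScalarOp.getOperand(0);
      EVT SrcVT = VecOp.getValueType();
      EVT ResVT = SrcVT.getVectorElementType();
      EVT ContainerVT = getContainerForFixedLengthVector(DAG, SrcVT);

      // Move the fixed-length operand into a scalable container and run the
      // predicated reduction (e.g. AArch64ISD::SMAXV_PRED) over it.
      SDValue Pg = getPredicateForVector(DAG, DL, SrcVT);
      VecOp = convertToScalableVector(DAG, ContainerVT, VecOp);
      SDValue Rdx = DAG.getNode(Opcode, DL, ContainerVT, Pg, VecOp);

      // Extract lane 0, then fix up the scalar type as shown in the hunk.
      SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Rdx,
                                DAG.getConstant(0, DL, MVT::i64));
      if (ResVT != ScalarOp.getValueType())
        Res = DAG.getAnyExtOrTrunc(Res, DL, ScalarOp.getValueType());
      return Res;
    }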

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll
index a6557e03e63e..f5a332d2f287 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-reduce.ll
@@ -45,7 +45,7 @@ define i8 @uaddv_v16i8(<16 x i8> %a) #0 {
 define i8 @uaddv_v32i8(<32 x i8>* %a) #0 {
 ; CHECK-LABEL: uaddv_v32i8:
 ; VBITS_GE_256: ptrue [[PG:p[0-9]+]].b, vl32
-; VBITS_GE_256-DAG: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
 ; VBITS_GE_256-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].b
 ; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_256-NEXT: ret
@@ -57,7 +57,7 @@ define i8 @uaddv_v32i8(<32 x i8>* %a) #0 {
 define i8 @uaddv_v64i8(<64 x i8>* %a) #0 {
 ; CHECK-LABEL: uaddv_v64i8:
 ; VBITS_GE_512: ptrue [[PG:p[0-9]+]].b, vl64
-; VBITS_GE_512-DAG: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
 ; VBITS_GE_512-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].b
 ; VBITS_GE_512-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_512-NEXT: ret
@@ -69,7 +69,7 @@ define i8 @uaddv_v64i8(<64 x i8>* %a) #0 {
 define i8 @uaddv_v128i8(<128 x i8>* %a) #0 {
 ; CHECK-LABEL: uaddv_v128i8:
 ; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].b, vl128
-; VBITS_GE_1024-DAG: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
 ; VBITS_GE_1024-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].b
 ; VBITS_GE_1024-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_1024-NEXT: ret
@@ -81,7 +81,7 @@ define i8 @uaddv_v128i8(<128 x i8>* %a) #0 {
 define i8 @uaddv_v256i8(<256 x i8>* %a) #0 {
 ; CHECK-LABEL: uaddv_v256i8:
 ; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].b, vl256
-; VBITS_GE_2048-DAG: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
 ; VBITS_GE_2048-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].b
 ; VBITS_GE_2048-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_2048-NEXT: ret
@@ -111,7 +111,7 @@ define i16 @uaddv_v8i16(<8 x i16> %a) #0 {
 define i16 @uaddv_v16i16(<16 x i16>* %a) #0 {
 ; CHECK-LABEL: uaddv_v16i16:
 ; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
-; VBITS_GE_256-DAG: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
 ; VBITS_GE_256-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].h
 ; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_256-NEXT: ret
@@ -123,7 +123,7 @@ define i16 @uaddv_v16i16(<16 x i16>* %a) #0 {
 define i16 @uaddv_v32i16(<32 x i16>* %a) #0 {
 ; CHECK-LABEL: uaddv_v32i16:
 ; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32
-; VBITS_GE_512-DAG: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
 ; VBITS_GE_512-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].h
 ; VBITS_GE_512-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_512-NEXT: ret
@@ -135,7 +135,7 @@ define i16 @uaddv_v32i16(<32 x i16>* %a) #0 {
 define i16 @uaddv_v64i16(<64 x i16>* %a) #0 {
 ; CHECK-LABEL: uaddv_v64i16:
 ; VBITS_GE_1048: ptrue [[PG:p[0-9]+]].h, vl64
-; VBITS_GE_1048-DAG: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_1048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
 ; VBITS_GE_1048-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].h
 ; VBITS_GE_1048-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_1048-NEXT: ret
@@ -147,7 +147,7 @@ define i16 @uaddv_v64i16(<64 x i16>* %a) #0 {
 define i16 @uaddv_v128i16(<128 x i16>* %a) #0 {
 ; CHECK-LABEL: uaddv_v128i16:
 ; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128
-; VBITS_GE_2048-DAG: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
 ; VBITS_GE_2048-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].h
 ; VBITS_GE_2048-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_2048-NEXT: ret
@@ -177,7 +177,7 @@ define i32 @uaddv_v4i32(<4 x i32> %a) #0 {
 define i32 @uaddv_v8i32(<8 x i32>* %a) #0 {
 ; CHECK-LABEL: uaddv_v8i32:
 ; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
-; VBITS_GE_256-DAG: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
 ; VBITS_GE_256-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].s
 ; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_256-NEXT: ret
@@ -189,7 +189,7 @@ define i32 @uaddv_v8i32(<8 x i32>* %a) #0 {
 define i32 @uaddv_v16i32(<16 x i32>* %a) #0 {
 ; CHECK-LABEL: uaddv_v16i32:
 ; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16
-; VBITS_GE_512-DAG: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
 ; VBITS_GE_512-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].s
 ; VBITS_GE_512-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_512-NEXT: ret
@@ -201,7 +201,7 @@ define i32 @uaddv_v16i32(<16 x i32>* %a) #0 {
 define i32 @uaddv_v32i32(<32 x i32>* %a) #0 {
 ; CHECK-LABEL: uaddv_v32i32:
 ; VBITS_GE_1048: ptrue [[PG:p[0-9]+]].s, vl32
-; VBITS_GE_1048-DAG: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_1048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
 ; VBITS_GE_1048-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].s
 ; VBITS_GE_1048-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_1048-NEXT: ret
@@ -213,7 +213,7 @@ define i32 @uaddv_v32i32(<32 x i32>* %a) #0 {
 define i32 @uaddv_v64i32(<64 x i32>* %a) #0 {
 ; CHECK-LABEL: uaddv_v64i32:
 ; VBITS_GE_2096: ptrue [[PG:p[0-9]+]].s, vl64
-; VBITS_GE_2096-DAG: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_2096-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
 ; VBITS_GE_2096-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].s
 ; VBITS_GE_2086-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_2096-NEXT: ret
@@ -222,7 +222,7 @@ define i32 @uaddv_v64i32(<64 x i32>* %a) #0 {
   ret i32 %res
 }
 
-; Nothing to do for 64-bit vectors..
+; Nothing to do for single element vectors.
 define i64 @uaddv_v1i64(<1 x i64> %a) #0 {
 ; CHECK-LABEL: uaddv_v1i64:
 ; CHECK: fmov x0, d0
@@ -243,7 +243,7 @@ define i64 @uaddv_v2i64(<2 x i64> %a) #0 {
 define i64 @uaddv_v4i64(<4 x i64>* %a) #0 {
 ; CHECK-LABEL: uaddv_v4i64:
 ; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
-; VBITS_GE_256-DAG: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
 ; VBITS_GE_256-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
 ; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_256-NEXT: ret
@@ -255,7 +255,7 @@ define i64 @uaddv_v4i64(<4 x i64>* %a) #0 {
 define i64 @uaddv_v8i64(<8 x i64>* %a) #0 {
 ; CHECK-LABEL: uaddv_v8i64:
 ; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
-; VBITS_GE_512-DAG: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
 ; VBITS_GE_512-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
 ; VBITS_GE_512-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_512-NEXT: ret
@@ -267,7 +267,7 @@ define i64 @uaddv_v8i64(<8 x i64>* %a) #0 {
 define i64 @uaddv_v16i64(<16 x i64>* %a) #0 {
 ; CHECK-LABEL: uaddv_v16i64:
 ; VBITS_GE_1048: ptrue [[PG:p[0-9]+]].d, vl16
-; VBITS_GE_1048-DAG: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_1048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
 ; VBITS_GE_1048-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
 ; VBITS_GE_1048-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_1048-NEXT: ret
@@ -279,7 +279,7 @@ define i64 @uaddv_v16i64(<16 x i64>* %a) #0 {
 define i64 @uaddv_v32i64(<32 x i64>* %a) #0 {
 ; CHECK-LABEL: uaddv_v32i64:
 ; VBITS_GE_2096: ptrue [[PG:p[0-9]+]].d, vl32
-; VBITS_GE_2096-DAG: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_2096-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
 ; VBITS_GE_2096-NEXT: uaddv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
 ; VBITS_GE_2096-NEXT: fmov x0, [[REDUCE]]
 ; VBITS_GE_2096-NEXT: ret
@@ -288,6 +288,546 @@ define i64 @uaddv_v32i64(<32 x i64>* %a) #0 {
   ret i64 %res
 }
 
+;
+; SMAXV
+;
+
+; Don't use SVE for 64-bit vectors.
+define i8 @smaxv_v8i8(<8 x i8> %a) #0 {
+; CHECK-LABEL: smaxv_v8i8:
+; CHECK: smaxv b0, v0.8b
+; CHECK: ret
+  %res = call i8 @llvm.experimental.vector.reduce.smax.v8i8(<8 x i8> %a)
+  ret i8 %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define i8 @smaxv_v16i8(<16 x i8> %a) #0 {
+; CHECK-LABEL: smaxv_v16i8:
+; CHECK: smaxv b0, v0.16b
+; CHECK: ret
+  %res = call i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @smaxv_v32i8(<32 x i8>* %a) #0 {
+; CHECK-LABEL: smaxv_v32i8:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].b, vl32
+; VBITS_GE_256-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: smaxv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+  %op = load <32 x i8>, <32 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.smax.v32i8(<32 x i8> %op)
+  ret i8 %res
+}
+
+define i8 @smaxv_v64i8(<64 x i8>* %a) #0 {
+; CHECK-LABEL: smaxv_v64i8:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].b, vl64
+; VBITS_GE_512-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: smaxv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_512-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+  %op = load <64 x i8>, <64 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.smax.v64i8(<64 x i8> %op)
+  ret i8 %res
+}
+
+define i8 @smaxv_v128i8(<128 x i8>* %a) #0 {
+; CHECK-LABEL: smaxv_v128i8:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].b, vl128
+; VBITS_GE_1024-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: smaxv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_1024-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <128 x i8>, <128 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.smax.v128i8(<128 x i8> %op)
+  ret i8 %res
+}
+
+define i8 @smaxv_v256i8(<256 x i8>* %a) #0 {
+; CHECK-LABEL: smaxv_v256i8:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].b, vl256
+; VBITS_GE_2048-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: smaxv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_2048-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <256 x i8>, <256 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.smax.v256i8(<256 x i8> %op)
+  ret i8 %res
+}
+
+; Don't use SVE for 64-bit vectors.
+define i16 @smaxv_v4i16(<4 x i16> %a) #0 {
+; CHECK-LABEL: smaxv_v4i16:
+; CHECK: smaxv h0, v0.4h
+; CHECK: ret
+  %res = call i16 @llvm.experimental.vector.reduce.smax.v4i16(<4 x i16> %a)
+  ret i16 %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define i16 @smaxv_v8i16(<8 x i16> %a) #0 {
+; CHECK-LABEL: smaxv_v8i16:
+; CHECK: smaxv h0, v0.8h
+; CHECK: ret
+  %res = call i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @smaxv_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: smaxv_v16i16:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
+; VBITS_GE_256-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: smaxv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %res = call i16 @llvm.experimental.vector.reduce.smax.v16i16(<16 x i16> %op)
+  ret i16 %res
+}
+
+define i16 @smaxv_v32i16(<32 x i16>* %a) #0 {
+; CHECK-LABEL: smaxv_v32i16:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32
+; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: smaxv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_512-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+  %op = load <32 x i16>, <32 x i16>* %a
+  %res = call i16 @llvm.experimental.vector.reduce.smax.v32i16(<32 x i16> %op)
+  ret i16 %res
+}
+
+define i16 @smaxv_v64i16(<64 x i16>* %a) #0 {
+; CHECK-LABEL: smaxv_v64i16:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, vl64
+; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: smaxv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_1024-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <64 x i16>, <64 x i16>* %a
+  %res = call i16 @llvm.experimental.vector.reduce.smax.v64i16(<64 x i16> %op)
+  ret i16 %res
+}
+
+define i16 @smaxv_v128i16(<128 x i16>* %a) #0 {
+; CHECK-LABEL: smaxv_v128i16:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128
+; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: smaxv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_2048-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <128 x i16>, <128 x i16>* %a
+  %res = call i16 @llvm.experimental.vector.reduce.smax.v128i16(<128 x i16> %op)
+  ret i16 %res
+}
+
+; Don't use SVE for 64-bit vectors.
+define i32 @smaxv_v2i32(<2 x i32> %a) #0 {
+; CHECK-LABEL: smaxv_v2i32:
+; CHECK: smaxp v0.2s, v0.2s
+; CHECK: ret
+  %res = call i32 @llvm.experimental.vector.reduce.smax.v2i32(<2 x i32> %a)
+  ret i32 %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define i32 @smaxv_v4i32(<4 x i32> %a) #0 {
+; CHECK-LABEL: smaxv_v4i32:
+; CHECK: smaxv s0, v0.4s
+; CHECK: ret
+  %res = call i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @smaxv_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: smaxv_v8i32:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
+; VBITS_GE_256-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: smaxv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_256-NEXT: fmov w0, [[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %res = call i32 @llvm.experimental.vector.reduce.smax.v8i32(<8 x i32> %op)
+  ret i32 %res
+}
+
+define i32 @smaxv_v16i32(<16 x i32>* %a) #0 {
+; CHECK-LABEL: smaxv_v16i32:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16
+; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: smaxv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_512-NEXT: fmov w0, [[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+  %op = load <16 x i32>, <16 x i32>* %a
+  %res = call i32 @llvm.experimental.vector.reduce.smax.v16i32(<16 x i32> %op)
+  ret i32 %res
+}
+
+define i32 @smaxv_v32i32(<32 x i32>* %a) #0 {
+; CHECK-LABEL: smaxv_v32i32:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32
+; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: smaxv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_1024-NEXT: fmov w0, [[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <32 x i32>, <32 x i32>* %a
+  %res = call i32 @llvm.experimental.vector.reduce.smax.v32i32(<32 x i32> %op)
+  ret i32 %res
+}
+
+define i32 @smaxv_v64i32(<64 x i32>* %a) #0 {
+; CHECK-LABEL: smaxv_v64i32:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64
+; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: smaxv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_2048-NEXT: fmov w0, [[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <64 x i32>, <64 x i32>* %a
+  %res = call i32 @llvm.experimental.vector.reduce.smax.v64i32(<64 x i32> %op)
+  ret i32 %res
+}
+
+; Nothing to do for single element vectors.
+define i64 @smaxv_v1i64(<1 x i64> %a) #0 {
+; CHECK-LABEL: smaxv_v1i64:
+; CHECK: fmov x0, d0
+; CHECK: ret
+  %res = call i64 @llvm.experimental.vector.reduce.smax.v1i64(<1 x i64> %a)
+  ret i64 %res
+}
+
+; No NEON SMAXV support for 64-bit elements, so use SVE.
+define i64 @smaxv_v2i64(<2 x i64> %a) #0 {
+; CHECK-LABEL: smaxv_v2i64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl2
+; CHECK-NEXT: smaxv [[REDUCE:d[0-9]+]], [[PG]], z0.d
+; CHECK-NEXT: fmov x0, [[REDUCE]]
+; CHECK-NEXT: ret
+  %res = call i64 @llvm.experimental.vector.reduce.smax.v2i64(<2 x i64> %a)
+  ret i64 %res
+}
+
+define i64 @smaxv_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: smaxv_v4i64:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
+; VBITS_GE_256-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: smaxv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %res = call i64 @llvm.experimental.vector.reduce.smax.v4i64(<4 x i64> %op)
+  ret i64 %res
+}
+
+define i64 @smaxv_v8i64(<8 x i64>* %a) #0 {
+; CHECK-LABEL: smaxv_v8i64:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: smaxv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_512-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+  %op = load <8 x i64>, <8 x i64>* %a
+  %res = call i64 @llvm.experimental.vector.reduce.smax.v8i64(<8 x i64> %op)
+  ret i64 %res
+}
+
+define i64 @smaxv_v16i64(<16 x i64>* %a) #0 {
+; CHECK-LABEL: smaxv_v16i64:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: smaxv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_1024-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <16 x i64>, <16 x i64>* %a
+  %res = call i64 @llvm.experimental.vector.reduce.smax.v16i64(<16 x i64> %op)
+  ret i64 %res
+}
+
+define i64 @smaxv_v32i64(<32 x i64>* %a) #0 {
+; CHECK-LABEL: smaxv_v32i64:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: smaxv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_2048-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <32 x i64>, <32 x i64>* %a
+  %res = call i64 @llvm.experimental.vector.reduce.smax.v32i64(<32 x i64> %op)
+  ret i64 %res
+}
+
+;
+; SMINV
+;
+
+; Don't use SVE for 64-bit vectors.
+define i8 @sminv_v8i8(<8 x i8> %a) #0 {
+; CHECK-LABEL: sminv_v8i8:
+; CHECK: sminv b0, v0.8b
+; CHECK: ret
+  %res = call i8 @llvm.experimental.vector.reduce.smin.v8i8(<8 x i8> %a)
+  ret i8 %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define i8 @sminv_v16i8(<16 x i8> %a) #0 {
+; CHECK-LABEL: sminv_v16i8:
+; CHECK: sminv b0, v0.16b
+; CHECK: ret
+  %res = call i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8> %a)
+  ret i8 %res
+}
+
+define i8 @sminv_v32i8(<32 x i8>* %a) #0 {
+; CHECK-LABEL: sminv_v32i8:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].b, vl32
+; VBITS_GE_256-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: sminv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+  %op = load <32 x i8>, <32 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.smin.v32i8(<32 x i8> %op)
+  ret i8 %res
+}
+
+define i8 @sminv_v64i8(<64 x i8>* %a) #0 {
+; CHECK-LABEL: sminv_v64i8:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].b, vl64
+; VBITS_GE_512-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: sminv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_512-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+  %op = load <64 x i8>, <64 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.smin.v64i8(<64 x i8> %op)
+  ret i8 %res
+}
+
+define i8 @sminv_v128i8(<128 x i8>* %a) #0 {
+; CHECK-LABEL: sminv_v128i8:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].b, vl128
+; VBITS_GE_1024-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: sminv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_1024-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <128 x i8>, <128 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.smin.v128i8(<128 x i8> %op)
+  ret i8 %res
+}
+
+define i8 @sminv_v256i8(<256 x i8>* %a) #0 {
+; CHECK-LABEL: sminv_v256i8:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].b, vl256
+; VBITS_GE_2048-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: sminv b[[REDUCE:[0-9]+]], [[PG]], [[OP]].b
+; VBITS_GE_2048-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <256 x i8>, <256 x i8>* %a
+  %res = call i8 @llvm.experimental.vector.reduce.smin.v256i8(<256 x i8> %op)
+  ret i8 %res
+}
+
+; Don't use SVE for 64-bit vectors.
+define i16 @sminv_v4i16(<4 x i16> %a) #0 {
+; CHECK-LABEL: sminv_v4i16:
+; CHECK: sminv h0, v0.4h
+; CHECK: ret
+  %res = call i16 @llvm.experimental.vector.reduce.smin.v4i16(<4 x i16> %a)
+  ret i16 %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define i16 @sminv_v8i16(<8 x i16> %a) #0 {
+; CHECK-LABEL: sminv_v8i16:
+; CHECK: sminv h0, v0.8h
+; CHECK: ret
+  %res = call i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16> %a)
+  ret i16 %res
+}
+
+define i16 @sminv_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: sminv_v16i16:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].h, vl16
+; VBITS_GE_256-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: sminv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_256-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %res = call i16 @llvm.experimental.vector.reduce.smin.v16i16(<16 x i16> %op)
+  ret i16 %res
+}
+
+define i16 @sminv_v32i16(<32 x i16>* %a) #0 {
+; CHECK-LABEL: sminv_v32i16:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32
+; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: sminv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_512-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+  %op = load <32 x i16>, <32 x i16>* %a
+  %res = call i16 @llvm.experimental.vector.reduce.smin.v32i16(<32 x i16> %op)
+  ret i16 %res
+}
+
+define i16 @sminv_v64i16(<64 x i16>* %a) #0 {
+; CHECK-LABEL: sminv_v64i16:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, vl64
+; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: sminv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_1024-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <64 x i16>, <64 x i16>* %a
+  %res = call i16 @llvm.experimental.vector.reduce.smin.v64i16(<64 x i16> %op)
+  ret i16 %res
+}
+
+define i16 @sminv_v128i16(<128 x i16>* %a) #0 {
+; CHECK-LABEL: sminv_v128i16:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128
+; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: sminv h[[REDUCE:[0-9]+]], [[PG]], [[OP]].h
+; VBITS_GE_2048-NEXT: fmov w0, s[[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <128 x i16>, <128 x i16>* %a
+  %res = call i16 @llvm.experimental.vector.reduce.smin.v128i16(<128 x i16> %op)
+  ret i16 %res
+}
+
+; Don't use SVE for 64-bit vectors.
+define i32 @sminv_v2i32(<2 x i32> %a) #0 {
+; CHECK-LABEL: sminv_v2i32:
+; CHECK: sminp v0.2s, v0.2s
+; CHECK: ret
+  %res = call i32 @llvm.experimental.vector.reduce.smin.v2i32(<2 x i32> %a)
+  ret i32 %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define i32 @sminv_v4i32(<4 x i32> %a) #0 {
+; CHECK-LABEL: sminv_v4i32:
+; CHECK: sminv s0, v0.4s
+; CHECK: ret
+  %res = call i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32> %a)
+  ret i32 %res
+}
+
+define i32 @sminv_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: sminv_v8i32:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].s, vl8
+; VBITS_GE_256-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: sminv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_256-NEXT: fmov w0, [[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %res = call i32 @llvm.experimental.vector.reduce.smin.v8i32(<8 x i32> %op)
+  ret i32 %res
+}
+
+define i32 @sminv_v16i32(<16 x i32>* %a) #0 {
+; CHECK-LABEL: sminv_v16i32:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16
+; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: sminv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_512-NEXT: fmov w0, [[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+  %op = load <16 x i32>, <16 x i32>* %a
+  %res = call i32 @llvm.experimental.vector.reduce.smin.v16i32(<16 x i32> %op)
+  ret i32 %res
+}
+
+define i32 @sminv_v32i32(<32 x i32>* %a) #0 {
+; CHECK-LABEL: sminv_v32i32:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32
+; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: sminv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_1024-NEXT: fmov w0, [[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <32 x i32>, <32 x i32>* %a
+  %res = call i32 @llvm.experimental.vector.reduce.smin.v32i32(<32 x i32> %op)
+  ret i32 %res
+}
+
+define i32 @sminv_v64i32(<64 x i32>* %a) #0 {
+; CHECK-LABEL: sminv_v64i32:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64
+; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: sminv [[REDUCE:s[0-9]+]], [[PG]], [[OP]].s
+; VBITS_GE_2048-NEXT: fmov w0, [[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <64 x i32>, <64 x i32>* %a
+  %res = call i32 @llvm.experimental.vector.reduce.smin.v64i32(<64 x i32> %op)
+  ret i32 %res
+}
+
+; Nothing to do for single element vectors.
+define i64 @sminv_v1i64(<1 x i64> %a) #0 {
+; CHECK-LABEL: sminv_v1i64:
+; CHECK: fmov x0, d0
+; CHECK: ret
+  %res = call i64 @llvm.experimental.vector.reduce.smin.v1i64(<1 x i64> %a)
+  ret i64 %res
+}
+
+; No NEON SMINV support for 64-bit elements, so use SVE.
+define i64 @sminv_v2i64(<2 x i64> %a) #0 {
+; CHECK-LABEL: sminv_v2i64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl2
+; CHECK-NEXT: sminv [[REDUCE:d[0-9]+]], [[PG]], z0.d
+; CHECK-NEXT: fmov x0, [[REDUCE]]
+; CHECK-NEXT: ret
+  %res = call i64 @llvm.experimental.vector.reduce.smin.v2i64(<2 x i64> %a)
+  ret i64 %res
+}
+
+define i64 @sminv_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: sminv_v4i64:
+; VBITS_GE_256: ptrue [[PG:p[0-9]+]].d, vl4
+; VBITS_GE_256-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_256-NEXT: sminv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_256-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_256-NEXT: ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %res = call i64 @llvm.experimental.vector.reduce.smin.v4i64(<4 x i64> %op)
+  ret i64 %res
+}
+
+define i64 @sminv_v8i64(<8 x i64>* %a) #0 {
+; CHECK-LABEL: sminv_v8i64:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: sminv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_512-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_512-NEXT: ret
+  %op = load <8 x i64>, <8 x i64>* %a
+  %res = call i64 @llvm.experimental.vector.reduce.smin.v8i64(<8 x i64> %op)
+  ret i64 %res
+}
+
+define i64 @sminv_v16i64(<16 x i64>* %a) #0 {
+; CHECK-LABEL: sminv_v16i64:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: sminv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_1024-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <16 x i64>, <16 x i64>* %a
+  %res = call i64 @llvm.experimental.vector.reduce.smin.v16i64(<16 x i64> %op)
+  ret i64 %res
+}
+
+define i64 @sminv_v32i64(<32 x i64>* %a) #0 {
+; CHECK-LABEL: sminv_v32i64:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: sminv [[REDUCE:d[0-9]+]], [[PG]], [[OP]].d
+; VBITS_GE_2048-NEXT: fmov x0, [[REDUCE]]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <32 x i64>, <32 x i64>* %a
+  %res = call i64 @llvm.experimental.vector.reduce.smin.v32i64(<32 x i64> %op)
+  ret i64 %res
+}
+
 attributes #0 = { "target-features"="+sve" }
 
 declare i8 @llvm.experimental.vector.reduce.add.v8i8(<8 x i8>)
@@ -317,3 +857,59 @@ declare i64 @llvm.experimental.vector.reduce.add.v4i64(<4 x i64>)
 declare i64 @llvm.experimental.vector.reduce.add.v8i64(<8 x i64>)
 declare i64 @llvm.experimental.vector.reduce.add.v16i64(<16 x i64>)
 declare i64 @llvm.experimental.vector.reduce.add.v32i64(<32 x i64>)
+
+declare i8 @llvm.experimental.vector.reduce.smax.v8i8(<8 x i8>)
+declare i8 @llvm.experimental.vector.reduce.smax.v16i8(<16 x i8>)
+declare i8 @llvm.experimental.vector.reduce.smax.v32i8(<32 x i8>)
+declare i8 @llvm.experimental.vector.reduce.smax.v64i8(<64 x i8>)
+declare i8 @llvm.experimental.vector.reduce.smax.v128i8(<128 x i8>)
+declare i8 @llvm.experimental.vector.reduce.smax.v256i8(<256 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.smax.v4i16(<4 x i16>)
+declare i16 @llvm.experimental.vector.reduce.smax.v8i16(<8 x i16>)
+declare i16 @llvm.experimental.vector.reduce.smax.v16i16(<16 x i16>)
+declare i16 @llvm.experimental.vector.reduce.smax.v32i16(<32 x i16>)
+declare i16 @llvm.experimental.vector.reduce.smax.v64i16(<64 x i16>)
+declare i16 @llvm.experimental.vector.reduce.smax.v128i16(<128 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.smax.v2i32(<2 x i32>)
+declare i32 @llvm.experimental.vector.reduce.smax.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.smax.v8i32(<8 x i32>)
+declare i32 @llvm.experimental.vector.reduce.smax.v16i32(<16 x i32>)
+declare i32 @llvm.experimental.vector.reduce.smax.v32i32(<32 x i32>)
+declare i32 @llvm.experimental.vector.reduce.smax.v64i32(<64 x i32>)
+
+declare i64 @llvm.experimental.vector.reduce.smax.v1i64(<1 x i64>)
+declare i64 @llvm.experimental.vector.reduce.smax.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.smax.v4i64(<4 x i64>)
+declare i64 @llvm.experimental.vector.reduce.smax.v8i64(<8 x i64>)
+declare i64 @llvm.experimental.vector.reduce.smax.v16i64(<16 x i64>)
+declare i64 @llvm.experimental.vector.reduce.smax.v32i64(<32 x i64>)
+
+declare i8 @llvm.experimental.vector.reduce.smin.v8i8(<8 x i8>)
+declare i8 @llvm.experimental.vector.reduce.smin.v16i8(<16 x i8>)
+declare i8 @llvm.experimental.vector.reduce.smin.v32i8(<32 x i8>)
+declare i8 @llvm.experimental.vector.reduce.smin.v64i8(<64 x i8>)
+declare i8 @llvm.experimental.vector.reduce.smin.v128i8(<128 x i8>)
+declare i8 @llvm.experimental.vector.reduce.smin.v256i8(<256 x i8>)
+
+declare i16 @llvm.experimental.vector.reduce.smin.v4i16(<4 x i16>)
+declare i16 @llvm.experimental.vector.reduce.smin.v8i16(<8 x i16>)
+declare i16 @llvm.experimental.vector.reduce.smin.v16i16(<16 x i16>)
+declare i16 @llvm.experimental.vector.reduce.smin.v32i16(<32 x i16>)
+declare i16 @llvm.experimental.vector.reduce.smin.v64i16(<64 x i16>)
+declare i16 @llvm.experimental.vector.reduce.smin.v128i16(<128 x i16>)
+
+declare i32 @llvm.experimental.vector.reduce.smin.v2i32(<2 x i32>)
+declare i32 @llvm.experimental.vector.reduce.smin.v4i32(<4 x i32>)
+declare i32 @llvm.experimental.vector.reduce.smin.v8i32(<8 x i32>)
+declare i32 @llvm.experimental.vector.reduce.smin.v16i32(<16 x i32>)
+declare i32 @llvm.experimental.vector.reduce.smin.v32i32(<32 x i32>)
+declare i32 @llvm.experimental.vector.reduce.smin.v64i32(<64 x i32>)
+
+declare i64 @llvm.experimental.vector.reduce.smin.v1i64(<1 x i64>)
+declare i64 @llvm.experimental.vector.reduce.smin.v2i64(<2 x i64>)
+declare i64 @llvm.experimental.vector.reduce.smin.v4i64(<4 x i64>)
+declare i64 @llvm.experimental.vector.reduce.smin.v8i64(<8 x i64>)
+declare i64 @llvm.experimental.vector.reduce.smin.v16i64(<16 x i64>)
+declare i64 @llvm.experimental.vector.reduce.smin.v32i64(<32 x i64>)

More information about the llvm-commits mailing list