[llvm] 2d7e757 - [AArch64][SVE] Add patterns for some arith SVE instructions.

Danilo Carvalho Grael via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 13 08:33:48 PST 2020


Author: Danilo Carvalho Grael
Date: 2020-01-13T11:39:42-05:00
New Revision: 2d7e757a836abb54590daa25fce626283adafadf

URL: https://github.com/llvm/llvm-project/commit/2d7e757a836abb54590daa25fce626283adafadf
DIFF: https://github.com/llvm/llvm-project/commit/2d7e757a836abb54590daa25fce626283adafadf.diff

LOG: [AArch64][SVE] Add patterns for some arith SVE instructions.

Summary: Add patterns for the following instructions:
- smax, smin, umax, umin

Reviewers: sdesmalen, huntergr, rengolin, efriedma, c-rhodes, mgudim, kmclaughlin

Subscribers: amehsan

Differential Revision: https://reviews.llvm.org/D71779

Added: 
    llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64InstrFormats.td
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/lib/Target/AArch64/SVEInstrFormats.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 5362c0e06470..a51aa85a931c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -271,6 +271,10 @@ class AArch64DAGToDAGISel : public SelectionDAGISel {
   bool SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SDValue &Shift);
 
   bool SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm);
+
+  bool SelectSVESignedArithImm(SDValue N, SDValue &Imm);
+
+  bool SelectSVEArithImm(SDValue N, SDValue &Imm);
 };
 } // end anonymous namespace
 
@@ -2909,6 +2913,31 @@ bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm, SD
   return false;
 }
 
+bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDValue N, SDValue &Imm) {
+  if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
+    int64_t ImmVal = CNode->getSExtValue();
+    SDLoc DL(N);
+    if (ImmVal >= -127 && ImmVal < 127) {
+      Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
+      return true;
+    }
+  }
+  return false;
+}
+
+bool AArch64DAGToDAGISel::SelectSVEArithImm(SDValue N, SDValue &Imm) {
+  if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
+    uint64_t ImmVal = CNode->getSExtValue();
+    SDLoc DL(N);
+    ImmVal = ImmVal & 0xFF;
+    if (ImmVal < 256) {
+      Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
+      return true;
+    }
+  }
+  return false;
+}
+
 bool AArch64DAGToDAGISel::SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm) {
   if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
     uint64_t ImmVal = CNode->getZExtValue();

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index f4feceff0928..97343fd5304a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -188,6 +188,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::UADDSAT, VT, Legal);
       setOperationAction(ISD::SSUBSAT, VT, Legal);
       setOperationAction(ISD::USUBSAT, VT, Legal);
+      setOperationAction(ISD::SMAX, VT, Legal);
+      setOperationAction(ISD::UMAX, VT, Legal);
+      setOperationAction(ISD::SMIN, VT, Legal);
+      setOperationAction(ISD::UMIN, VT, Legal);
     }
 
     for (auto VT :

diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index b22fc160d5d4..c3efe03a0987 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -305,7 +305,7 @@ def simm9 : Operand<i64>, ImmLeaf<i64, [{ return Imm >= -256 && Imm < 256; }]> {
 }
 
 def SImm8Operand : SImmOperand<8>;
-def simm8 : Operand<i64>, ImmLeaf<i64, [{ return Imm >= -128 && Imm < 127; }]> {
+def simm8 : Operand<i32>, ImmLeaf<i32, [{ return Imm >= -128 && Imm < 127; }]> {
   let ParserMatchClass = SImm8Operand;
   let DecoderMethod = "DecodeSImm<8>";
 }

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 900ae5ab7f03..82af6dc00746 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -130,10 +130,10 @@ let Predicates = [HasSVE] in {
   defm EOR_ZI : sve_int_log_imm<0b01, "eor", "eon", xor>;
   defm AND_ZI : sve_int_log_imm<0b10, "and", "bic", and>;
 
-  defm SMAX_ZI   : sve_int_arith_imm1<0b00, "smax", simm8>;
-  defm SMIN_ZI   : sve_int_arith_imm1<0b10, "smin", simm8>;
-  defm UMAX_ZI   : sve_int_arith_imm1<0b01, "umax", imm0_255>;
-  defm UMIN_ZI   : sve_int_arith_imm1<0b11, "umin", imm0_255>;
+  defm SMAX_ZI   : sve_int_arith_imm1<0b00, "smax", smax>;
+  defm SMIN_ZI   : sve_int_arith_imm1<0b10, "smin", smin>;
+  defm UMAX_ZI   : sve_int_arith_imm1_unsigned<0b01, "umax", umax>;
+  defm UMIN_ZI   : sve_int_arith_imm1_unsigned<0b11, "umin", umin>;
 
   defm MUL_ZI    : sve_int_arith_imm2<"mul">;
   defm MUL_ZPmZ   : sve_int_bin_pred_arit_2<0b000, "mul", int_aarch64_sve_mul>;

diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 615629914993..385d1267be2b 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -212,6 +212,8 @@ def SVELogicalImm16Pat : ComplexPattern<i64, 1, "SelectSVELogicalImm<MVT::i16>",
 def SVELogicalImm32Pat : ComplexPattern<i64, 1, "SelectSVELogicalImm<MVT::i32>", []>;
 def SVELogicalImm64Pat : ComplexPattern<i64, 1, "SelectSVELogicalImm<MVT::i64>", []>;
 
+def SVEArithUImmPat  : ComplexPattern<i32, 1, "SelectSVEArithImm", []>;
+def SVEArithSImmPat  : ComplexPattern<i32, 1, "SelectSVESignedArithImm", []>;
 
 class SVEExactFPImm<string Suffix, string ValA, string ValB> : AsmOperandClass {
   let Name = "SVEExactFPImmOperand" # Suffix;
@@ -317,6 +319,11 @@ class SVE_1_Op_Imm_OptLsl_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty
   : Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i32:$imm, i32:$shift)))))),
         (inst $Op1, i32:$imm, i32:$shift)>;
 
+class SVE_1_Op_Imm_Arith_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
+                             ValueType it, ComplexPattern cpx, Instruction inst>
+  : Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i32:$imm)))))),
+        (inst $Op1, i32:$imm)>;
+
 class SVE_1_Op_Imm_Log_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
                            ValueType it, ComplexPattern cpx, Instruction inst>
   : Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i64:$imm)))))),
@@ -3506,11 +3513,28 @@ class sve_int_arith_imm<bits<2> sz8_64, bits<6> opc, string asm,
   let ElementSize = ElementSizeNone;
 }
 
-multiclass sve_int_arith_imm1<bits<2> opc, string asm, Operand immtype> {
-  def _B : sve_int_arith_imm<0b00, { 0b1010, opc }, asm, ZPR8, immtype>;
-  def _H : sve_int_arith_imm<0b01, { 0b1010, opc }, asm, ZPR16, immtype>;
-  def _S : sve_int_arith_imm<0b10, { 0b1010, opc }, asm, ZPR32, immtype>;
-  def _D : sve_int_arith_imm<0b11, { 0b1010, opc }, asm, ZPR64, immtype>;
+multiclass sve_int_arith_imm1<bits<2> opc, string asm, SDPatternOperator op> {
+  def _B : sve_int_arith_imm<0b00, { 0b1010, opc }, asm, ZPR8, simm8>;
+  def _H : sve_int_arith_imm<0b01, { 0b1010, opc }, asm, ZPR16, simm8>;
+  def _S : sve_int_arith_imm<0b10, { 0b1010, opc }, asm, ZPR32, simm8>;
+  def _D : sve_int_arith_imm<0b11, { 0b1010, opc }, asm, ZPR64, simm8>;
+
+  def : SVE_1_Op_Imm_Arith_Pat<nxv16i8, op, ZPR8, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _B)>;
+  def : SVE_1_Op_Imm_Arith_Pat<nxv8i16, op, ZPR16, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Imm_Arith_Pat<nxv4i32, op, ZPR32, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Imm_Arith_Pat<nxv2i64, op, ZPR64, i64, SVEArithSImmPat, !cast<Instruction>(NAME # _D)>;
+}
+
+multiclass sve_int_arith_imm1_unsigned<bits<2> opc, string asm, SDPatternOperator op> {
+  def _B : sve_int_arith_imm<0b00, { 0b1010, opc }, asm, ZPR8, imm0_255>;
+  def _H : sve_int_arith_imm<0b01, { 0b1010, opc }, asm, ZPR16, imm0_255>;
+  def _S : sve_int_arith_imm<0b10, { 0b1010, opc }, asm, ZPR32, imm0_255>;
+  def _D : sve_int_arith_imm<0b11, { 0b1010, opc }, asm, ZPR64, imm0_255>;
+
+  def : SVE_1_Op_Imm_Arith_Pat<nxv16i8, op, ZPR8, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _B)>;
+  def : SVE_1_Op_Imm_Arith_Pat<nxv8i16, op, ZPR16, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Imm_Arith_Pat<nxv4i32, op, ZPR32, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Imm_Arith_Pat<nxv2i64, op, ZPR64, i64, SVEArithUImmPat, !cast<Instruction>(NAME # _D)>;
 }
 
 multiclass sve_int_arith_imm2<string asm> {

diff --git a/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll b/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll
new file mode 100644
index 000000000000..ec87c27e8d8b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-int-arith-imm.ll
@@ -0,0 +1,365 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; SMAX
+;
+define <vscale x 16 x i8> @smax_i8_pos(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: smax_i8_pos
+; CHECK: smax z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %cmp = icmp sgt <vscale x 16 x i8> %a, %splat
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @smax_i8_neg(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: smax_i8_neg
+; CHECK: smax z0.b, z0.b, #-58
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 -58, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %cmp = icmp sgt <vscale x 16 x i8> %a, %splat
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @smax_i16_pos(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: smax_i16_pos
+; CHECK: smax z0.h, z0.h, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 27, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp sgt <vscale x 8 x i16> %a, %splat
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @smax_i16_neg(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: smax_i16_neg
+; CHECK: smax z0.h, z0.h, #-58
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 -58, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp sgt <vscale x 8 x i16> %a, %splat
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @smax_i32_pos(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: smax_i32_pos
+; CHECK: smax z0.s, z0.s, #27
+; CHECK: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 27, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %cmp = icmp sgt <vscale x 4 x i32> %a, %splat
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @smax_i32_neg(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: smax_i32_neg
+; CHECK: smax z0.s, z0.s, #-58
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 -58, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %cmp = icmp sgt <vscale x 4 x i32> %a, %splat
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @smax_i64_pos(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: smax_i64_pos
+; CHECK: smax z0.d, z0.d, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 27, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %cmp = icmp sgt <vscale x 2 x i64> %a, %splat
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @smax_i64_neg(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: smax_i64_neg
+; CHECK: smax z0.d, z0.d, #-58
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 -58, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %cmp = icmp sgt <vscale x 2 x i64> %a, %splat
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; SMIN
+;
+define <vscale x 16 x i8> @smin_i8_pos(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: smin_i8_pos
+; CHECK: smin z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %cmp = icmp slt <vscale x 16 x i8> %a, %splat
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @smin_i8_neg(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: smin_i8_neg
+; CHECK: smin z0.b, z0.b, #-58
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 -58, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %cmp = icmp slt <vscale x 16 x i8> %a, %splat
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @smin_i16_pos(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: smin_i16_pos
+; CHECK: smin z0.h, z0.h, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 27, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp slt <vscale x 8 x i16> %a, %splat
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @smin_i16_neg(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: smin_i16_neg
+; CHECK: smin z0.h, z0.h, #-58
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 -58, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp slt <vscale x 8 x i16> %a, %splat
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @smin_i32_pos(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: smin_i32_pos
+; CHECK: smin z0.s, z0.s, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 27, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %cmp = icmp slt <vscale x 4 x i32> %a, %splat
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @smin_i32_neg(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: smin_i32_neg
+; CHECK: smin z0.s, z0.s, #-58
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 -58, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %cmp = icmp slt <vscale x 4 x i32> %a, %splat
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @smin_i64_pos(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: smin_i64_pos
+; CHECK: smin z0.d, z0.d, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 27, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %cmp = icmp slt <vscale x 2 x i64> %a, %splat
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @smin_i64_neg(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: smin_i64_neg
+; CHECK: smin z0.d, z0.d, #-58
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 -58, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %cmp = icmp slt <vscale x 2 x i64> %a, %splat
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; UMAX
+;
+define <vscale x 16 x i8> @umax_i8_pos(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: umax_i8_pos
+; CHECK: umax z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %cmp = icmp ugt <vscale x 16 x i8> %a, %splat
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @umax_i8_large(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: umax_i8_large
+; CHECK: umax z0.b, z0.b, #129
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 129, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %cmp = icmp ugt <vscale x 16 x i8> %a, %splat
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @umax_i16_pos(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: umax_i16_pos
+; CHECK: umax z0.h, z0.h, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 27, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp ugt <vscale x 8 x i16> %a, %splat
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @umax_i16_large(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: umax_i16_large
+; CHECK: umax z0.h, z0.h, #129
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 129, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp ugt <vscale x 8 x i16> %a, %splat
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @umax_i32_pos(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: umax_i32_pos
+; CHECK: umax z0.s, z0.s, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 27, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %cmp = icmp ugt <vscale x 4 x i32> %a, %splat
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @umax_i32_large(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: umax_i32_large
+; CHECK: umax z0.s, z0.s, #129
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 129, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %cmp = icmp ugt <vscale x 4 x i32> %a, %splat
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @umax_i64_pos(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: umax_i64_pos
+; CHECK: umax z0.d, z0.d, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 27, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %cmp = icmp ugt <vscale x 2 x i64> %a, %splat
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @umax_i64_large(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: umax_i64_large
+; CHECK: umax z0.d, z0.d, #129
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 129, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %cmp = icmp ugt <vscale x 2 x i64> %a, %splat
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; UMIN
+;
+define <vscale x 16 x i8> @umin_i8_pos(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: umin_i8_pos
+; CHECK: umin z0.b, z0.b, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 27, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 16 x i8> %a, %splat
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @umin_i8_large(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: umin_i8_large
+; CHECK: umin z0.b, z0.b, #129
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 16 x i8> undef, i8 129, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 16 x i8> %a, %splat
+  %res = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @umin_i16_pos(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: umin_i16_pos
+; CHECK: umin z0.h, z0.h, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 27, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 8 x i16> %a, %splat
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @umin_i16_large(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: umin_i16_large
+; CHECK: umin z0.h, z0.h, #129
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 8 x i16> undef, i16 129, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 8 x i16> %a, %splat
+  %res = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %splat
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @umin_i32_pos(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: umin_i32_pos
+; CHECK: umin z0.s, z0.s, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 27, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 4 x i32> %a, %splat
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @umin_i32_large(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: umin_i32_large
+; CHECK: umin z0.s, z0.s, #129
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 4 x i32> undef, i32 129, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 4 x i32> %a, %splat
+  %res = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %splat
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @umin_i64_pos(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: umin_i64_pos
+; CHECK: umin z0.d, z0.d, #27
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 27, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 2 x i64> %a, %splat
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @umin_i64_large(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: umin_i64_large
+; CHECK: umin z0.d, z0.d, #129
+; CHECK-NEXT: ret
+  %elt = insertelement <vscale x 2 x i64> undef, i64 129, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %cmp = icmp ult <vscale x 2 x i64> %a, %splat
+  %res = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %splat
+  ret <vscale x 2 x i64> %res
+}


        


More information about the llvm-commits mailing list