[llvm] 3ed59b7 - [SVE] Implement lowering for fixed length vector multiplication.

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 6 03:04:07 PDT 2020


Author: Paul Walker
Date: 2020-08-06T11:01:39+01:00
New Revision: 3ed59b775d7ce54ef8e881b31ce8917b84470dfb

URL: https://github.com/llvm/llvm-project/commit/3ed59b775d7ce54ef8e881b31ce8917b84470dfb
DIFF: https://github.com/llvm/llvm-project/commit/3ed59b775d7ce54ef8e881b31ce8917b84470dfb.diff

LOG: [SVE] Implement lowering for fixed length vector multiplication.

NOTE: Also uses SVE code generation for NEON-sized vectors, instead
of expanding i64-based vector multiplications.

Differential Revision: https://reviews.llvm.org/D85327
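
In outline, the patch routes a qualifying fixed-length multiply through the
SVE predicated-operation machinery. Below is a simplified sketch of what
LowerMUL plus LowerToPredicatedOp amount to for v2i64; it relies on the
existing static helpers in AArch64ISelLowering.cpp (getPredicateForVector,
getContainerForFixedLengthVector, convertToScalableVector and
convertFromScalableVector), abbreviates the real code in the hunks that
follow, and uses a made-up function name purely for illustration:

  // Sketch only, condensed from LowerMUL + LowerToPredicatedOp below.
  static SDValue lowerMulViaSVE(SDValue Op, SelectionDAG &DAG) {
    EVT VT = Op.getValueType();                        // e.g. MVT::v2i64
    SDLoc DL(Op);
    // Predicate covering exactly VT's lanes (ptrue pN.d, vl2 for v2i64).
    SDValue Pg = getPredicateForVector(DAG, DL, VT);
    // Scalable container type for the fixed-length type (nxv2i64 here).
    EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
    SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
    SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));
    // Predicated multiply, matched by the AArch64mul_p patterns added below.
    SDValue Mul =
        DAG.getNode(AArch64ISD::MUL_PRED, DL, ContainerVT, Pg, Op0, Op1);
    return convertFromScalableVector(DAG, VT, Mul);
  }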

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.h
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/lib/Target/AArch64/SVEInstrFormats.td
    llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 9b78428978ec..ce7cb3e7b9a2 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -979,6 +979,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
         setOperationAction(ISD::TRUNCATE, VT, Custom);
       for (auto VT : {MVT::v8f16, MVT::v4f32})
         setOperationAction(ISD::FP_ROUND, VT, Expand);
+
+      // These operations are not supported on NEON, but SVE can do them.
+      setOperationAction(ISD::MUL, MVT::v1i64, Custom);
+      setOperationAction(ISD::MUL, MVT::v2i64, Custom);
     }
   }
 
@@ -1083,6 +1087,7 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
   setOperationAction(ISD::FMUL, VT, Custom);
   setOperationAction(ISD::FSUB, VT, Custom);
   setOperationAction(ISD::LOAD, VT, Custom);
+  setOperationAction(ISD::MUL, VT, Custom);
   setOperationAction(ISD::OR, VT, Custom);
   setOperationAction(ISD::STORE, VT, Custom);
   setOperationAction(ISD::TRUNCATE, VT, Custom);
@@ -1399,6 +1404,7 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
     MAKE_CASE(AArch64ISD::THREAD_POINTER)
     MAKE_CASE(AArch64ISD::TLSDESC_CALLSEQ)
     MAKE_CASE(AArch64ISD::ADD_PRED)
+    MAKE_CASE(AArch64ISD::MUL_PRED)
     MAKE_CASE(AArch64ISD::SDIV_PRED)
     MAKE_CASE(AArch64ISD::SHL_PRED)
     MAKE_CASE(AArch64ISD::SMAX_PRED)
@@ -3090,10 +3096,17 @@ SDValue AArch64TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
   return DAG.getMergeValues({AND, Chain}, dl);
 }
 
-static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
+SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
+  EVT VT = Op.getValueType();
+
+  // If SVE is available, i64 vector multiplications can also be made legal.
+  bool OverrideNEON = VT == MVT::v2i64 || VT == MVT::v1i64;
+
+  if (useSVEForFixedLengthVectorVT(VT, OverrideNEON))
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::MUL_PRED, OverrideNEON);
+
   // Multiplications are only custom-lowered for 128-bit vectors so that
   // VMULL can be detected.  Otherwise v2i64 multiplications are not legal.
-  EVT VT = Op.getValueType();
   assert(VT.is128BitVector() && VT.isInteger() &&
          "unexpected type for custom-lowering ISD::MUL");
   SDNode *N0 = Op.getOperand(0).getNode();
@@ -3627,20 +3640,19 @@ bool AArch64TargetLowering::useSVEForFixedLengthVectors() const {
   return Subtarget->hasSVE() && Subtarget->getMinSVEVectorSizeInBits() >= 256;
 }
 
-bool AArch64TargetLowering::useSVEForFixedLengthVectorVT(EVT VT) const {
+bool AArch64TargetLowering::useSVEForFixedLengthVectorVT(
+    EVT VT, bool OverrideNEON) const {
   if (!useSVEForFixedLengthVectors())
     return false;
 
   if (!VT.isFixedLengthVector())
     return false;
 
-  // Fixed length predicates should be promoted to i8.
-  // NOTE: This is consistent with how NEON (and thus 64/128bit vectors) work.
-  if (VT.getVectorElementType() == MVT::i1)
-    return false;
-
   // Don't use SVE for vectors we cannot scalarize if required.
   switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
+  // Fixed length predicates should be promoted to i8.
+  // NOTE: This is consistent with how NEON (and thus 64/128bit vectors) work.
+  case MVT::i1:
   default:
     return false;
   case MVT::i8:
@@ -3653,6 +3665,10 @@ bool AArch64TargetLowering::useSVEForFixedLengthVectorVT(EVT VT) const {
     break;
   }
 
+  // All SVE implementations support NEON-sized vectors.
+  if (OverrideNEON && (VT.is128BitVector() || VT.is64BitVector()))
+    return true;
+
   // Ensure NEON MVTs only belong to a single register class.
   if (VT.getSizeInBits() <= 128)
     return false;
@@ -8801,7 +8817,7 @@ SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
   unsigned Idx = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
 
   // We don't have any patterns for scalable vectors yet.
-  if (InVT.isScalableVector() || !useSVEForFixedLengthVectorVT(InVT))
+  if (InVT.isScalableVector())
     return SDValue();
 
   // This will be matched by custom code during ISelDAGToDAG.
@@ -15252,12 +15268,13 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorTruncateToSVE(
 // the original operation's type is used to construct a suitable predicate.
 SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
                                                    SelectionDAG &DAG,
-                                                   unsigned NewOp) const {
+                                                   unsigned NewOp,
+                                                   bool OverrideNEON) const {
   EVT VT = Op.getValueType();
   SDLoc DL(Op);
   auto Pg = getPredicateForVector(DAG, DL, VT);
 
-  if (useSVEForFixedLengthVectorVT(VT)) {
+  if (useSVEForFixedLengthVectorVT(VT, OverrideNEON)) {
     EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
 
     // Create list of operands by converting existing ones to scalable types.
@@ -15268,7 +15285,7 @@ SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
         continue;
       }
 
-      assert(useSVEForFixedLengthVectorVT(V.getValueType()) &&
+      assert(useSVEForFixedLengthVectorVT(V.getValueType(), OverrideNEON) &&
              "Only fixed length vectors are supported!");
       Operands.push_back(convertToScalableVector(DAG, ContainerVT, V));
     }

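With OverrideNEON threaded through, useSVEForFixedLengthVectorVT reduces to
roughly the following decision sequence. This is a condensed restatement of
the hunk above, not verbatim: isSupportedElementType stands in for the switch
over element types (whose full case list is partly elided in the diff), and
the function tail past this hunk is abbreviated.

  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON) {
    if (!useSVEForFixedLengthVectors() || !VT.isFixedLengthVector())
      return false;
    if (!isSupportedElementType(VT))  // i1 predicates are promoted to i8,
      return false;                   // as on NEON, so they return false.
    if (OverrideNEON && (VT.is128BitVector() || VT.is64BitVector()))
      return true;                    // all SVE machines cover NEON sizes
    if (VT.getSizeInBits() <= 128)
      return false;                   // keep NEON MVTs in one register class
    // ... any remaining checks from the original function are elided here.
    return true;
  }
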
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 08f00361bf60..1d0f35690c2d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -79,6 +79,7 @@ enum NodeType : unsigned {
   FMA_PRED,
   FMUL_PRED,
   FSUB_PRED,
+  MUL_PRED,
   SDIV_PRED,
   SHL_PRED,
   SMAX_PRED,
@@ -859,12 +860,13 @@ class AArch64TargetLowering : public TargetLowering {
   SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerDUPQLane(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG,
-                              unsigned NewOp) const;
+  SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG, unsigned NewOp,
+                              bool OverrideNEON = false) const;
   SDValue LowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerDIV(SDValue Op, SelectionDAG &DAG) const;
+  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
@@ -964,7 +966,10 @@ class AArch64TargetLowering : public TargetLowering {
                       const TargetTransformInfo *TTI) const override;
 
   bool useSVEForFixedLengthVectors() const;
-  bool useSVEForFixedLengthVectorVT(EVT VT) const;
+  // Normally SVE is only used for vectors that do not fit within a NEON
+  // vector. This changes when OverrideNEON is true, allowing SVE to be used
+  // for 64-bit and 128-bit vectors as well.
+  bool useSVEForFixedLengthVectorVT(EVT VT, bool OverrideNEON = false) const;
 };
 
 namespace AArch64 {

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index cf34e5f6470d..bedbc7f39d91 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -182,6 +182,7 @@ def AArch64fmul_p : SDNode<"AArch64ISD::FMUL_PRED", SDT_AArch64Arith>;
 def AArch64fsub_p : SDNode<"AArch64ISD::FSUB_PRED", SDT_AArch64Arith>;
 def AArch64lsl_p  : SDNode<"AArch64ISD::SHL_PRED",  SDT_AArch64Arith>;
 def AArch64lsr_p  : SDNode<"AArch64ISD::SRL_PRED",  SDT_AArch64Arith>;
+def AArch64mul_p  : SDNode<"AArch64ISD::MUL_PRED",  SDT_AArch64Arith>;
 def AArch64sdiv_p : SDNode<"AArch64ISD::SDIV_PRED", SDT_AArch64Arith>;
 def AArch64smax_p : SDNode<"AArch64ISD::SMAX_PRED", SDT_AArch64Arith>;
 def AArch64smin_p : SDNode<"AArch64ISD::SMIN_PRED", SDT_AArch64Arith>;
@@ -291,9 +292,11 @@ let Predicates = [HasSVE] in {
   defm UMIN_ZI   : sve_int_arith_imm1_unsigned<0b11, "umin", AArch64umin_p>;
 
   defm MUL_ZI     : sve_int_arith_imm2<"mul", mul>;
-  defm MUL_ZPmZ   : sve_int_bin_pred_arit_2<0b000, "mul",   int_aarch64_sve_mul>;
-  defm SMULH_ZPmZ : sve_int_bin_pred_arit_2<0b010, "smulh", int_aarch64_sve_smulh>;
-  defm UMULH_ZPmZ : sve_int_bin_pred_arit_2<0b011, "umulh", int_aarch64_sve_umulh>;
+  defm MUL_ZPmZ   : sve_int_bin_pred_arit_2<0b000, "mul",   "MUL_ZPZZ",   int_aarch64_sve_mul,   DestructiveBinaryComm>;
+  defm SMULH_ZPmZ : sve_int_bin_pred_arit_2<0b010, "smulh", "SMULH_ZPZZ", int_aarch64_sve_smulh, DestructiveBinaryComm>;
+  defm UMULH_ZPmZ : sve_int_bin_pred_arit_2<0b011, "umulh", "UMULH_ZPZZ", int_aarch64_sve_umulh, DestructiveBinaryComm>;
+
+  defm MUL_ZPZZ   : sve_int_bin_pred_bhsd<AArch64mul_p>;
 
   // Add unpredicated alternative for the mul instruction.
   def : Pat<(mul nxv16i8:$Op1, nxv16i8:$Op2),

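The extra "MUL_ZPZZ" name and DestructiveBinaryComm flag matter because the
predicated SVE mul is destructive: the destination register is also the first
source. Pairing each real instruction with an unpredicated-form pseudo and
flagging it as a commutative destructive binary op lets the later
pseudo-expansion pass either reuse a source register directly or insert a
MOVPRFX copy. An illustrative expansion (a sketch of the general mechanism,
not output taken from this commit's tests):

  // Destructive form: z0 is both destination and first source.
  //   mul z0.d, p0/m, z0.d, z1.d
  // If the result must live in a register distinct from both sources, the
  // pseudo can instead be expanded with a prefix copy:
  //   movprfx z2, z0
  //   mul     z2.d, p0/m, z2.d, z1.d
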
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index c5dd327d2aef..8b5722ec94c2 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -2402,11 +2402,19 @@ multiclass sve_int_bin_pred_arit_1<bits<3> opc, string asm, string Ps,
   def : SVE_3_Op_Pat<nxv2i64, op, nxv2i1, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
-multiclass sve_int_bin_pred_arit_2<bits<3> opc, string asm, SDPatternOperator op> {
-  def _B : sve_int_bin_pred_arit_log<0b00, 0b10, opc, asm, ZPR8>;
-  def _H : sve_int_bin_pred_arit_log<0b01, 0b10, opc, asm, ZPR16>;
-  def _S : sve_int_bin_pred_arit_log<0b10, 0b10, opc, asm, ZPR32>;
-  def _D : sve_int_bin_pred_arit_log<0b11, 0b10, opc, asm, ZPR64>;
+multiclass sve_int_bin_pred_arit_2<bits<3> opc, string asm, string Ps,
+                                   SDPatternOperator op,
+                                   DestructiveInstTypeEnum flags> {
+  let DestructiveInstType = flags in {
+  def _B : sve_int_bin_pred_arit_log<0b00, 0b10, opc, asm, ZPR8>,
+             SVEPseudo2Instr<Ps # _B, 1>;
+  def _H : sve_int_bin_pred_arit_log<0b01, 0b10, opc, asm, ZPR16>,
+             SVEPseudo2Instr<Ps # _H, 1>;
+  def _S : sve_int_bin_pred_arit_log<0b10, 0b10, opc, asm, ZPR32>,
+             SVEPseudo2Instr<Ps # _S, 1>;
+  def _D : sve_int_bin_pred_arit_log<0b11, 0b10, opc, asm, ZPR64>,
+             SVEPseudo2Instr<Ps # _D, 1>;
+  }
 
   def : SVE_3_Op_Pat<nxv16i8, op, nxv16i1, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _B)>;
   def : SVE_3_Op_Pat<nxv8i16, op, nxv8i1, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _H)>;

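Mechanically, the reworked multiclass wraps the four element-width
instruction definitions in a DestructiveInstType region and attaches
SVEPseudo2Instr to tie each one to the same-width pseudo. For the mul case
this means, roughly (an assumed summary based on the multiclass parameters,
not verbatim TableGen output):

  // MUL_ZPmZ_B/_H/_S/_D : real instructions, DestructiveBinaryComm,
  //                       linked to MUL_ZPZZ_B/_H/_S/_D via SVEPseudo2Instr.
  // MUL_ZPZZ_B/_H/_S/_D : pseudos from sve_int_bin_pred_bhsd, selected for
  //                       the new AArch64mul_p (MUL_PRED) node.
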
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll
index 51970b01192f..9f172e51d85d 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-arith.ll
@@ -410,4 +410,323 @@ define void @add_v32i64(<32 x i64>* %a, <32 x i64>* %b) #0 {
   ret void
 }
 
+;
+; NOTE: Tests beyond this point only have CHECK lines to validate the first
+; VBYTES because the add tests already validate the legalisation code paths.
+;
+
+; Don't use SVE for 64-bit vectors.
+define <8 x i8> @mul_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
+; CHECK-LABEL: @mul_v8i8
+; CHECK: mul v0.8b, v0.8b, v1.8b
+; CHECK: ret
+  %res = mul <8 x i8> %op1, %op2
+  ret <8 x i8> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <16 x i8> @mul_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
+; CHECK-LABEL: @mul_v16i8
+; CHECK: mul v0.16b, v0.16b, v1.16b
+; CHECK: ret
+  %res = mul <16 x i8> %op1, %op2
+  ret <16 x i8> %res
+}
+
+define void @mul_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+; CHECK-LABEL: @mul_v32i8
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,32)]]
+; CHECK-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
+; CHECK: st1b { [[RES]].b }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <32 x i8>, <32 x i8>* %a
+  %op2 = load <32 x i8>, <32 x i8>* %b
+  %res = mul <32 x i8> %op1, %op2
+  store <32 x i8> %res, <32 x i8>* %a
+  ret void
+}
+
+define void @mul_v64i8(<64 x i8>* %a, <64 x i8>* %b) #0 {
+; CHECK-LABEL: @mul_v64i8
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,64)]]
+; CHECK-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
+; CHECK: st1b { [[RES]].b }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <64 x i8>, <64 x i8>* %a
+  %op2 = load <64 x i8>, <64 x i8>* %b
+  %res = mul <64 x i8> %op1, %op2
+  store <64 x i8> %res, <64 x i8>* %a
+  ret void
+}
+
+define void @mul_v128i8(<128 x i8>* %a, <128 x i8>* %b) #0 {
+; CHECK-LABEL: @mul_v128i8
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,128)]]
+; CHECK-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
+; CHECK: st1b { [[RES]].b }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <128 x i8>, <128 x i8>* %a
+  %op2 = load <128 x i8>, <128 x i8>* %b
+  %res = mul <128 x i8> %op1, %op2
+  store <128 x i8> %res, <128 x i8>* %a
+  ret void
+}
+
+define void @mul_v256i8(<256 x i8>* %a, <256 x i8>* %b) #0 {
+; CHECK-LABEL: @mul_v256i8
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,256)]]
+; CHECK-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
+; CHECK: st1b { [[RES]].b }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <256 x i8>, <256 x i8>* %a
+  %op2 = load <256 x i8>, <256 x i8>* %b
+  %res = mul <256 x i8> %op1, %op2
+  store <256 x i8> %res, <256 x i8>* %a
+  ret void
+}
+
+; Don't use SVE for 64-bit vectors.
+define <4 x i16> @mul_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
+; CHECK-LABEL: @mul_v4i16
+; CHECK: mul v0.4h, v0.4h, v1.4h
+; CHECK: ret
+  %res = mul <4 x i16> %op1, %op2
+  ret <4 x i16> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <8 x i16> @mul_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
+; CHECK-LABEL: @mul_v8i16
+; CHECK: mul v0.8h, v0.8h, v1.8h
+; CHECK: ret
+  %res = mul <8 x i16> %op1, %op2
+  ret <8 x i16> %res
+}
+
+define void @mul_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+; CHECK-LABEL: @mul_v16i16
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),16)]]
+; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
+; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <16 x i16>, <16 x i16>* %a
+  %op2 = load <16 x i16>, <16 x i16>* %b
+  %res = mul <16 x i16> %op1, %op2
+  store <16 x i16> %res, <16 x i16>* %a
+  ret void
+}
+
+define void @mul_v32i16(<32 x i16>* %a, <32 x i16>* %b) #0 {
+; CHECK-LABEL: @mul_v32i16
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),32)]]
+; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
+; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <32 x i16>, <32 x i16>* %a
+  %op2 = load <32 x i16>, <32 x i16>* %b
+  %res = mul <32 x i16> %op1, %op2
+  store <32 x i16> %res, <32 x i16>* %a
+  ret void
+}
+
+define void @mul_v64i16(<64 x i16>* %a, <64 x i16>* %b) #0 {
+; CHECK-LABEL: @mul_v64i16
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),64)]]
+; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
+; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <64 x i16>, <64 x i16>* %a
+  %op2 = load <64 x i16>, <64 x i16>* %b
+  %res = mul <64 x i16> %op1, %op2
+  store <64 x i16> %res, <64 x i16>* %a
+  ret void
+}
+
+define void @mul_v128i16(<128 x i16>* %a, <128 x i16>* %b) #0 {
+; CHECK-LABEL: @mul_v128i16
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),128)]]
+; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
+; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <128 x i16>, <128 x i16>* %a
+  %op2 = load <128 x i16>, <128 x i16>* %b
+  %res = mul <128 x i16> %op1, %op2
+  store <128 x i16> %res, <128 x i16>* %a
+  ret void
+}
+
+; Don't use SVE for 64-bit vectors.
+define <2 x i32> @mul_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 {
+; CHECK-LABEL: @mul_v2i32
+; CHECK: mul v0.2s, v0.2s, v1.2s
+; CHECK: ret
+  %res = mul <2 x i32> %op1, %op2
+  ret <2 x i32> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <4 x i32> @mul_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
+; CHECK-LABEL: @mul_v4i32
+; CHECK: mul v0.4s, v0.4s, v1.4s
+; CHECK: ret
+  %res = mul <4 x i32> %op1, %op2
+  ret <4 x i32> %res
+}
+
+define void @mul_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+; CHECK-LABEL: @mul_v8i32
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
+; CHECK-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
+; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op2 = load <8 x i32>, <8 x i32>* %b
+  %res = mul <8 x i32> %op1, %op2
+  store <8 x i32> %res, <8 x i32>* %a
+  ret void
+}
+
+define void @mul_v16i32(<16 x i32>* %a, <16 x i32>* %b) #0 {
+; CHECK-LABEL: @mul_v16i32
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),16)]]
+; CHECK-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
+; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <16 x i32>, <16 x i32>* %a
+  %op2 = load <16 x i32>, <16 x i32>* %b
+  %res = mul <16 x i32> %op1, %op2
+  store <16 x i32> %res, <16 x i32>* %a
+  ret void
+}
+
+define void @mul_v32i32(<32 x i32>* %a, <32 x i32>* %b) #0 {
+; CHECK-LABEL: @mul_v32i32
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),32)]]
+; CHECK-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
+; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <32 x i32>, <32 x i32>* %a
+  %op2 = load <32 x i32>, <32 x i32>* %b
+  %res = mul <32 x i32> %op1, %op2
+  store <32 x i32> %res, <32 x i32>* %a
+  ret void
+}
+
+define void @mul_v64i32(<64 x i32>* %a, <64 x i32>* %b) #0 {
+; CHECK-LABEL: @mul_v64i32
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),64)]]
+; CHECK-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
+; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <64 x i32>, <64 x i32>* %a
+  %op2 = load <64 x i32>, <64 x i32>* %b
+  %res = mul <64 x i32> %op1, %op2
+  store <64 x i32> %res, <64 x i32>* %a
+  ret void
+}
+
+; Vector i64 multiplications are not legal for NEON, so use SVE when available.
+define <1 x i64> @mul_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 {
+; CHECK-LABEL: @mul_v1i64
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl1
+; CHECK: mul z0.d, [[PG]]/m, z0.d, z1.d
+; CHECK: ret
+  %res = mul <1 x i64> %op1, %op2
+  ret <1 x i64> %res
+}
+
+; Vector i64 multiplications are not legal for NEON, so use SVE when available.
+define <2 x i64> @mul_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
+; CHECK-LABEL: @mul_v2i64
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl2
+; CHECK: mul z0.d, [[PG]]/m, z0.d, z1.d
+; CHECK: ret
+  %res = mul <2 x i64> %op1, %op2
+  ret <2 x i64> %res
+}
+
+define void @mul_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+; CHECK-LABEL: @mul_v4i64
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),4)]]
+; CHECK-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
+; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <4 x i64>, <4 x i64>* %a
+  %op2 = load <4 x i64>, <4 x i64>* %b
+  %res = mul <4 x i64> %op1, %op2
+  store <4 x i64> %res, <4 x i64>* %a
+  ret void
+}
+
+define void @mul_v8i64(<8 x i64>* %a, <8 x i64>* %b) #0 {
+; CHECK-LABEL: @mul_v8i64
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),8)]]
+; CHECK-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
+; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <8 x i64>, <8 x i64>* %a
+  %op2 = load <8 x i64>, <8 x i64>* %b
+  %res = mul <8 x i64> %op1, %op2
+  store <8 x i64> %res, <8 x i64>* %a
+  ret void
+}
+
+define void @mul_v16i64(<16 x i64>* %a, <16 x i64>* %b) #0 {
+; CHECK-LABEL: @mul_v16i64
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),16)]]
+; CHECK-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
+; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <16 x i64>, <16 x i64>* %a
+  %op2 = load <16 x i64>, <16 x i64>* %b
+  %res = mul <16 x i64> %op1, %op2
+  store <16 x i64> %res, <16 x i64>* %a
+  ret void
+}
+
+define void @mul_v32i64(<32 x i64>* %a, <32 x i64>* %b) #0 {
+; CHECK-LABEL: @mul_v32i64
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),32)]]
+; CHECK-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
+; CHECK: mul [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
+; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK: ret
+  %op1 = load <32 x i64>, <32 x i64>* %a
+  %op2 = load <32 x i64>, <32 x i64>* %b
+  %res = mul <32 x i64> %op1, %op2
+  store <32 x i64> %res, <32 x i64>* %a
+  ret void
+}
+
 attributes #0 = { "target-features"="+sve" }
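
The vl operands in the CHECK lines above follow from the register width: the
predicate covers min(register-bytes / element-bytes, fixed element count)
lanes. A worked instance, assuming a 256-bit implementation (VBYTES = 32);
recall that before this patch the v1i64/v2i64 cases would instead have been
expanded into scalar GPR multiplies:

  // vl for <32 x i16> on a 256-bit SVE machine:
  //   min(div(VBYTES,2), 32) = min(16, 32) = 16  ->  ptrue p0.h, vl16
  // vl for <4 x i64>:
  //   min(div(VBYTES,8), 4)  = min(4, 4)   = 4   ->  ptrue p0.d, vl4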
