[llvm] a1f369e - [AArch64][SVE] Add dot product lowering for PARTIAL_REDUCE_MLA node (#130933)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Apr 23 05:19:44 PDT 2025
Author: Nicholas Guy
Date: 2025-04-23T13:19:41+01:00
New Revision: a1f369e6309c8c6adaae886afc55817b97953641
URL: https://github.com/llvm/llvm-project/commit/a1f369e6309c8c6adaae886afc55817b97953641
DIFF: https://github.com/llvm/llvm-project/commit/a1f369e6309c8c6adaae886afc55817b97953641.diff
LOG: [AArch64][SVE] Add dot product lowering for PARTIAL_REDUCE_MLA node (#130933)
Add lowering in TableGen for the PARTIAL_REDUCE_U/SMLA ISD nodes. This
only takes effect once the DAG combine has formed the node. Also add a
check so the DAG combine is only performed when the node can eventually
be lowered by the target, which changes the NEON tests too.
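For context, the two type pairs marked Legal in this patch correspond
directly to the SVE dot-product instructions. Below is a minimal scalar
sketch of what "udot z0.s, z1.b, z2.b" computes per 128-bit granule; the
lane grouping shown is UDOT's own, while PARTIAL_REDUCE_UMLA itself only
guarantees the overall sum, not which input lanes feed which accumulator
lane:

#include <array>
#include <cstdint>

// Scalar sketch of SVE "udot z0.s, z1.b, z2.b" for one 128-bit granule:
// each i32 accumulator lane gains the sum of four adjacent unsigned
// i8*i8 products. This is one valid realisation of
// ISD::PARTIAL_REDUCE_UMLA(Acc, A, B) for the (nxv4i32, nxv16i8) pair.
std::array<uint32_t, 4> udotGranule(std::array<uint32_t, 4> Acc,
                                    const std::array<uint8_t, 16> &A,
                                    const std::array<uint8_t, 16> &B) {
  for (int Lane = 0; Lane < 4; ++Lane)
    for (int I = 0; I < 4; ++I)
      Acc[Lane] += uint32_t(A[4 * Lane + I]) * uint32_t(B[4 * Lane + I]);
  return Acc;
}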
---------
Co-authored-by: James Chesterman <james.chesterman at arm.com>
Added:
Modified:
llvm/include/llvm/CodeGen/TargetLowering.h
llvm/include/llvm/Target/TargetSelectionDAG.td
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
llvm/lib/CodeGen/TargetLoweringBase.cpp
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 657d8637d6811..abe261728a3e6 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1650,6 +1650,24 @@ class TargetLoweringBase {
getCondCodeAction(CC, VT) == Custom;
}
+ /// Return how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input type
+ /// InputVT should be treated. Either it's legal, needs to be promoted to a
+ /// larger size, needs to be expanded to some other code sequence, or the
+ /// target has a custom expander for it.
+ LegalizeAction getPartialReduceMLAAction(EVT AccVT, EVT InputVT) const {
+ PartialReduceActionTypes TypePair = {AccVT.getSimpleVT().SimpleTy,
+ InputVT.getSimpleVT().SimpleTy};
+ auto It = PartialReduceMLAActions.find(TypePair);
+ return It != PartialReduceMLAActions.end() ? It->second : Expand;
+ }
+
+ /// Return true if a PARTIAL_REDUCE_U/SMLA node with the specified types is
+ /// legal or custom for this target.
+ bool isPartialReduceMLALegalOrCustom(EVT AccVT, EVT InputVT) const {
+ LegalizeAction Action = getPartialReduceMLAAction(AccVT, InputVT);
+ return Action == Legal || Action == Custom;
+ }
+
/// If the action for this operation is to promote, this method returns the
/// ValueType to promote to.
MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
@@ -2727,6 +2745,18 @@ class TargetLoweringBase {
setCondCodeAction(CCs, VT, Action);
}
+ /// Indicate how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input
+ /// type InputVT should be treated by the target. Either it's legal, needs to
+ /// be promoted to a larger size, needs to be expanded to some other code
+ /// sequence, or the target has a custom expander for it.
+ void setPartialReduceMLAAction(MVT AccVT, MVT InputVT,
+ LegalizeAction Action) {
+ assert(AccVT.isValid() && InputVT.isValid() &&
+ "setPartialReduceMLAAction types aren't valid");
+ PartialReduceActionTypes TypePair = {AccVT.SimpleTy, InputVT.SimpleTy};
+ PartialReduceMLAActions[TypePair] = Action;
+ }
+
/// If Opc/OrigVT is specified as being promoted, the promotion code defaults
/// to trying a larger integer/fp until it can find one that works. If that
/// default is insufficient, this method can be used by the target to override
@@ -3706,6 +3736,13 @@ class TargetLoweringBase {
/// up the MVT::VALUETYPE_SIZE value to the next multiple of 8.
uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8];
+ using PartialReduceActionTypes =
+ std::pair<MVT::SimpleValueType, MVT::SimpleValueType>;
+ /// For each result type and input type for the ISD::PARTIAL_REDUCE_U/SMLA
+ /// nodes, keep a LegalizeAction which indicates how instruction selection
+ /// should deal with this operation.
+ DenseMap<PartialReduceActionTypes, LegalizeAction> PartialReduceMLAActions;
+
ValueTypeActionImpl ValueTypeActions;
private:
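Taken together, the new hooks give targets a per-(accumulator, input)
type action table, with unregistered pairs defaulting to Expand. As a
hedged sketch of the intended usage (MyTargetLowering and the MVT pair
are illustrative, not part of this patch), an opt-in would sit in a
target's TargetLowering constructor:

// Sketch only: setPartialReduceMLAAction is protected, so it is called
// from the target's own TargetLowering constructor, alongside the usual
// setOperationAction calls.
MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
    : TargetLowering(TM) {
  // Accumulator type first, then the wider-element-count input type.
  setPartialReduceMLAAction(MVT::v4i32, MVT::v16i8, Legal);
  // Any pair left unregistered resolves to Expand in
  // getPartialReduceMLAAction(), so the DAG combine will not form
  // PARTIAL_REDUCE_*MLA nodes for it.
}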
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 9c241b6c4df0f..a807ce267aacf 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -313,6 +313,10 @@ def SDTSubVecInsert : SDTypeProfile<1, 3, [ // subvector insert
SDTCisSubVecOfVec<2, 1>, SDTCisSameAs<0,1>, SDTCisInt<3>
]>;
+def SDTPartialReduceMLA : SDTypeProfile<1, 3, [ // partial reduce mla
+ SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>
+]>;
+
def SDTPrefetch : SDTypeProfile<0, 4, [ // prefetch
SDTCisPtrTy<0>, SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>, SDTCisInt<1>
]>;
@@ -513,6 +517,11 @@ def vecreduce_fmax : SDNode<"ISD::VECREDUCE_FMAX", SDTFPVecReduce>;
def vecreduce_fminimum : SDNode<"ISD::VECREDUCE_FMINIMUM", SDTFPVecReduce>;
def vecreduce_fmaximum : SDNode<"ISD::VECREDUCE_FMAXIMUM", SDTFPVecReduce>;
+def partial_reduce_umla : SDNode<"ISD::PARTIAL_REDUCE_UMLA",
+ SDTPartialReduceMLA>;
+def partial_reduce_smla : SDNode<"ISD::PARTIAL_REDUCE_SMLA",
+ SDTPartialReduceMLA>;
+
def fadd : SDNode<"ISD::FADD" , SDTFPBinOp, [SDNPCommutative]>;
def fsub : SDNode<"ISD::FSUB" , SDTFPBinOp>;
def fmul : SDNode<"ISD::FMUL" , SDTFPBinOp, [SDNPCommutative]>;
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index c22cd6472684c..cb5943eca82f5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -12644,8 +12644,13 @@ SDValue DAGCombiner::visitPARTIAL_REDUCE_MLA(SDNode *N) {
if (LHSExtOpVT != RHSExtOp.getValueType() || LHSOpcode != RHSOpcode)
return SDValue();
- // FIXME: Add a check to only perform the DAG combine if there is lowering
- // provided by the target
+ // Only perform the DAG combine if there is custom lowering provided by the
+ // target
+ auto *Context = DAG.getContext();
+ if (!TLI.isPartialReduceMLALegalOrCustom(
+ TLI.getTypeToTransformTo(*Context, N->getValueType(0)),
+ TLI.getTypeToTransformTo(*Context, LHSExtOpVT)))
+ return SDValue();
bool ExtIsSigned = LHSOpcode == ISD::SIGN_EXTEND;
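To make the guard concrete, here is a worked trace as I read the patch
(types taken from the tests in this commit), which also explains the
NEON test churn further down:

// SVE, the combine fires:
//   the node's accumulator VT is nxv4i32 and the pre-extend input VT
//   (LHSExtOpVT) is nxv16i8; getTypeToTransformTo leaves both unchanged
//   since they are already legal, the (nxv4i32, nxv16i8) entry says
//   Legal, so the extends are folded away and the resulting node is
//   matched by the td patterns below, emitting UDOT.
// NEON, the combine bails:
//   no fixed-length type pair is registered, the lookup falls back to
//   Expand, isPartialReduceMLALegalOrCustom() is false and the combine
//   returns SDValue(); the generic expansion runs instead, which is why
//   the neon-partial-reduce-dot-product.ll checks changed.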
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
index 27bde7b96c857..c61e5b263a967 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -469,8 +469,6 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case ISD::VECTOR_COMPRESS:
case ISD::SCMP:
case ISD::UCMP:
- case ISD::PARTIAL_REDUCE_UMLA:
- case ISD::PARTIAL_REDUCE_SMLA:
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
break;
case ISD::SMULFIX:
@@ -530,6 +528,11 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
Action = TLI.getOperationAction(Node->getOpcode(), OpVT);
break;
}
+ case ISD::PARTIAL_REDUCE_UMLA:
+ case ISD::PARTIAL_REDUCE_SMLA:
+ Action = TLI.getPartialReduceMLAAction(Node->getValueType(0),
+ Node->getOperand(1).getValueType());
+ break;
#define BEGIN_REGISTER_VP_SDNODE(VPID, LEGALPOS, ...) \
case ISD::VPID: { \
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 483e52d16d537..c85f0c71ef25f 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -843,10 +843,6 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::GET_FPENV, VT, Expand);
setOperationAction(ISD::SET_FPENV, VT, Expand);
setOperationAction(ISD::RESET_FPENV, VT, Expand);
-
- // PartialReduceMLA operations default to expand.
- setOperationAction({ISD::PARTIAL_REDUCE_UMLA, ISD::PARTIAL_REDUCE_SMLA}, VT,
- Expand);
}
// Most targets ignore the @llvm.prefetch intrinsic.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 4e45162a687f8..447794cc2b744 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1850,6 +1850,14 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::INTRINSIC_WO_CHAIN, VT, Custom);
}
+ // Handle partial reduction operations
+ if (EnablePartialReduceNodes && Subtarget->isSVEorStreamingSVEAvailable()) {
+ // Mark known legal pairs as 'Legal' (these will expand to UDOT or SDOT).
+ // Other pairs will default to 'Expand'.
+ setPartialReduceMLAAction(MVT::nxv2i64, MVT::nxv8i16, Legal);
+ setPartialReduceMLAAction(MVT::nxv4i32, MVT::nxv16i8, Legal);
+ }
+
// Handle operations that are only available in non-streaming SVE mode.
if (Subtarget->isSVEAvailable()) {
for (auto VT : {MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64,
@@ -1889,7 +1897,6 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
}
}
-
if (Subtarget->hasMOPS() && Subtarget->hasMTE()) {
// Only required for llvm.aarch64.mops.memset.tag
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index d13728ec930c8..bd394671881e8 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -653,6 +653,17 @@ let Predicates = [HasSVE_or_SME] in {
defm SDOT_ZZZ : sve_intx_dot<0b0, "sdot", AArch64sdot>;
defm UDOT_ZZZ : sve_intx_dot<0b1, "udot", AArch64udot>;
+ let Predicates = [HasSVE_or_SME] in {
+ def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)),
+ (UDOT_ZZZ_S $Acc, $MulLHS, $MulRHS)>;
+ def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)),
+ (SDOT_ZZZ_S $Acc, $MulLHS, $MulRHS)>;
+ def : Pat<(nxv2i64 (partial_reduce_umla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)),
+ (UDOT_ZZZ_D $Acc, $MulLHS, $MulRHS)>;
+ def : Pat<(nxv2i64 (partial_reduce_smla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)),
+ (SDOT_ZZZ_D $Acc, $MulLHS, $MulRHS)>;
+ } // End HasSVE_or_SME
+
defm SDOT_ZZZI : sve_intx_dot_by_indexed_elem<0b0, "sdot", int_aarch64_sve_sdot_lane>;
defm UDOT_ZZZI : sve_intx_dot_by_indexed_elem<0b1, "udot", int_aarch64_sve_udot_lane>;
diff --git a/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll b/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
index c48ebbad4fe21..9e305056abce2 100644
--- a/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
+++ b/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
@@ -12,15 +12,13 @@ define <4 x i32> @udot(<4 x i32> %acc, <16 x i8> %u, <16 x i8> %s) {
;
; CHECK-NODOT-LABEL: udot:
; CHECK-NODOT: // %bb.0:
-; CHECK-NODOT-NEXT: ushll v3.8h, v1.8b, #0
-; CHECK-NODOT-NEXT: ushll v4.8h, v2.8b, #0
-; CHECK-NODOT-NEXT: ushll2 v1.8h, v1.16b, #0
-; CHECK-NODOT-NEXT: ushll2 v2.8h, v2.16b, #0
-; CHECK-NODOT-NEXT: umlal v0.4s, v4.4h, v3.4h
-; CHECK-NODOT-NEXT: umull v5.4s, v2.4h, v1.4h
-; CHECK-NODOT-NEXT: umlal2 v0.4s, v2.8h, v1.8h
-; CHECK-NODOT-NEXT: umlal2 v5.4s, v4.8h, v3.8h
-; CHECK-NODOT-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-NODOT-NEXT: umull v3.8h, v2.8b, v1.8b
+; CHECK-NODOT-NEXT: umull2 v1.8h, v2.16b, v1.16b
+; CHECK-NODOT-NEXT: ushll v2.4s, v1.4h, #0
+; CHECK-NODOT-NEXT: uaddw v0.4s, v0.4s, v3.4h
+; CHECK-NODOT-NEXT: uaddw2 v2.4s, v2.4s, v3.8h
+; CHECK-NODOT-NEXT: uaddw2 v0.4s, v0.4s, v1.8h
+; CHECK-NODOT-NEXT: add v0.4s, v2.4s, v0.4s
; CHECK-NODOT-NEXT: ret
%u.wide = zext <16 x i8> %u to <16 x i32>
%s.wide = zext <16 x i8> %s to <16 x i32>
@@ -52,20 +50,18 @@ define <4 x i32> @udot_in_loop(ptr %p1, ptr %p2){
; CHECK-NODOT-NEXT: mov x8, xzr
; CHECK-NODOT-NEXT: .LBB1_1: // %vector.body
; CHECK-NODOT-NEXT: // =>This Inner Loop Header: Depth=1
-; CHECK-NODOT-NEXT: ldr q0, [x1, x8]
-; CHECK-NODOT-NEXT: ldr q2, [x0, x8]
+; CHECK-NODOT-NEXT: ldr q0, [x0, x8]
+; CHECK-NODOT-NEXT: ldr q2, [x1, x8]
; CHECK-NODOT-NEXT: add x8, x8, #16
; CHECK-NODOT-NEXT: cmp x8, #16
-; CHECK-NODOT-NEXT: ushll2 v3.8h, v0.16b, #0
-; CHECK-NODOT-NEXT: ushll2 v4.8h, v2.16b, #0
-; CHECK-NODOT-NEXT: ushll v5.8h, v0.8b, #0
-; CHECK-NODOT-NEXT: ushll v2.8h, v2.8b, #0
+; CHECK-NODOT-NEXT: umull v3.8h, v0.8b, v2.8b
+; CHECK-NODOT-NEXT: umull2 v2.8h, v0.16b, v2.16b
; CHECK-NODOT-NEXT: mov v0.16b, v1.16b
-; CHECK-NODOT-NEXT: umull v6.4s, v4.4h, v3.4h
-; CHECK-NODOT-NEXT: umlal v1.4s, v2.4h, v5.4h
-; CHECK-NODOT-NEXT: umlal2 v6.4s, v2.8h, v5.8h
-; CHECK-NODOT-NEXT: umlal2 v1.4s, v4.8h, v3.8h
-; CHECK-NODOT-NEXT: add v1.4s, v6.4s, v1.4s
+; CHECK-NODOT-NEXT: ushll v1.4s, v2.4h, #0
+; CHECK-NODOT-NEXT: uaddw v4.4s, v0.4s, v3.4h
+; CHECK-NODOT-NEXT: uaddw2 v1.4s, v1.4s, v3.8h
+; CHECK-NODOT-NEXT: uaddw2 v2.4s, v4.4s, v2.8h
+; CHECK-NODOT-NEXT: add v1.4s, v1.4s, v2.4s
; CHECK-NODOT-NEXT: b.ne .LBB1_1
; CHECK-NODOT-NEXT: // %bb.2: // %end
; CHECK-NODOT-NEXT: ret
@@ -99,19 +95,17 @@ define <2 x i32> @udot_narrow(<2 x i32> %acc, <8 x i8> %u, <8 x i8> %s) {
;
; CHECK-NODOT-LABEL: udot_narrow:
; CHECK-NODOT: // %bb.0:
-; CHECK-NODOT-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-NODOT-NEXT: ushll v2.8h, v2.8b, #0
+; CHECK-NODOT-NEXT: umull v1.8h, v2.8b, v1.8b
; CHECK-NODOT-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NODOT-NEXT: umull v3.4s, v2.4h, v1.4h
-; CHECK-NODOT-NEXT: umull2 v4.4s, v2.8h, v1.8h
-; CHECK-NODOT-NEXT: ext v5.16b, v1.16b, v1.16b, #8
-; CHECK-NODOT-NEXT: ext v6.16b, v2.16b, v2.16b, #8
-; CHECK-NODOT-NEXT: umlal v0.4s, v2.4h, v1.4h
+; CHECK-NODOT-NEXT: ushll v2.4s, v1.4h, #0
+; CHECK-NODOT-NEXT: ushll2 v3.4s, v1.8h, #0
+; CHECK-NODOT-NEXT: ext v4.16b, v1.16b, v1.16b, #8
+; CHECK-NODOT-NEXT: uaddw v0.4s, v0.4s, v1.4h
; CHECK-NODOT-NEXT: ext v3.16b, v3.16b, v3.16b, #8
-; CHECK-NODOT-NEXT: ext v1.16b, v4.16b, v4.16b, #8
-; CHECK-NODOT-NEXT: umlal v3.4s, v6.4h, v5.4h
-; CHECK-NODOT-NEXT: add v0.2s, v1.2s, v0.2s
+; CHECK-NODOT-NEXT: ext v2.16b, v2.16b, v2.16b, #8
; CHECK-NODOT-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-NODOT-NEXT: uaddw v1.4s, v2.4s, v4.4h
+; CHECK-NODOT-NEXT: add v0.2s, v1.2s, v0.2s
; CHECK-NODOT-NEXT: ret
%u.wide = zext <8 x i8> %u to <8 x i32>
%s.wide = zext <8 x i8> %s to <8 x i32>
@@ -128,15 +122,13 @@ define <4 x i32> @sdot(<4 x i32> %acc, <16 x i8> %u, <16 x i8> %s) {
;
; CHECK-NODOT-LABEL: sdot:
; CHECK-NODOT: // %bb.0:
-; CHECK-NODOT-NEXT: sshll v3.8h, v1.8b, #0
-; CHECK-NODOT-NEXT: sshll v4.8h, v2.8b, #0
-; CHECK-NODOT-NEXT: sshll2 v1.8h, v1.16b, #0
-; CHECK-NODOT-NEXT: sshll2 v2.8h, v2.16b, #0
-; CHECK-NODOT-NEXT: smlal v0.4s, v4.4h, v3.4h
-; CHECK-NODOT-NEXT: smull v5.4s, v2.4h, v1.4h
-; CHECK-NODOT-NEXT: smlal2 v0.4s, v2.8h, v1.8h
-; CHECK-NODOT-NEXT: smlal2 v5.4s, v4.8h, v3.8h
-; CHECK-NODOT-NEXT: add v0.4s, v5.4s, v0.4s
+; CHECK-NODOT-NEXT: smull v3.8h, v2.8b, v1.8b
+; CHECK-NODOT-NEXT: smull2 v1.8h, v2.16b, v1.16b
+; CHECK-NODOT-NEXT: sshll v2.4s, v1.4h, #0
+; CHECK-NODOT-NEXT: saddw v0.4s, v0.4s, v3.4h
+; CHECK-NODOT-NEXT: saddw2 v2.4s, v2.4s, v3.8h
+; CHECK-NODOT-NEXT: saddw2 v0.4s, v0.4s, v1.8h
+; CHECK-NODOT-NEXT: add v0.4s, v2.4s, v0.4s
; CHECK-NODOT-NEXT: ret
%u.wide = sext <16 x i8> %u to <16 x i32>
%s.wide = sext <16 x i8> %s to <16 x i32>
@@ -153,19 +145,17 @@ define <2 x i32> @sdot_narrow(<2 x i32> %acc, <8 x i8> %u, <8 x i8> %s) {
;
; CHECK-NODOT-LABEL: sdot_narrow:
; CHECK-NODOT: // %bb.0:
-; CHECK-NODOT-NEXT: sshll v1.8h, v1.8b, #0
-; CHECK-NODOT-NEXT: sshll v2.8h, v2.8b, #0
+; CHECK-NODOT-NEXT: smull v1.8h, v2.8b, v1.8b
; CHECK-NODOT-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NODOT-NEXT: smull v3.4s, v2.4h, v1.4h
-; CHECK-NODOT-NEXT: smull2 v4.4s, v2.8h, v1.8h
-; CHECK-NODOT-NEXT: ext v5.16b, v1.16b, v1.16b, #8
-; CHECK-NODOT-NEXT: ext v6.16b, v2.16b, v2.16b, #8
-; CHECK-NODOT-NEXT: smlal v0.4s, v2.4h, v1.4h
+; CHECK-NODOT-NEXT: sshll v2.4s, v1.4h, #0
+; CHECK-NODOT-NEXT: sshll2 v3.4s, v1.8h, #0
+; CHECK-NODOT-NEXT: ext v4.16b, v1.16b, v1.16b, #8
+; CHECK-NODOT-NEXT: saddw v0.4s, v0.4s, v1.4h
; CHECK-NODOT-NEXT: ext v3.16b, v3.16b, v3.16b, #8
-; CHECK-NODOT-NEXT: ext v1.16b, v4.16b, v4.16b, #8
-; CHECK-NODOT-NEXT: smlal v3.4s, v6.4h, v5.4h
-; CHECK-NODOT-NEXT: add v0.2s, v1.2s, v0.2s
+; CHECK-NODOT-NEXT: ext v2.16b, v2.16b, v2.16b, #8
; CHECK-NODOT-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-NODOT-NEXT: saddw v1.4s, v2.4s, v4.4h
+; CHECK-NODOT-NEXT: add v0.2s, v1.2s, v0.2s
; CHECK-NODOT-NEXT: ret
%u.wide = sext <8 x i8> %u to <8 x i32>
%s.wide = sext <8 x i8> %s to <8 x i32>
@@ -417,27 +407,19 @@ define <4 x i64> @udot_8to64(<4 x i64> %acc, <16 x i8> %a, <16 x i8> %b) {
;
; CHECK-NODOT-LABEL: udot_8to64:
; CHECK-NODOT: // %bb.0: // %entry
-; CHECK-NODOT-NEXT: ushll v4.8h, v3.8b, #0
-; CHECK-NODOT-NEXT: ushll v5.8h, v2.8b, #0
-; CHECK-NODOT-NEXT: ushll2 v3.8h, v3.16b, #0
-; CHECK-NODOT-NEXT: ushll2 v2.8h, v2.16b, #0
-; CHECK-NODOT-NEXT: ushll v6.4s, v4.4h, #0
-; CHECK-NODOT-NEXT: ushll v7.4s, v5.4h, #0
+; CHECK-NODOT-NEXT: umull v4.8h, v2.8b, v3.8b
+; CHECK-NODOT-NEXT: umull2 v2.8h, v2.16b, v3.16b
+; CHECK-NODOT-NEXT: ushll v3.4s, v4.4h, #0
+; CHECK-NODOT-NEXT: ushll v5.4s, v2.4h, #0
; CHECK-NODOT-NEXT: ushll2 v4.4s, v4.8h, #0
-; CHECK-NODOT-NEXT: ushll2 v5.4s, v5.8h, #0
-; CHECK-NODOT-NEXT: ushll2 v16.4s, v3.8h, #0
-; CHECK-NODOT-NEXT: ushll2 v17.4s, v2.8h, #0
-; CHECK-NODOT-NEXT: ushll v3.4s, v3.4h, #0
-; CHECK-NODOT-NEXT: ushll v2.4s, v2.4h, #0
-; CHECK-NODOT-NEXT: umlal2 v1.2d, v7.4s, v6.4s
-; CHECK-NODOT-NEXT: umlal v0.2d, v7.2s, v6.2s
-; CHECK-NODOT-NEXT: umull2 v18.2d, v5.4s, v4.4s
-; CHECK-NODOT-NEXT: umull v4.2d, v5.2s, v4.2s
-; CHECK-NODOT-NEXT: umlal2 v1.2d, v17.4s, v16.4s
-; CHECK-NODOT-NEXT: umlal v0.2d, v17.2s, v16.2s
-; CHECK-NODOT-NEXT: umlal2 v18.2d, v2.4s, v3.4s
-; CHECK-NODOT-NEXT: umlal v4.2d, v2.2s, v3.2s
-; CHECK-NODOT-NEXT: add v1.2d, v18.2d, v1.2d
+; CHECK-NODOT-NEXT: ushll2 v2.4s, v2.8h, #0
+; CHECK-NODOT-NEXT: uaddw2 v1.2d, v1.2d, v3.4s
+; CHECK-NODOT-NEXT: uaddw v0.2d, v0.2d, v3.2s
+; CHECK-NODOT-NEXT: uaddl2 v3.2d, v4.4s, v5.4s
+; CHECK-NODOT-NEXT: uaddl v4.2d, v4.2s, v5.2s
+; CHECK-NODOT-NEXT: uaddw2 v1.2d, v1.2d, v2.4s
+; CHECK-NODOT-NEXT: uaddw v0.2d, v0.2d, v2.2s
+; CHECK-NODOT-NEXT: add v1.2d, v3.2d, v1.2d
; CHECK-NODOT-NEXT: add v0.2d, v4.2d, v0.2d
; CHECK-NODOT-NEXT: ret
entry:
@@ -460,27 +442,19 @@ define <4 x i64> @sdot_8to64(<4 x i64> %acc, <16 x i8> %a, <16 x i8> %b){
;
; CHECK-NODOT-LABEL: sdot_8to64:
; CHECK-NODOT: // %bb.0: // %entry
-; CHECK-NODOT-NEXT: sshll v4.8h, v3.8b, #0
-; CHECK-NODOT-NEXT: sshll v5.8h, v2.8b, #0
-; CHECK-NODOT-NEXT: sshll2 v3.8h, v3.16b, #0
-; CHECK-NODOT-NEXT: sshll2 v2.8h, v2.16b, #0
-; CHECK-NODOT-NEXT: sshll v6.4s, v4.4h, #0
-; CHECK-NODOT-NEXT: sshll v7.4s, v5.4h, #0
+; CHECK-NODOT-NEXT: smull v4.8h, v2.8b, v3.8b
+; CHECK-NODOT-NEXT: smull2 v2.8h, v2.16b, v3.16b
+; CHECK-NODOT-NEXT: sshll v3.4s, v4.4h, #0
+; CHECK-NODOT-NEXT: sshll v5.4s, v2.4h, #0
; CHECK-NODOT-NEXT: sshll2 v4.4s, v4.8h, #0
-; CHECK-NODOT-NEXT: sshll2 v5.4s, v5.8h, #0
-; CHECK-NODOT-NEXT: sshll2 v16.4s, v3.8h, #0
-; CHECK-NODOT-NEXT: sshll2 v17.4s, v2.8h, #0
-; CHECK-NODOT-NEXT: sshll v3.4s, v3.4h, #0
-; CHECK-NODOT-NEXT: sshll v2.4s, v2.4h, #0
-; CHECK-NODOT-NEXT: smlal2 v1.2d, v7.4s, v6.4s
-; CHECK-NODOT-NEXT: smlal v0.2d, v7.2s, v6.2s
-; CHECK-NODOT-NEXT: smull2 v18.2d, v5.4s, v4.4s
-; CHECK-NODOT-NEXT: smull v4.2d, v5.2s, v4.2s
-; CHECK-NODOT-NEXT: smlal2 v1.2d, v17.4s, v16.4s
-; CHECK-NODOT-NEXT: smlal v0.2d, v17.2s, v16.2s
-; CHECK-NODOT-NEXT: smlal2 v18.2d, v2.4s, v3.4s
-; CHECK-NODOT-NEXT: smlal v4.2d, v2.2s, v3.2s
-; CHECK-NODOT-NEXT: add v1.2d, v18.2d, v1.2d
+; CHECK-NODOT-NEXT: sshll2 v2.4s, v2.8h, #0
+; CHECK-NODOT-NEXT: saddw2 v1.2d, v1.2d, v3.4s
+; CHECK-NODOT-NEXT: saddw v0.2d, v0.2d, v3.2s
+; CHECK-NODOT-NEXT: saddl2 v3.2d, v4.4s, v5.4s
+; CHECK-NODOT-NEXT: saddl v4.2d, v4.2s, v5.2s
+; CHECK-NODOT-NEXT: saddw2 v1.2d, v1.2d, v2.4s
+; CHECK-NODOT-NEXT: saddw v0.2d, v0.2d, v2.2s
+; CHECK-NODOT-NEXT: add v1.2d, v3.2d, v1.2d
; CHECK-NODOT-NEXT: add v0.2d, v4.2d, v0.2d
; CHECK-NODOT-NEXT: ret
entry:
@@ -797,10 +771,9 @@ define <4 x i64> @sdot_no_bin_op_8to64(<4 x i64> %acc, <16 x i8> %a){
define <4 x i32> @not_udot(<4 x i32> %acc, <8 x i8> %u, <8 x i8> %s) #0{
; CHECK-LABEL: not_udot:
; CHECK: // %bb.0:
-; CHECK-NEXT: ushll v1.8h, v1.8b, #0
-; CHECK-NEXT: ushll v2.8h, v2.8b, #0
-; CHECK-NEXT: umlal v0.4s, v2.4h, v1.4h
-; CHECK-NEXT: umlal2 v0.4s, v2.8h, v1.8h
+; CHECK-NEXT: umull v1.8h, v2.8b, v1.8b
+; CHECK-NEXT: uaddw v0.4s, v0.4s, v1.4h
+; CHECK-NEXT: uaddw2 v0.4s, v0.4s, v1.8h
; CHECK-NEXT: ret
%u.wide = zext <8 x i8> %u to <8 x i32>
%s.wide = zext <8 x i8> %s to <8 x i32>
diff --git a/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll b/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll
index 8d3b12e359f3f..ed27f40aba774 100644
--- a/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll
+++ b/llvm/test/CodeGen/AArch64/sve-partial-reduce-dot-product.ll
@@ -11,24 +11,7 @@ define <vscale x 4 x i32> @udot(<vscale x 4 x i32> %acc, <vscale x 16 x i8> %a,
;
; CHECK-NEWLOWERING-LABEL: udot:
; CHECK-NEWLOWERING: // %bb.0: // %entry
-; CHECK-NEWLOWERING-NEXT: uunpklo z3.h, z2.b
-; CHECK-NEWLOWERING-NEXT: uunpklo z4.h, z1.b
-; CHECK-NEWLOWERING-NEXT: uunpkhi z2.h, z2.b
-; CHECK-NEWLOWERING-NEXT: uunpkhi z1.h, z1.b
-; CHECK-NEWLOWERING-NEXT: ptrue p0.s
-; CHECK-NEWLOWERING-NEXT: uunpklo z5.s, z3.h
-; CHECK-NEWLOWERING-NEXT: uunpklo z6.s, z4.h
-; CHECK-NEWLOWERING-NEXT: uunpkhi z3.s, z3.h
-; CHECK-NEWLOWERING-NEXT: uunpkhi z4.s, z4.h
-; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z6.s, z5.s
-; CHECK-NEWLOWERING-NEXT: uunpkhi z5.s, z2.h
-; CHECK-NEWLOWERING-NEXT: uunpkhi z6.s, z1.h
-; CHECK-NEWLOWERING-NEXT: mul z3.s, z4.s, z3.s
-; CHECK-NEWLOWERING-NEXT: uunpklo z2.s, z2.h
-; CHECK-NEWLOWERING-NEXT: uunpklo z1.s, z1.h
-; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z6.s, z5.s
-; CHECK-NEWLOWERING-NEXT: mad z1.s, p0/m, z2.s, z3.s
-; CHECK-NEWLOWERING-NEXT: add z0.s, z1.s, z0.s
+; CHECK-NEWLOWERING-NEXT: udot z0.s, z1.b, z2.b
; CHECK-NEWLOWERING-NEXT: ret
entry:
%a.wide = zext <vscale x 16 x i8> %a to <vscale x 16 x i32>
@@ -46,24 +29,7 @@ define <vscale x 2 x i64> @udot_wide(<vscale x 2 x i64> %acc, <vscale x 8 x i16>
;
; CHECK-NEWLOWERING-LABEL: udot_wide:
; CHECK-NEWLOWERING: // %bb.0: // %entry
-; CHECK-NEWLOWERING-NEXT: uunpklo z3.s, z2.h
-; CHECK-NEWLOWERING-NEXT: uunpklo z4.s, z1.h
-; CHECK-NEWLOWERING-NEXT: uunpkhi z2.s, z2.h
-; CHECK-NEWLOWERING-NEXT: uunpkhi z1.s, z1.h
-; CHECK-NEWLOWERING-NEXT: ptrue p0.d
-; CHECK-NEWLOWERING-NEXT: uunpklo z5.d, z3.s
-; CHECK-NEWLOWERING-NEXT: uunpklo z6.d, z4.s
-; CHECK-NEWLOWERING-NEXT: uunpkhi z3.d, z3.s
-; CHECK-NEWLOWERING-NEXT: uunpkhi z4.d, z4.s
-; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z6.d, z5.d
-; CHECK-NEWLOWERING-NEXT: uunpkhi z5.d, z2.s
-; CHECK-NEWLOWERING-NEXT: uunpkhi z6.d, z1.s
-; CHECK-NEWLOWERING-NEXT: mul z3.d, z4.d, z3.d
-; CHECK-NEWLOWERING-NEXT: uunpklo z2.d, z2.s
-; CHECK-NEWLOWERING-NEXT: uunpklo z1.d, z1.s
-; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z6.d, z5.d
-; CHECK-NEWLOWERING-NEXT: mad z1.d, p0/m, z2.d, z3.d
-; CHECK-NEWLOWERING-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEWLOWERING-NEXT: udot z0.d, z1.h, z2.h
; CHECK-NEWLOWERING-NEXT: ret
entry:
%a.wide = zext <vscale x 8 x i16> %a to <vscale x 8 x i64>
@@ -81,24 +47,7 @@ define <vscale x 4 x i32> @sdot(<vscale x 4 x i32> %accc, <vscale x 16 x i8> %a,
;
; CHECK-NEWLOWERING-LABEL: sdot:
; CHECK-NEWLOWERING: // %bb.0: // %entry
-; CHECK-NEWLOWERING-NEXT: sunpklo z3.h, z2.b
-; CHECK-NEWLOWERING-NEXT: sunpklo z4.h, z1.b
-; CHECK-NEWLOWERING-NEXT: sunpkhi z2.h, z2.b
-; CHECK-NEWLOWERING-NEXT: sunpkhi z1.h, z1.b
-; CHECK-NEWLOWERING-NEXT: ptrue p0.s
-; CHECK-NEWLOWERING-NEXT: sunpklo z5.s, z3.h
-; CHECK-NEWLOWERING-NEXT: sunpklo z6.s, z4.h
-; CHECK-NEWLOWERING-NEXT: sunpkhi z3.s, z3.h
-; CHECK-NEWLOWERING-NEXT: sunpkhi z4.s, z4.h
-; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z6.s, z5.s
-; CHECK-NEWLOWERING-NEXT: sunpkhi z5.s, z2.h
-; CHECK-NEWLOWERING-NEXT: sunpkhi z6.s, z1.h
-; CHECK-NEWLOWERING-NEXT: mul z3.s, z4.s, z3.s
-; CHECK-NEWLOWERING-NEXT: sunpklo z2.s, z2.h
-; CHECK-NEWLOWERING-NEXT: sunpklo z1.s, z1.h
-; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z6.s, z5.s
-; CHECK-NEWLOWERING-NEXT: mad z1.s, p0/m, z2.s, z3.s
-; CHECK-NEWLOWERING-NEXT: add z0.s, z1.s, z0.s
+; CHECK-NEWLOWERING-NEXT: sdot z0.s, z1.b, z2.b
; CHECK-NEWLOWERING-NEXT: ret
entry:
%a.wide = sext <vscale x 16 x i8> %a to <vscale x 16 x i32>
@@ -116,24 +65,7 @@ define <vscale x 2 x i64> @sdot_wide(<vscale x 2 x i64> %acc, <vscale x 8 x i16>
;
; CHECK-NEWLOWERING-LABEL: sdot_wide:
; CHECK-NEWLOWERING: // %bb.0: // %entry
-; CHECK-NEWLOWERING-NEXT: sunpklo z3.s, z2.h
-; CHECK-NEWLOWERING-NEXT: sunpklo z4.s, z1.h
-; CHECK-NEWLOWERING-NEXT: sunpkhi z2.s, z2.h
-; CHECK-NEWLOWERING-NEXT: sunpkhi z1.s, z1.h
-; CHECK-NEWLOWERING-NEXT: ptrue p0.d
-; CHECK-NEWLOWERING-NEXT: sunpklo z5.d, z3.s
-; CHECK-NEWLOWERING-NEXT: sunpklo z6.d, z4.s
-; CHECK-NEWLOWERING-NEXT: sunpkhi z3.d, z3.s
-; CHECK-NEWLOWERING-NEXT: sunpkhi z4.d, z4.s
-; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z6.d, z5.d
-; CHECK-NEWLOWERING-NEXT: sunpkhi z5.d, z2.s
-; CHECK-NEWLOWERING-NEXT: sunpkhi z6.d, z1.s
-; CHECK-NEWLOWERING-NEXT: mul z3.d, z4.d, z3.d
-; CHECK-NEWLOWERING-NEXT: sunpklo z2.d, z2.s
-; CHECK-NEWLOWERING-NEXT: sunpklo z1.d, z1.s
-; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z6.d, z5.d
-; CHECK-NEWLOWERING-NEXT: mad z1.d, p0/m, z2.d, z3.d
-; CHECK-NEWLOWERING-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEWLOWERING-NEXT: sdot z0.d, z1.h, z2.h
; CHECK-NEWLOWERING-NEXT: ret
entry:
%a.wide = sext <vscale x 8 x i16> %a to <vscale x 8 x i64>
@@ -274,46 +206,59 @@ define <vscale x 4 x i64> @udot_8to64(<vscale x 4 x i64> %acc, <vscale x 16 x i8
;
; CHECK-NEWLOWERING-LABEL: udot_8to64:
; CHECK-NEWLOWERING: // %bb.0: // %entry
-; CHECK-NEWLOWERING-NEXT: uunpklo z4.h, z3.b
-; CHECK-NEWLOWERING-NEXT: uunpklo z5.h, z2.b
-; CHECK-NEWLOWERING-NEXT: uunpkhi z3.h, z3.b
+; CHECK-NEWLOWERING-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEWLOWERING-NEXT: addvl sp, sp, #-2
+; CHECK-NEWLOWERING-NEXT: str z9, [sp] // 16-byte Folded Spill
+; CHECK-NEWLOWERING-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEWLOWERING-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEWLOWERING-NEXT: .cfi_offset w29, -16
+; CHECK-NEWLOWERING-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK-NEWLOWERING-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
+; CHECK-NEWLOWERING-NEXT: uunpklo z4.h, z2.b
+; CHECK-NEWLOWERING-NEXT: uunpklo z5.h, z3.b
; CHECK-NEWLOWERING-NEXT: uunpkhi z2.h, z2.b
+; CHECK-NEWLOWERING-NEXT: uunpkhi z3.h, z3.b
; CHECK-NEWLOWERING-NEXT: ptrue p0.d
; CHECK-NEWLOWERING-NEXT: uunpklo z6.s, z4.h
-; CHECK-NEWLOWERING-NEXT: uunpklo z7.s, z5.h
; CHECK-NEWLOWERING-NEXT: uunpkhi z4.s, z4.h
+; CHECK-NEWLOWERING-NEXT: uunpklo z7.s, z5.h
; CHECK-NEWLOWERING-NEXT: uunpkhi z5.s, z5.h
-; CHECK-NEWLOWERING-NEXT: uunpkhi z24.s, z3.h
-; CHECK-NEWLOWERING-NEXT: uunpkhi z25.s, z2.h
-; CHECK-NEWLOWERING-NEXT: uunpklo z3.s, z3.h
-; CHECK-NEWLOWERING-NEXT: uunpklo z2.s, z2.h
-; CHECK-NEWLOWERING-NEXT: uunpklo z26.d, z6.s
-; CHECK-NEWLOWERING-NEXT: uunpklo z27.d, z7.s
-; CHECK-NEWLOWERING-NEXT: uunpkhi z6.d, z6.s
-; CHECK-NEWLOWERING-NEXT: uunpkhi z7.d, z7.s
-; CHECK-NEWLOWERING-NEXT: uunpklo z28.d, z4.s
+; CHECK-NEWLOWERING-NEXT: uunpklo z24.s, z2.h
+; CHECK-NEWLOWERING-NEXT: uunpkhi z2.s, z2.h
+; CHECK-NEWLOWERING-NEXT: uunpklo z25.s, z3.h
+; CHECK-NEWLOWERING-NEXT: uunpkhi z3.s, z3.h
+; CHECK-NEWLOWERING-NEXT: uunpkhi z26.d, z6.s
+; CHECK-NEWLOWERING-NEXT: uunpklo z6.d, z6.s
+; CHECK-NEWLOWERING-NEXT: uunpklo z27.d, z4.s
+; CHECK-NEWLOWERING-NEXT: uunpklo z28.d, z7.s
; CHECK-NEWLOWERING-NEXT: uunpklo z29.d, z5.s
; CHECK-NEWLOWERING-NEXT: uunpkhi z4.d, z4.s
+; CHECK-NEWLOWERING-NEXT: uunpkhi z7.d, z7.s
; CHECK-NEWLOWERING-NEXT: uunpkhi z5.d, z5.s
-; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z27.d, z26.d
-; CHECK-NEWLOWERING-NEXT: uunpklo z26.d, z24.s
-; CHECK-NEWLOWERING-NEXT: uunpkhi z24.d, z24.s
-; CHECK-NEWLOWERING-NEXT: mla z1.d, p0/m, z7.d, z6.d
-; CHECK-NEWLOWERING-NEXT: uunpklo z6.d, z25.s
-; CHECK-NEWLOWERING-NEXT: uunpklo z7.d, z3.s
-; CHECK-NEWLOWERING-NEXT: mul z27.d, z29.d, z28.d
-; CHECK-NEWLOWERING-NEXT: uunpklo z28.d, z2.s
-; CHECK-NEWLOWERING-NEXT: uunpkhi z25.d, z25.s
+; CHECK-NEWLOWERING-NEXT: uunpkhi z30.d, z24.s
+; CHECK-NEWLOWERING-NEXT: uunpkhi z31.d, z2.s
+; CHECK-NEWLOWERING-NEXT: uunpklo z24.d, z24.s
+; CHECK-NEWLOWERING-NEXT: uunpklo z2.d, z2.s
+; CHECK-NEWLOWERING-NEXT: uunpkhi z8.d, z25.s
+; CHECK-NEWLOWERING-NEXT: uunpklo z25.d, z25.s
+; CHECK-NEWLOWERING-NEXT: uunpklo z9.d, z3.s
+; CHECK-NEWLOWERING-NEXT: mul z27.d, z27.d, z29.d
+; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z6.d, z28.d
; CHECK-NEWLOWERING-NEXT: uunpkhi z3.d, z3.s
-; CHECK-NEWLOWERING-NEXT: uunpkhi z2.d, z2.s
-; CHECK-NEWLOWERING-NEXT: mul z4.d, z5.d, z4.d
-; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z6.d, z26.d
-; CHECK-NEWLOWERING-NEXT: movprfx z5, z27
-; CHECK-NEWLOWERING-NEXT: mla z5.d, p0/m, z28.d, z7.d
-; CHECK-NEWLOWERING-NEXT: mla z1.d, p0/m, z25.d, z24.d
-; CHECK-NEWLOWERING-NEXT: mad z2.d, p0/m, z3.d, z4.d
-; CHECK-NEWLOWERING-NEXT: add z0.d, z5.d, z0.d
-; CHECK-NEWLOWERING-NEXT: add z1.d, z2.d, z1.d
+; CHECK-NEWLOWERING-NEXT: mul z4.d, z4.d, z5.d
+; CHECK-NEWLOWERING-NEXT: mla z1.d, p0/m, z26.d, z7.d
+; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z2.d, z9.d
+; CHECK-NEWLOWERING-NEXT: movprfx z2, z27
+; CHECK-NEWLOWERING-NEXT: mla z2.d, p0/m, z24.d, z25.d
+; CHECK-NEWLOWERING-NEXT: ldr z9, [sp] // 16-byte Folded Reload
+; CHECK-NEWLOWERING-NEXT: mla z1.d, p0/m, z31.d, z3.d
+; CHECK-NEWLOWERING-NEXT: movprfx z3, z4
+; CHECK-NEWLOWERING-NEXT: mla z3.d, p0/m, z30.d, z8.d
+; CHECK-NEWLOWERING-NEXT: ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEWLOWERING-NEXT: add z0.d, z2.d, z0.d
+; CHECK-NEWLOWERING-NEXT: add z1.d, z3.d, z1.d
+; CHECK-NEWLOWERING-NEXT: addvl sp, sp, #2
+; CHECK-NEWLOWERING-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEWLOWERING-NEXT: ret
entry:
%a.wide = zext <vscale x 16 x i8> %a to <vscale x 16 x i64>
@@ -337,46 +282,59 @@ define <vscale x 4 x i64> @sdot_8to64(<vscale x 4 x i64> %acc, <vscale x 16 x i8
;
; CHECK-NEWLOWERING-LABEL: sdot_8to64:
; CHECK-NEWLOWERING: // %bb.0: // %entry
-; CHECK-NEWLOWERING-NEXT: sunpklo z4.h, z3.b
-; CHECK-NEWLOWERING-NEXT: sunpklo z5.h, z2.b
-; CHECK-NEWLOWERING-NEXT: sunpkhi z3.h, z3.b
+; CHECK-NEWLOWERING-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEWLOWERING-NEXT: addvl sp, sp, #-2
+; CHECK-NEWLOWERING-NEXT: str z9, [sp] // 16-byte Folded Spill
+; CHECK-NEWLOWERING-NEXT: str z8, [sp, #1, mul vl] // 16-byte Folded Spill
+; CHECK-NEWLOWERING-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
+; CHECK-NEWLOWERING-NEXT: .cfi_offset w29, -16
+; CHECK-NEWLOWERING-NEXT: .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; CHECK-NEWLOWERING-NEXT: .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
+; CHECK-NEWLOWERING-NEXT: sunpklo z4.h, z2.b
+; CHECK-NEWLOWERING-NEXT: sunpklo z5.h, z3.b
; CHECK-NEWLOWERING-NEXT: sunpkhi z2.h, z2.b
+; CHECK-NEWLOWERING-NEXT: sunpkhi z3.h, z3.b
; CHECK-NEWLOWERING-NEXT: ptrue p0.d
; CHECK-NEWLOWERING-NEXT: sunpklo z6.s, z4.h
-; CHECK-NEWLOWERING-NEXT: sunpklo z7.s, z5.h
; CHECK-NEWLOWERING-NEXT: sunpkhi z4.s, z4.h
+; CHECK-NEWLOWERING-NEXT: sunpklo z7.s, z5.h
; CHECK-NEWLOWERING-NEXT: sunpkhi z5.s, z5.h
-; CHECK-NEWLOWERING-NEXT: sunpkhi z24.s, z3.h
-; CHECK-NEWLOWERING-NEXT: sunpkhi z25.s, z2.h
-; CHECK-NEWLOWERING-NEXT: sunpklo z3.s, z3.h
-; CHECK-NEWLOWERING-NEXT: sunpklo z2.s, z2.h
-; CHECK-NEWLOWERING-NEXT: sunpklo z26.d, z6.s
-; CHECK-NEWLOWERING-NEXT: sunpklo z27.d, z7.s
-; CHECK-NEWLOWERING-NEXT: sunpkhi z6.d, z6.s
-; CHECK-NEWLOWERING-NEXT: sunpkhi z7.d, z7.s
-; CHECK-NEWLOWERING-NEXT: sunpklo z28.d, z4.s
+; CHECK-NEWLOWERING-NEXT: sunpklo z24.s, z2.h
+; CHECK-NEWLOWERING-NEXT: sunpkhi z2.s, z2.h
+; CHECK-NEWLOWERING-NEXT: sunpklo z25.s, z3.h
+; CHECK-NEWLOWERING-NEXT: sunpkhi z3.s, z3.h
+; CHECK-NEWLOWERING-NEXT: sunpkhi z26.d, z6.s
+; CHECK-NEWLOWERING-NEXT: sunpklo z6.d, z6.s
+; CHECK-NEWLOWERING-NEXT: sunpklo z27.d, z4.s
+; CHECK-NEWLOWERING-NEXT: sunpklo z28.d, z7.s
; CHECK-NEWLOWERING-NEXT: sunpklo z29.d, z5.s
; CHECK-NEWLOWERING-NEXT: sunpkhi z4.d, z4.s
+; CHECK-NEWLOWERING-NEXT: sunpkhi z7.d, z7.s
; CHECK-NEWLOWERING-NEXT: sunpkhi z5.d, z5.s
-; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z27.d, z26.d
-; CHECK-NEWLOWERING-NEXT: sunpklo z26.d, z24.s
-; CHECK-NEWLOWERING-NEXT: sunpkhi z24.d, z24.s
-; CHECK-NEWLOWERING-NEXT: mla z1.d, p0/m, z7.d, z6.d
-; CHECK-NEWLOWERING-NEXT: sunpklo z6.d, z25.s
-; CHECK-NEWLOWERING-NEXT: sunpklo z7.d, z3.s
-; CHECK-NEWLOWERING-NEXT: mul z27.d, z29.d, z28.d
-; CHECK-NEWLOWERING-NEXT: sunpklo z28.d, z2.s
-; CHECK-NEWLOWERING-NEXT: sunpkhi z25.d, z25.s
+; CHECK-NEWLOWERING-NEXT: sunpkhi z30.d, z24.s
+; CHECK-NEWLOWERING-NEXT: sunpkhi z31.d, z2.s
+; CHECK-NEWLOWERING-NEXT: sunpklo z24.d, z24.s
+; CHECK-NEWLOWERING-NEXT: sunpklo z2.d, z2.s
+; CHECK-NEWLOWERING-NEXT: sunpkhi z8.d, z25.s
+; CHECK-NEWLOWERING-NEXT: sunpklo z25.d, z25.s
+; CHECK-NEWLOWERING-NEXT: sunpklo z9.d, z3.s
+; CHECK-NEWLOWERING-NEXT: mul z27.d, z27.d, z29.d
+; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z6.d, z28.d
; CHECK-NEWLOWERING-NEXT: sunpkhi z3.d, z3.s
-; CHECK-NEWLOWERING-NEXT: sunpkhi z2.d, z2.s
-; CHECK-NEWLOWERING-NEXT: mul z4.d, z5.d, z4.d
-; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z6.d, z26.d
-; CHECK-NEWLOWERING-NEXT: movprfx z5, z27
-; CHECK-NEWLOWERING-NEXT: mla z5.d, p0/m, z28.d, z7.d
-; CHECK-NEWLOWERING-NEXT: mla z1.d, p0/m, z25.d, z24.d
-; CHECK-NEWLOWERING-NEXT: mad z2.d, p0/m, z3.d, z4.d
-; CHECK-NEWLOWERING-NEXT: add z0.d, z5.d, z0.d
-; CHECK-NEWLOWERING-NEXT: add z1.d, z2.d, z1.d
+; CHECK-NEWLOWERING-NEXT: mul z4.d, z4.d, z5.d
+; CHECK-NEWLOWERING-NEXT: mla z1.d, p0/m, z26.d, z7.d
+; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z2.d, z9.d
+; CHECK-NEWLOWERING-NEXT: movprfx z2, z27
+; CHECK-NEWLOWERING-NEXT: mla z2.d, p0/m, z24.d, z25.d
+; CHECK-NEWLOWERING-NEXT: ldr z9, [sp] // 16-byte Folded Reload
+; CHECK-NEWLOWERING-NEXT: mla z1.d, p0/m, z31.d, z3.d
+; CHECK-NEWLOWERING-NEXT: movprfx z3, z4
+; CHECK-NEWLOWERING-NEXT: mla z3.d, p0/m, z30.d, z8.d
+; CHECK-NEWLOWERING-NEXT: ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
+; CHECK-NEWLOWERING-NEXT: add z0.d, z2.d, z0.d
+; CHECK-NEWLOWERING-NEXT: add z1.d, z3.d, z1.d
+; CHECK-NEWLOWERING-NEXT: addvl sp, sp, #2
+; CHECK-NEWLOWERING-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEWLOWERING-NEXT: ret
entry:
%a.wide = sext <vscale x 16 x i8> %a to <vscale x 16 x i64>
@@ -845,11 +803,11 @@ define <vscale x 4 x i32> @not_udot(<vscale x 4 x i32> %acc, <vscale x 8 x i8> %
; CHECK-NEXT: and z1.h, z1.h, #0xff
; CHECK-NEXT: and z2.h, z2.h, #0xff
; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: uunpklo z3.s, z2.h
-; CHECK-NEXT: uunpklo z4.s, z1.h
-; CHECK-NEXT: uunpkhi z2.s, z2.h
+; CHECK-NEXT: uunpklo z3.s, z1.h
+; CHECK-NEXT: uunpklo z4.s, z2.h
; CHECK-NEXT: uunpkhi z1.s, z1.h
-; CHECK-NEXT: mla z0.s, p0/m, z4.s, z3.s
+; CHECK-NEXT: uunpkhi z2.s, z2.h
+; CHECK-NEXT: mla z0.s, p0/m, z3.s, z4.s
; CHECK-NEXT: mla z0.s, p0/m, z1.s, z2.s
; CHECK-NEXT: ret
;
@@ -858,11 +816,11 @@ define <vscale x 4 x i32> @not_udot(<vscale x 4 x i32> %acc, <vscale x 8 x i8> %
; CHECK-NEWLOWERING-NEXT: and z1.h, z1.h, #0xff
; CHECK-NEWLOWERING-NEXT: and z2.h, z2.h, #0xff
; CHECK-NEWLOWERING-NEXT: ptrue p0.s
-; CHECK-NEWLOWERING-NEXT: uunpklo z3.s, z2.h
-; CHECK-NEWLOWERING-NEXT: uunpklo z4.s, z1.h
-; CHECK-NEWLOWERING-NEXT: uunpkhi z2.s, z2.h
+; CHECK-NEWLOWERING-NEXT: uunpklo z3.s, z1.h
+; CHECK-NEWLOWERING-NEXT: uunpklo z4.s, z2.h
; CHECK-NEWLOWERING-NEXT: uunpkhi z1.s, z1.h
-; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z4.s, z3.s
+; CHECK-NEWLOWERING-NEXT: uunpkhi z2.s, z2.h
+; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z3.s, z4.s
; CHECK-NEWLOWERING-NEXT: mla z0.s, p0/m, z1.s, z2.s
; CHECK-NEWLOWERING-NEXT: ret
entry:
@@ -879,11 +837,11 @@ define <vscale x 2 x i64> @not_udot_wide(<vscale x 2 x i64> %acc, <vscale x 4 x
; CHECK-NEXT: and z1.s, z1.s, #0xffff
; CHECK-NEXT: and z2.s, z2.s, #0xffff
; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: uunpklo z3.d, z2.s
-; CHECK-NEXT: uunpklo z4.d, z1.s
-; CHECK-NEXT: uunpkhi z2.d, z2.s
+; CHECK-NEXT: uunpklo z3.d, z1.s
+; CHECK-NEXT: uunpklo z4.d, z2.s
; CHECK-NEXT: uunpkhi z1.d, z1.s
-; CHECK-NEXT: mla z0.d, p0/m, z4.d, z3.d
+; CHECK-NEXT: uunpkhi z2.d, z2.s
+; CHECK-NEXT: mla z0.d, p0/m, z3.d, z4.d
; CHECK-NEXT: mla z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT: ret
;
@@ -892,11 +850,11 @@ define <vscale x 2 x i64> @not_udot_wide(<vscale x 2 x i64> %acc, <vscale x 4 x
; CHECK-NEWLOWERING-NEXT: and z1.s, z1.s, #0xffff
; CHECK-NEWLOWERING-NEXT: and z2.s, z2.s, #0xffff
; CHECK-NEWLOWERING-NEXT: ptrue p0.d
-; CHECK-NEWLOWERING-NEXT: uunpklo z3.d, z2.s
-; CHECK-NEWLOWERING-NEXT: uunpklo z4.d, z1.s
-; CHECK-NEWLOWERING-NEXT: uunpkhi z2.d, z2.s
+; CHECK-NEWLOWERING-NEXT: uunpklo z3.d, z1.s
+; CHECK-NEWLOWERING-NEXT: uunpklo z4.d, z2.s
; CHECK-NEWLOWERING-NEXT: uunpkhi z1.d, z1.s
-; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z4.d, z3.d
+; CHECK-NEWLOWERING-NEXT: uunpkhi z2.d, z2.s
+; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z3.d, z4.d
; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z1.d, z2.d
; CHECK-NEWLOWERING-NEXT: ret
entry:
@@ -1248,48 +1206,24 @@ define <vscale x 2 x i16> @udot_nxv8i8_promote (<vscale x 2 x i16> %acc, <vscale
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: and z1.h, z1.h, #0xff
; CHECK-NEXT: and z2.h, z2.h, #0xff
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: uunpklo z3.s, z2.h
-; CHECK-NEXT: uunpklo z4.s, z1.h
-; CHECK-NEXT: uunpkhi z2.s, z2.h
+; CHECK-NEXT: mul z1.h, z1.h, z2.h
+; CHECK-NEXT: uunpklo z2.s, z1.h
; CHECK-NEXT: uunpkhi z1.s, z1.h
-; CHECK-NEXT: uunpklo z5.d, z3.s
-; CHECK-NEXT: uunpklo z6.d, z4.s
-; CHECK-NEXT: uunpkhi z3.d, z3.s
-; CHECK-NEXT: uunpkhi z4.d, z4.s
-; CHECK-NEXT: mla z0.d, p0/m, z6.d, z5.d
-; CHECK-NEXT: uunpkhi z5.d, z2.s
-; CHECK-NEXT: uunpkhi z6.d, z1.s
-; CHECK-NEXT: mul z3.d, z4.d, z3.d
-; CHECK-NEXT: uunpklo z2.d, z2.s
-; CHECK-NEXT: uunpklo z1.d, z1.s
-; CHECK-NEXT: mla z0.d, p0/m, z6.d, z5.d
-; CHECK-NEXT: mad z1.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: uunpklo z3.d, z2.s
+; CHECK-NEXT: uunpklo z4.d, z1.s
+; CHECK-NEXT: uunpkhi z2.d, z2.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: add z0.d, z0.d, z3.d
+; CHECK-NEXT: add z2.d, z2.d, z4.d
; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: add z0.d, z2.d, z0.d
; CHECK-NEXT: ret
;
; CHECK-NEWLOWERING-LABEL: udot_nxv8i8_promote:
; CHECK-NEWLOWERING: // %bb.0: // %entry
-; CHECK-NEWLOWERING-NEXT: and z1.h, z1.h, #0xff
; CHECK-NEWLOWERING-NEXT: and z2.h, z2.h, #0xff
-; CHECK-NEWLOWERING-NEXT: ptrue p0.d
-; CHECK-NEWLOWERING-NEXT: uunpklo z3.s, z2.h
-; CHECK-NEWLOWERING-NEXT: uunpklo z4.s, z1.h
-; CHECK-NEWLOWERING-NEXT: uunpkhi z2.s, z2.h
-; CHECK-NEWLOWERING-NEXT: uunpkhi z1.s, z1.h
-; CHECK-NEWLOWERING-NEXT: uunpklo z5.d, z3.s
-; CHECK-NEWLOWERING-NEXT: uunpklo z6.d, z4.s
-; CHECK-NEWLOWERING-NEXT: uunpkhi z3.d, z3.s
-; CHECK-NEWLOWERING-NEXT: uunpkhi z4.d, z4.s
-; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z6.d, z5.d
-; CHECK-NEWLOWERING-NEXT: uunpkhi z5.d, z2.s
-; CHECK-NEWLOWERING-NEXT: uunpkhi z6.d, z1.s
-; CHECK-NEWLOWERING-NEXT: mul z3.d, z4.d, z3.d
-; CHECK-NEWLOWERING-NEXT: uunpklo z2.d, z2.s
-; CHECK-NEWLOWERING-NEXT: uunpklo z1.d, z1.s
-; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z6.d, z5.d
-; CHECK-NEWLOWERING-NEXT: mad z1.d, p0/m, z2.d, z3.d
-; CHECK-NEWLOWERING-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEWLOWERING-NEXT: and z1.h, z1.h, #0xff
+; CHECK-NEWLOWERING-NEXT: udot z0.d, z1.h, z2.h
; CHECK-NEWLOWERING-NEXT: ret
entry:
%a.wide = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
@@ -1305,49 +1239,25 @@ define <vscale x 2 x i16> @sdot_nxv8i8_promote (<vscale x 2 x i16> %acc, <vscale
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: sxtb z1.h, p0/m, z1.h
; CHECK-NEXT: sxtb z2.h, p0/m, z2.h
-; CHECK-NEXT: ptrue p0.d
-; CHECK-NEXT: sunpklo z3.s, z2.h
-; CHECK-NEXT: sunpklo z4.s, z1.h
-; CHECK-NEXT: sunpkhi z2.s, z2.h
-; CHECK-NEXT: sunpkhi z1.s, z1.h
-; CHECK-NEXT: sunpklo z5.d, z3.s
-; CHECK-NEXT: sunpklo z6.d, z4.s
-; CHECK-NEXT: sunpkhi z3.d, z3.s
-; CHECK-NEXT: sunpkhi z4.d, z4.s
-; CHECK-NEXT: mla z0.d, p0/m, z6.d, z5.d
-; CHECK-NEXT: sunpkhi z5.d, z2.s
-; CHECK-NEXT: sunpkhi z6.d, z1.s
-; CHECK-NEXT: mul z3.d, z4.d, z3.d
-; CHECK-NEXT: sunpklo z2.d, z2.s
-; CHECK-NEXT: sunpklo z1.d, z1.s
-; CHECK-NEXT: mla z0.d, p0/m, z6.d, z5.d
-; CHECK-NEXT: mad z1.d, p0/m, z2.d, z3.d
+; CHECK-NEXT: mul z1.h, z1.h, z2.h
+; CHECK-NEXT: uunpklo z2.s, z1.h
+; CHECK-NEXT: uunpkhi z1.s, z1.h
+; CHECK-NEXT: uunpklo z3.d, z2.s
+; CHECK-NEXT: uunpklo z4.d, z1.s
+; CHECK-NEXT: uunpkhi z2.d, z2.s
+; CHECK-NEXT: uunpkhi z1.d, z1.s
+; CHECK-NEXT: add z0.d, z0.d, z3.d
+; CHECK-NEXT: add z2.d, z2.d, z4.d
; CHECK-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEXT: add z0.d, z2.d, z0.d
; CHECK-NEXT: ret
;
; CHECK-NEWLOWERING-LABEL: sdot_nxv8i8_promote:
; CHECK-NEWLOWERING: // %bb.0: // %entry
; CHECK-NEWLOWERING-NEXT: ptrue p0.h
-; CHECK-NEWLOWERING-NEXT: sxtb z1.h, p0/m, z1.h
; CHECK-NEWLOWERING-NEXT: sxtb z2.h, p0/m, z2.h
-; CHECK-NEWLOWERING-NEXT: ptrue p0.d
-; CHECK-NEWLOWERING-NEXT: sunpklo z3.s, z2.h
-; CHECK-NEWLOWERING-NEXT: sunpklo z4.s, z1.h
-; CHECK-NEWLOWERING-NEXT: sunpkhi z2.s, z2.h
-; CHECK-NEWLOWERING-NEXT: sunpkhi z1.s, z1.h
-; CHECK-NEWLOWERING-NEXT: sunpklo z5.d, z3.s
-; CHECK-NEWLOWERING-NEXT: sunpklo z6.d, z4.s
-; CHECK-NEWLOWERING-NEXT: sunpkhi z3.d, z3.s
-; CHECK-NEWLOWERING-NEXT: sunpkhi z4.d, z4.s
-; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z6.d, z5.d
-; CHECK-NEWLOWERING-NEXT: sunpkhi z5.d, z2.s
-; CHECK-NEWLOWERING-NEXT: sunpkhi z6.d, z1.s
-; CHECK-NEWLOWERING-NEXT: mul z3.d, z4.d, z3.d
-; CHECK-NEWLOWERING-NEXT: sunpklo z2.d, z2.s
-; CHECK-NEWLOWERING-NEXT: sunpklo z1.d, z1.s
-; CHECK-NEWLOWERING-NEXT: mla z0.d, p0/m, z6.d, z5.d
-; CHECK-NEWLOWERING-NEXT: mad z1.d, p0/m, z2.d, z3.d
-; CHECK-NEWLOWERING-NEXT: add z0.d, z1.d, z0.d
+; CHECK-NEWLOWERING-NEXT: sxtb z1.h, p0/m, z1.h
+; CHECK-NEWLOWERING-NEXT: sdot z0.d, z1.h, z2.h
; CHECK-NEWLOWERING-NEXT: ret
entry:
%a.wide = sext <vscale x 8 x i8> %a to <vscale x 8 x i16>