[llvm] [AArch64][SME2] Add FORM_STRIDED_TUPLE pseudo nodes (PR #116399)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Nov 15 22:27:44 PST 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-aarch64
Author: Kerry McLaughlin (kmclaughlin-arm)
This patch adds pseudo nodes to help improve the register
allocation of multi-vector SME intrinsics.
The FORM_STRIDED_TUPLE node is emitted when each operand of a
contiguous multi-vector dot intrinsic is the result of a strided
multi-vector load. The operands of the pseudo are the subregisters
at the same index from each of these strided loads; a minimal IR
sketch of the pattern follows.
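For illustration, here is a minimal IR sketch of the targeted pattern
(function and value names are hypothetical; the intrinsics are the same ones
exercised by the affected tests, compiled with
llc -force-streaming -enable-subreg-liveness). Both zn operands of the dot are
result 0 of a strided x2 load:

```llvm
define void @udot_lane_za32_vg1x2_tuple(i32 %slice, ptr %ptr, <vscale x 16 x i8> %zm) {
entry:
  %pn = call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
  %ld1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %pn, ptr %ptr)
  %ptr2 = getelementptr i8, ptr %ptr, i64 16
  %ld2 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld1.pn.x2.nxv16i8(target("aarch64.svcount") %pn, ptr %ptr2)
  ; Each dot operand is the same result index (0) of a strided load, so
  ; lowering can wrap { %zn0, %zn1 } in a FORM_STRIDED_TUPLE_X2 node.
  %zn0 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %ld1, 0
  %zn1 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %ld2, 0
  call void @llvm.aarch64.sme.udot.lane.za32.vg1x2.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zm, i32 0)
  ret void
}
```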
Follow-up patches will use this pseudo when adding register allocation
hints to remove unnecessary register copies in this scenario. Subregister
liveness is also required to achieve this and has been enabled in the
tests changed by this patch.
Patch contains changes by Matthew Devereau.
---
Patch is 99.87 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/116399.diff
8 Files Affected:
- (modified) llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp (+32)
- (modified) llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp (+27)
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+63)
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.h (+3)
- (modified) llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td (+11)
- (modified) llvm/lib/Target/AArch64/SMEInstrFormats.td (+12)
- (modified) llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll (+447-125)
- (modified) llvm/test/CodeGen/AArch64/sme2-intrinsics-vdot.ll (+322-34)
``````````diff
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index 055cb3cefcedf9..dabcaaf9f5c874 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -67,6 +67,10 @@ class AArch64ExpandPseudo : public MachineFunctionPass {
TargetRegisterClass ContiguousClass,
TargetRegisterClass StridedClass,
unsigned ContiguousOpc, unsigned StridedOpc);
+ bool expandFormTuplePseudo(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI,
+ unsigned Size);
bool expandMOVImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
unsigned BitSize);
@@ -1142,6 +1146,30 @@ bool AArch64ExpandPseudo::expandMultiVecPseudo(
return true;
}
+bool AArch64ExpandPseudo::expandFormTuplePseudo(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+ MachineBasicBlock::iterator &NextMBBI, unsigned Size) {
+ assert((Size == 2 || Size == 4) && "Invalid Tuple Size");
+ MachineInstr &MI = *MBBI;
+ Register ReturnTuple = MI.getOperand(0).getReg();
+
+ const TargetRegisterInfo *TRI =
+ MBB.getParent()->getSubtarget().getRegisterInfo();
+ for (unsigned I = 0; I < Size; I++) {
+ Register FormTupleOpReg = MI.getOperand(I + 1).getReg();
+ Register ReturnTupleSubReg =
+ TRI->getSubReg(ReturnTuple, AArch64::zsub0 + I);
+ if (FormTupleOpReg != ReturnTupleSubReg)
+ BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORR_ZZZ))
+ .addReg(ReturnTupleSubReg, RegState::Define)
+ .addReg(FormTupleOpReg)
+ .addReg(FormTupleOpReg);
+ }
+
+ MI.eraseFromParent();
+ return true;
+}
+
/// If MBBI references a pseudo instruction that should be expanded here,
/// do the expansion and return true. Otherwise return false.
bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
@@ -1724,6 +1752,10 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
return expandMultiVecPseudo(
MBB, MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
AArch64::LDNT1D_4Z, AArch64::LDNT1D_4Z_STRIDED);
+ case AArch64::FORM_STRIDED_TUPLE_X2_PSEUDO:
+ return expandFormTuplePseudo(MBB, MBBI, NextMBBI, 2);
+ case AArch64::FORM_STRIDED_TUPLE_X4_PSEUDO:
+ return expandFormTuplePseudo(MBB, MBBI, NextMBBI, 4);
}
return false;
}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 1969c830f4d312..d46bae07b3d4c5 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -504,6 +504,8 @@ class AArch64DAGToDAGISel : public SelectionDAGISel {
bool SelectAllActivePredicate(SDValue N);
bool SelectAnyPredicate(SDValue N);
+
+ void SelectFormTuplePseudo(SDNode *N, unsigned Size);
};
class AArch64DAGToDAGISelLegacy : public SelectionDAGISelLegacy {
@@ -7181,6 +7183,14 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
}
break;
}
+ case AArch64ISD::FORM_STRIDED_TUPLE_X2: {
+ SelectFormTuplePseudo(Node, 2);
+ return;
+ }
+ case AArch64ISD::FORM_STRIDED_TUPLE_X4: {
+ SelectFormTuplePseudo(Node, 4);
+ return;
+ }
}
// Select the default instruction
@@ -7438,3 +7448,20 @@ bool AArch64DAGToDAGISel::SelectSMETileSlice(SDValue N, unsigned MaxSize,
Offset = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i64);
return true;
}
+
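+// Emit the FORM_STRIDED_TUPLE_X<N>_PSEUDO for the node and replace each of
+// the node's results with an extract of the corresponding zsub subregister
+// of the pseudo's tuple result.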
+void AArch64DAGToDAGISel::SelectFormTuplePseudo(SDNode *Node, unsigned Size) {
+ assert((Size == 2 || Size == 4) && "Invalid Tuple size");
+ EVT VT = Node->getValueType(0);
+ SmallVector<SDValue> Ops;
+ for (unsigned I = 0; I < Size; I++)
+ Ops.push_back(Node->getOperand(I));
+ SDLoc DL(Node);
+ unsigned Opc = Size == 2 ? AArch64::FORM_STRIDED_TUPLE_X2_PSEUDO
+ : AArch64::FORM_STRIDED_TUPLE_X4_PSEUDO;
+ SDNode *Tuple = CurDAG->getMachineNode(Opc, DL, MVT::Untyped, Ops);
+ SDValue SuperReg = SDValue(Tuple, 0);
+ for (unsigned I = 0; I < Size; ++I)
+ ReplaceUses(SDValue(Node, I), CurDAG->getTargetExtractSubreg(
+ AArch64::zsub0 + I, DL, VT, SuperReg));
+ CurDAG->RemoveDeadNode(Node);
+}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 9d1c3d4eddc880..b8c87b0ec2ea5f 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -2808,6 +2808,8 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
MAKE_CASE(AArch64ISD::FMUL_PRED)
MAKE_CASE(AArch64ISD::FSUB_PRED)
MAKE_CASE(AArch64ISD::RDSVL)
+ MAKE_CASE(AArch64ISD::FORM_STRIDED_TUPLE_X2)
+ MAKE_CASE(AArch64ISD::FORM_STRIDED_TUPLE_X4)
MAKE_CASE(AArch64ISD::BIC)
MAKE_CASE(AArch64ISD::CBZ)
MAKE_CASE(AArch64ISD::CBNZ)
@@ -5709,6 +5711,46 @@ SDValue AArch64TargetLowering::getRuntimePStateSM(SelectionDAG &DAG,
Mask);
}
+static unsigned getIntrinsicID(const SDNode *N);
+
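+// Try to replace the multi-vector operands of an SME dot intrinsic with a
+// FORM_STRIDED_TUPLE node so the tuple can later be register-allocated
+// without copies. The pattern requires every operand to be the same result
+// index of a strided multi-vector load; returns SDValue() otherwise.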
+static SDValue TryLowerMultiVecSMEDotIntrinsic(SDValue Op, SelectionDAG &DAG,
+ unsigned Size) {
+ assert((Size == 2 || Size == 4) && "Invalid Tuple Size");
+ auto IsStridedLoad = [Size](SDValue Op) -> bool {
+ unsigned Intrinsic = getIntrinsicID(Op.getNode());
+ if (Size == 2)
+ return Intrinsic == Intrinsic::aarch64_sve_ld1_pn_x2;
+ else
+ return Intrinsic == Intrinsic::aarch64_sve_ld1_pn_x4;
+ };
+
+ SmallVector<SDValue> Ops;
+ unsigned LastLoadIdx = Size == 2 ? 5 : 7;
+ unsigned LoadResNo = Op.getOperand(3).getResNo();
+ for (unsigned I = 3; I < LastLoadIdx; I++) {
+ if (!IsStridedLoad(Op->getOperand(I)) ||
+ Op.getOperand(I).getResNo() != LoadResNo)
+ return SDValue();
+ Ops.push_back(Op->getOperand(I));
+ }
+
+ EVT VT = Op->getOperand(3).getValueType();
+ SDVTList VTList =
+ Size == 2 ? DAG.getVTList(VT, VT) : DAG.getVTList(VT, VT, VT, VT);
+ unsigned Opc = Size == 2 ? AArch64ISD::FORM_STRIDED_TUPLE_X2
+ : AArch64ISD::FORM_STRIDED_TUPLE_X4;
+ SDLoc DL(Op);
+ SDValue Pseudo = DAG.getNode(Opc, DL, VTList, Ops);
+
+ SmallVector<SDValue> DotOps = {Op.getOperand(0), Op->getOperand(1),
+ Op->getOperand(2)};
+ for (unsigned I = 0; I < Size; I++)
+ DotOps.push_back(Pseudo.getValue(I));
+ DotOps.push_back(Op->getOperand(DotOps.size()));
+ DotOps.push_back(Op->getOperand(DotOps.size()));
+ return DAG.getNode(Op->getOpcode(), DL, MVT::Other, DotOps);
+}
+
// Lower an SME LDR/STR ZA intrinsic
// Case 1: If the vector number (vecnum) is an immediate in range, it gets
// folded into the instruction
@@ -5898,6 +5940,22 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_VOID(SDValue Op,
Op->getOperand(0), // Chain
DAG.getTargetConstant((int32_t)(AArch64SVCR::SVCRZA), DL, MVT::i32),
DAG.getConstant(AArch64SME::Always, DL, MVT::i64));
+ case Intrinsic::aarch64_sme_uvdot_lane_za32_vg1x4:
+ case Intrinsic::aarch64_sme_suvdot_lane_za32_vg1x4:
+ case Intrinsic::aarch64_sme_usvdot_lane_za32_vg1x4:
+ case Intrinsic::aarch64_sme_svdot_lane_za32_vg1x4:
+ case Intrinsic::aarch64_sme_usdot_lane_za32_vg1x4:
+ case Intrinsic::aarch64_sme_udot_lane_za32_vg1x4:
+ case Intrinsic::aarch64_sme_sudot_lane_za32_vg1x4:
+ case Intrinsic::aarch64_sme_sdot_lane_za32_vg1x4:
+ return TryLowerMultiVecSMEDotIntrinsic(Op, DAG, 4);
+ case Intrinsic::aarch64_sme_uvdot_lane_za32_vg1x2:
+ case Intrinsic::aarch64_sme_sdot_lane_za32_vg1x2:
+ case Intrinsic::aarch64_sme_svdot_lane_za32_vg1x2:
+ case Intrinsic::aarch64_sme_usdot_lane_za32_vg1x2:
+ case Intrinsic::aarch64_sme_sudot_lane_za32_vg1x2:
+ case Intrinsic::aarch64_sme_udot_lane_za32_vg1x2:
+ return TryLowerMultiVecSMEDotIntrinsic(Op, DAG, 2);
}
}
@@ -7639,6 +7697,11 @@ static unsigned getIntrinsicID(const SDNode *N) {
return IID;
return Intrinsic::not_intrinsic;
}
+ case ISD::INTRINSIC_W_CHAIN: {
+ unsigned IID = N->getConstantOperandVal(1);
+ if (IID < Intrinsic::num_intrinsics)
+ return IID;
+ return Intrinsic::not_intrinsic;
+ }
}
}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index d11da64d3f84eb..c7a70ab9f3c898 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -478,6 +478,9 @@ enum NodeType : unsigned {
SME_ZA_LDR,
SME_ZA_STR,
+ FORM_STRIDED_TUPLE_X2,
+ FORM_STRIDED_TUPLE_X4,
+
// NEON Load/Store with post-increment base updates
LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
LD3post,
diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
index a6ba6ddc30b277..5fb44fe5146d3c 100644
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -28,6 +28,17 @@ def AArch64_restore_zt : SDNode<"AArch64ISD::RESTORE_ZT", SDTypeProfile<0, 2,
def AArch64_save_zt : SDNode<"AArch64ISD::SAVE_ZT", SDTypeProfile<0, 2,
[SDTCisInt<0>, SDTCisPtrTy<1>]>,
[SDNPHasChain, SDNPSideEffect, SDNPMayStore]>;
+
+def SDT_FORM_STRIDED_TUPLE_X2 : SDTypeProfile<2, 2,
+ [SDTCisVec<0>, SDTCisSameAs<0, 1>,
+ SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>]>;
+
+def SDT_FORM_STRIDED_TUPLE_X4 : SDTypeProfile<4, 4,
+ [SDTCisVec<0>, SDTCisSameAs<0, 1>,
+ SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>,
+ SDTCisSameAs<0, 4>, SDTCisSameAs<0, 5>,
+ SDTCisSameAs<0, 6>, SDTCisSameAs<0, 7>]>;
+
def AArch64CoalescerBarrier
: SDNode<"AArch64ISD::COALESCER_BARRIER", SDTypeProfile<1, 1, []>, [SDNPOptInGlue, SDNPOutGlue]>;
diff --git a/llvm/lib/Target/AArch64/SMEInstrFormats.td b/llvm/lib/Target/AArch64/SMEInstrFormats.td
index 8c256b5818ee88..41508bce651c6b 100644
--- a/llvm/lib/Target/AArch64/SMEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SMEInstrFormats.td
@@ -34,6 +34,18 @@ def tileslicerange0s4 : ComplexPattern<i32, 2, "SelectSMETileSlice<0, 4>", []>;
def am_sme_indexed_b4 :ComplexPattern<iPTR, 2, "SelectAddrModeIndexedSVE<0,15>", [], [SDNPWantRoot]>;
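+// Pseudos carrying a strided multi-vector register tuple; expanded after
+// register allocation by AArch64ExpandPseudoInsts.cpp.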
+def FORM_STRIDED_TUPLE_X2_PSEUDO :
+ Pseudo<(outs ZPR2Mul2:$tup),
+ (ins ZPR:$zn0, ZPR:$zn1), []>, Sched<[]> {
+ let hasSideEffects = 0;
+}
+
+def FORM_STRIDED_TUPLE_X4_PSEUDO :
+ Pseudo<(outs ZPR4Mul4:$tup),
+ (ins ZPR:$zn0, ZPR:$zn1, ZPR:$zn2, ZPR:$zn3), []>, Sched<[]> {
+ let hasSideEffects = 0;
+}
+
def SDTZALoadStore : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisPtrTy<1>, SDTCisInt<2>]>;
def AArch64SMELdr : SDNode<"AArch64ISD::SME_ZA_LDR", SDTZALoadStore,
[SDNPHasChain, SDNPSideEffect, SDNPMayLoad]>;
diff --git a/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll b/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
index 1e835c92ba9e4c..eddff238ace031 100644
--- a/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
+++ b/llvm/test/CodeGen/AArch64/sme2-intrinsics-int-dots.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -force-streaming -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -force-streaming -enable-subreg-liveness -verify-machineinstrs < %s | FileCheck %s
target triple="aarch64-linux-gnu"
@@ -26,18 +26,18 @@ define void @udot_multi_za32_u16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <
define void @udot_multi_za32_u16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
; CHECK-LABEL: udot_multi_za32_u16_vg1x4:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z26.d, z7.d
-; CHECK-NEXT: mov z31.d, z4.d
-; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: mov z26.d, z7.d
; CHECK-NEXT: mov z25.d, z6.d
-; CHECK-NEXT: mov z30.d, z3.d
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: mov z24.d, z5.d
-; CHECK-NEXT: mov z29.d, z2.d
; CHECK-NEXT: ld1h { z27.h }, p0/z, [x1]
-; CHECK-NEXT: mov z28.d, z1.d
-; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z28.h - z31.h }, { z24.h - z27.h }
-; CHECK-NEXT: udot za.s[w8, 7, vgx4], { z28.h - z31.h }, { z24.h - z27.h }
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
+; CHECK-NEXT: udot za.s[w8, 7, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT: ret
<vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7) #0 {
call void @llvm.aarch64.sme.udot.za32.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
@@ -68,18 +68,18 @@ define void @udot_multi_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <v
define void @udot_multi_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
; CHECK-LABEL: udot_multi_za32_u8_vg1x4:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z26.d, z7.d
-; CHECK-NEXT: mov z31.d, z4.d
-; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: ptrue p0.b
+; CHECK-NEXT: mov z26.d, z7.d
; CHECK-NEXT: mov z25.d, z6.d
-; CHECK-NEXT: mov z30.d, z3.d
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: mov z24.d, z5.d
-; CHECK-NEXT: mov z29.d, z2.d
; CHECK-NEXT: ld1b { z27.b }, p0/z, [x1]
-; CHECK-NEXT: mov z28.d, z1.d
-; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z28.b - z31.b }, { z24.b - z27.b }
-; CHECK-NEXT: udot za.s[w8, 7, vgx4], { z28.b - z31.b }, { z24.b - z27.b }
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: udot za.s[w8, 0, vgx4], { z4.b - z7.b }, { z24.b - z27.b }
+; CHECK-NEXT: udot za.s[w8, 7, vgx4], { z4.b - z7.b }, { z24.b - z27.b }
; CHECK-NEXT: ret
<vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zn5, <vscale x 16 x i8> %zn6, <vscale x 16 x i8> %zn7) #0 {
call void @llvm.aarch64.sme.udot.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
@@ -110,18 +110,18 @@ define void @udot_multi_za64_u16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <
define void @udot_multi_za64_u16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
; CHECK-LABEL: udot_multi_za64_u16_vg1x4:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z26.d, z7.d
-; CHECK-NEXT: mov z31.d, z4.d
-; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: mov z26.d, z7.d
; CHECK-NEXT: mov z25.d, z6.d
-; CHECK-NEXT: mov z30.d, z3.d
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: mov z24.d, z5.d
-; CHECK-NEXT: mov z29.d, z2.d
; CHECK-NEXT: ld1h { z27.h }, p0/z, [x1]
-; CHECK-NEXT: mov z28.d, z1.d
-; CHECK-NEXT: udot za.d[w8, 0, vgx4], { z28.h - z31.h }, { z24.h - z27.h }
-; CHECK-NEXT: udot za.d[w8, 7, vgx4], { z28.h - z31.h }, { z24.h - z27.h }
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: udot za.d[w8, 0, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
+; CHECK-NEXT: udot za.d[w8, 7, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT: ret
<vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7) #1 {
call void @llvm.aarch64.sme.udot.za64.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
@@ -152,18 +152,18 @@ define void @usdot_multi_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <
define void @usdot_multi_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
; CHECK-LABEL: usdot_multi_za32_u8_vg1x4:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z26.d, z7.d
-; CHECK-NEXT: mov z31.d, z4.d
-; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: ptrue p0.b
+; CHECK-NEXT: mov z26.d, z7.d
; CHECK-NEXT: mov z25.d, z6.d
-; CHECK-NEXT: mov z30.d, z3.d
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: mov z24.d, z5.d
-; CHECK-NEXT: mov z29.d, z2.d
; CHECK-NEXT: ld1b { z27.b }, p0/z, [x1]
-; CHECK-NEXT: mov z28.d, z1.d
-; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z28.b - z31.b }, { z24.b - z27.b }
-; CHECK-NEXT: usdot za.s[w8, 7, vgx4], { z28.b - z31.b }, { z24.b - z27.b }
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: usdot za.s[w8, 0, vgx4], { z4.b - z7.b }, { z24.b - z27.b }
+; CHECK-NEXT: usdot za.s[w8, 7, vgx4], { z4.b - z7.b }, { z24.b - z27.b }
; CHECK-NEXT: ret
<vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zn5, <vscale x 16 x i8> %zn6, <vscale x 16 x i8> %zn7) #0 {
call void @llvm.aarch64.sme.usdot.za32.vg1x4.nxv16i8(i32 %slice, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
@@ -197,18 +197,18 @@ define void @sdot_multi_za32_u16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <
define void @sdot_multi_za32_u16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
; CHECK-LABEL: sdot_multi_za32_u16_vg1x4:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z26.d, z7.d
-; CHECK-NEXT: mov z31.d, z4.d
-; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: mov z26.d, z7.d
; CHECK-NEXT: mov z25.d, z6.d
-; CHECK-NEXT: mov z30.d, z3.d
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: mov z24.d, z5.d
-; CHECK-NEXT: mov z29.d, z2.d
; CHECK-NEXT: ld1h { z27.h }, p0/z, [x1]
-; CHECK-NEXT: mov z28.d, z1.d
-; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z28.h - z31.h }, { z24.h - z27.h }
-; CHECK-NEXT: sdot za.s[w8, 7, vgx4], { z28.h - z31.h }, { z24.h - z27.h }
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
+; CHECK-NEXT: sdot za.s[w8, 7, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT: ret
<vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zn5, <vscale x 8 x i16> %zn6, <vscale x 8 x i16> %zn7) #0 {
call void @llvm.aarch64.sme.sdot.za32.vg1x4.nxv8i16(i32 %slice, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3,
@@ -239,18 +239,18 @@ define void @sdot_multi_za32_u8_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <v
define void @sdot_multi_za32_u8_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3,
; CHECK-LABEL: sdot_multi_za32_u8_vg1x4:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov z26.d, z7.d
-; CHECK-NEXT: mov z31.d, z4.d
-; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: ptrue p0.b
+; CHECK-NEXT: mov z26.d, z7.d
; CHECK-NEXT: mov z25.d, z6.d
-; CHECK-NEXT: mov z30.d, z3.d
+; CHECK-NEXT: mov z7.d, z4.d
+; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: mov z24.d, z5.d
-; CHECK-NEXT: mov z29.d, z2.d
; CHECK-NEXT: ld1b { z27.b }, p0/z, [x1]
-; CHECK-NEXT: mov z28.d, z1.d
-; CHECK-NEXT: sdot za.s[w8, 0, vgx4], { z28.b - z31.b }, { z24.b - z27.b }
-; CHECK-NEXT: sdot za.s[w8, 7, vgx4], { z28.b - z31.b }, { z24.b - z27.b }
+; CHECK-NEXT: mov z6.d, z3.d
+; CHECK-NEXT: mov z5.d, z2.d
+; CHECK-NEXT: mov z4.d, z1.d
+; CHECK-NEXT: sdot za.s[w8,...
[truncated]
``````````
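To illustrate the expansion (a hypothetical register assignment, not output
from this patch): expandFormTuplePseudo emits an unpredicated ORR (the MOV
alias) only for lanes whose input register differs from the corresponding
subregister of the result tuple. For example, if the X2 pseudo's result is
allocated to z0_z1 while its inputs live in z0 and z8:

```
// zsub0 already holds z0, so no copy is emitted for lane 0.
// zsub1 must receive z8:
orr z1.d, z8.d, z8.d   // assembler alias: mov z1.d, z8.d
// With inputs already in z0 and z1, the pseudo expands to nothing.
```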
https://github.com/llvm/llvm-project/pull/116399