[llvm] [RISCV] Support XSfmm LLVM IR and CodeGen (PR #143069)
Brandon Wu via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 22 07:52:46 PDT 2025
https://github.com/4vtomat updated https://github.com/llvm/llvm-project/pull/143069
>From 9feb45832f2b521dbb64ecbbe62286e055bcbec3 Mon Sep 17 00:00:00 2001
From: Brandon Wu <songwu0813 at gmail.com>
Date: Thu, 5 Jun 2025 02:51:08 -0700
Subject: [PATCH 1/2] [RISCV] Add XSfmm pseudo instruction and vset* insertion
support
This patch supports the naive vset* insertion: if the state (tm, tn, tk,
sew, widen) changes, it emits all of the vset* instructions that are
needed; partial compatibility is not supported yet. A sketch of the
scheme follows below.
Co-authored-by: Piyou Chen <piyou.chen at sifive.com>
---
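For illustration, a minimal sketch of the state comparison this implies; the
types are hypothetical (the real pass folds these fields into its
VSETVLIInfo, see RISCVInsertVSETVLI.cpp below):

  #include <cstdint>

  struct XSfmmState {
    uint64_t TM, TN, TK; // tile dimensions
    unsigned SEW;        // element width in bits
    unsigned TWiden;     // widen factor
    bool operator==(const XSfmmState &O) const {
      return TM == O.TM && TN == O.TN && TK == O.TK && SEW == O.SEW &&
             TWiden == O.TWiden;
    }
  };

  // Naive scheme: no partial compatibility, so any difference in the state
  // re-emits the full sf.vsettnt/sf.vsettm/sf.vsettk sequence.
  inline bool needsFullVsetSequence(const XSfmmState &Prev,
                                    const XSfmmState &Cur) {
    return !(Prev == Cur);
  }
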
.../llvm/TargetParser/RISCVTargetParser.h | 2 +
.../Target/RISCV/AsmParser/RISCVAsmParser.cpp | 4 +
.../Target/RISCV/MCTargetDesc/RISCVBaseInfo.h | 66 ++-
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 223 +++++++-
llvm/lib/Target/RISCV/RISCVInstrFormats.td | 16 +
llvm/lib/Target/RISCV/RISCVInstrInfo.cpp | 8 +
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 22 +-
llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td | 9 +-
llvm/lib/Target/RISCV/RISCVInstrInfoXSfmm.td | 138 +++++
llvm/lib/Target/RISCV/RISCVInstrPredicates.td | 36 +-
llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp | 4 +
llvm/lib/TargetParser/RISCVTargetParser.cpp | 4 +
.../RISCV/rvv/sifive-xsfmm-vset-insert.mir | 523 ++++++++++++++++++
13 files changed, 1027 insertions(+), 28 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive-xsfmm-vset-insert.mir
diff --git a/llvm/include/llvm/TargetParser/RISCVTargetParser.h b/llvm/include/llvm/TargetParser/RISCVTargetParser.h
index b1fca55a5afcd..2ac58a539d5ee 100644
--- a/llvm/include/llvm/TargetParser/RISCVTargetParser.h
+++ b/llvm/include/llvm/TargetParser/RISCVTargetParser.h
@@ -161,6 +161,8 @@ inline static bool isAltFmt(unsigned VType) { return VType & 0x100; }
LLVM_ABI void printVType(unsigned VType, raw_ostream &OS);
+LLVM_ABI void printXSfmmVType(unsigned VType, raw_ostream &OS);
+
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul);
LLVM_ABI std::optional<VLMUL> getSameRatioLMUL(unsigned SEW, VLMUL VLMUL,
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 2b5f18d611524..a449e878b82eb 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -1652,6 +1652,10 @@ bool RISCVAsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
return generateImmOutOfRangeError(
Operands, ErrorInfo, -1, (1 << 5) - 1,
"immediate must be non-zero in the range");
+ case Match_InvalidXSfmmVType: {
+ SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
+ return generateXSfmmVTypeError(ErrorLoc);
+ }
case Match_InvalidVTypeI: {
SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
return generateVTypeError(ErrorLoc);
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index 70b7c430c410e..503dab2f5f460 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -142,6 +142,22 @@ enum {
ReadsPastVLShift = DestEEWShift + 2,
ReadsPastVLMask = 1ULL << ReadsPastVLShift,
+
+ // 0 -> Don't care about altfmt bit in VTYPE.
+ // 1 -> Is not altfmt.
+ // 2 -> Is altfmt(BF16).
+ AltFmtTypeShift = ReadsPastVLShift + 1,
+ AltFmtTypeMask = 3ULL << AltFmtTypeShift,
+
+ // XSfmmbase
+ HasTWidenOpShift = AltFmtTypeShift + 2,
+ HasTWidenOpMask = 1ULL << HasTWidenOpShift,
+
+ HasTMOpShift = HasTWidenOpShift + 1,
+ HasTMOpMask = 1ULL << HasTMOpShift,
+
+ HasTKOpShift = HasTMOpShift + 1,
+ HasTKOpMask = 1ULL << HasTKOpShift,
};
// Helper functions to read TSFlags.
@@ -183,6 +199,11 @@ static inline bool hasRoundModeOp(uint64_t TSFlags) {
return TSFlags & HasRoundModeOpMask;
}
+enum class AltFmtType { DontCare, NotAltFmt, AltFmt };
+static inline AltFmtType getAltFmtType(uint64_t TSFlags) {
+ return static_cast<AltFmtType>((TSFlags & AltFmtTypeMask) >> AltFmtTypeShift);
+}
+
/// \returns true if this instruction uses vxrm
static inline bool usesVXRM(uint64_t TSFlags) { return TSFlags & UsesVXRMMask; }
@@ -204,11 +225,47 @@ static inline bool readsPastVL(uint64_t TSFlags) {
return TSFlags & ReadsPastVLMask;
}
+// XSfmmbase
+static inline bool hasTWidenOp(uint64_t TSFlags) {
+ return TSFlags & HasTWidenOpMask;
+}
+
+static inline bool hasTMOp(uint64_t TSFlags) { return TSFlags & HasTMOpMask; }
+
+static inline bool hasTKOp(uint64_t TSFlags) { return TSFlags & HasTKOpMask; }
+
+static inline unsigned getTNOpNum(const MCInstrDesc &Desc) {
+ const uint64_t TSFlags = Desc.TSFlags;
+ assert(hasTWidenOp(TSFlags) && hasVLOp(TSFlags));
+ unsigned Offset = 3;
+ if (hasTKOp(TSFlags))
+ Offset = 4;
+ return Desc.getNumOperands() - Offset;
+}
+
+static inline unsigned getTMOpNum(const MCInstrDesc &Desc) {
+ const uint64_t TSFlags = Desc.TSFlags;
+ assert(hasTWidenOp(TSFlags) && hasTMOp(TSFlags));
+ if (hasTKOp(TSFlags))
+ return Desc.getNumOperands() - 5;
+ // vtzero.t
+ return Desc.getNumOperands() - 4;
+}
+
+static inline unsigned getTKOpNum(const MCInstrDesc &Desc) {
+ const uint64_t TSFlags = Desc.TSFlags;
+ assert(hasTWidenOp(TSFlags) && hasTKOp(TSFlags));
+ return Desc.getNumOperands() - 3;
+}
+
static inline unsigned getVLOpNum(const MCInstrDesc &Desc) {
const uint64_t TSFlags = Desc.TSFlags;
// This method is only called if we expect to have a VL operand, and all
// instructions with VL also have SEW.
assert(hasSEWOp(TSFlags) && hasVLOp(TSFlags));
+  // In XSfmmbase, TN is an alias for VL, so here we use the same TSFlags bit.
+ if (hasTWidenOp(TSFlags))
+ return getTNOpNum(Desc);
unsigned Offset = 2;
if (hasVecPolicyOp(TSFlags))
Offset = 3;
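
For reference, the trailing operand layout these accessors assume for XSfmm
pseudos, worked out from the offsets above (operand names as declared in
RISCVInstrInfoXSfmm.td later in this patch):

  // With a TK operand (the matmul pseudos), counting from n = getNumOperands():
  //   ..., $atm, $atn, $atk, $sew, $twiden
  //        n-5   n-4   n-3   n-2   n-1
  // Without a TK operand (vtzero.t):
  //   ..., $atm, $atn, $sew, $twiden
  //        n-4   n-3   n-2   n-1
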
@@ -226,7 +283,7 @@ static inline unsigned getSEWOpNum(const MCInstrDesc &Desc) {
const uint64_t TSFlags = Desc.TSFlags;
assert(hasSEWOp(TSFlags));
unsigned Offset = 1;
- if (hasVecPolicyOp(TSFlags))
+ if (hasVecPolicyOp(TSFlags) || hasTWidenOp(TSFlags))
Offset = 2;
return Desc.getNumOperands() - Offset;
}
@@ -243,6 +300,9 @@ static inline int getFRMOpNum(const MCInstrDesc &Desc) {
if (!hasRoundModeOp(TSFlags) || usesVXRM(TSFlags))
return -1;
+ if (hasTWidenOp(TSFlags) && hasTMOp(TSFlags))
+ return getTMOpNum(Desc) - 1;
+
// The operand order
// --------------------------------------
// | n-1 (if any) | n-2 | n-3 | n-4 |
@@ -385,7 +445,9 @@ enum OperandType : unsigned {
OPERAND_SEW_MASK,
// Vector rounding mode for VXRM or FRM.
OPERAND_VEC_RM,
- OPERAND_LAST_RISCV_IMM = OPERAND_VEC_RM,
+ // Vtype operand for XSfmm extension.
+ OPERAND_XSFMM_VTYPE,
+ OPERAND_LAST_RISCV_IMM = OPERAND_XSFMM_VTYPE,
// Operand is either a register or uimm5; this is used by V extension pseudo
// instructions to represent a value that can be passed as AVL to either vsetvli
// or vsetivli.
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 90e1c47a71c89..2ca28539f18c2 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -164,10 +164,13 @@ struct DemandedFields {
// If this is true, we demand that VTYPE is set to some legal state, i.e. that
// vill is unset.
bool VILL = false;
+ bool UseTWiden = false;
+ bool UseAltFmt = false;
// Return true if any part of VTYPE was used
bool usedVTYPE() const {
- return SEW || LMUL || SEWLMULRatio || TailPolicy || MaskPolicy || VILL;
+ return SEW || LMUL || SEWLMULRatio || TailPolicy || MaskPolicy || VILL ||
+ UseTWiden || UseAltFmt;
}
// Return true if any property of VL was used
@@ -183,6 +186,8 @@ struct DemandedFields {
TailPolicy = true;
MaskPolicy = true;
VILL = true;
+ UseTWiden = true;
+ UseAltFmt = true;
}
// Mark all VL properties as demanded
@@ -208,6 +213,8 @@ struct DemandedFields {
TailPolicy |= B.TailPolicy;
MaskPolicy |= B.MaskPolicy;
VILL |= B.VILL;
+ UseAltFmt |= B.UseAltFmt;
+ UseTWiden |= B.UseTWiden;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -254,7 +261,9 @@ struct DemandedFields {
OS << "SEWLMULRatio=" << SEWLMULRatio << ", ";
OS << "TailPolicy=" << TailPolicy << ", ";
OS << "MaskPolicy=" << MaskPolicy << ", ";
- OS << "VILL=" << VILL;
+ OS << "VILL=" << VILL << ", ";
+ OS << "UseAltFmt=" << UseAltFmt << ", ";
+ OS << "UseTWiden=" << UseTWiden;
OS << "}";
}
#endif
@@ -324,6 +333,15 @@ static bool areCompatibleVTYPEs(uint64_t CurVType, uint64_t NewVType,
if (Used.MaskPolicy && RISCVVType::isMaskAgnostic(CurVType) !=
RISCVVType::isMaskAgnostic(NewVType))
return false;
+ if (Used.UseTWiden && (RISCVVType::hasXSfmmWiden(CurVType) !=
+ RISCVVType::hasXSfmmWiden(NewVType) ||
+ (RISCVVType::hasXSfmmWiden(CurVType) &&
+ RISCVVType::getXSfmmWiden(CurVType) !=
+ RISCVVType::getXSfmmWiden(NewVType))))
+ return false;
+ if (Used.UseAltFmt &&
+ RISCVVType::isAltFmt(CurVType) != RISCVVType::isAltFmt(NewVType))
+ return false;
return true;
}
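
A worked example with encodings that appear in the MIR test below: vtype 1032
(e16, w2) vs 1544 (e16, w4) is incompatible when UseTWiden is demanded, since
both have a widen field but with different values; 1032 vs 1288 (e16, w2 with
the altfmt bit 0x100 set) is incompatible when UseAltFmt is demanded.
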
@@ -475,6 +493,11 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
Res.TailPolicy = false;
}
+ Res.UseAltFmt = RISCVII::getAltFmtType(MI.getDesc().TSFlags) !=
+ RISCVII::AltFmtType::DontCare;
+ Res.UseTWiden = RISCVII::hasTWidenOp(MI.getDesc().TSFlags) ||
+ RISCVInstrInfo::isXSfmmVectorConfigInstr(MI);
+
return Res;
}
@@ -506,6 +529,8 @@ class VSETVLIInfo {
uint8_t TailAgnostic : 1;
uint8_t MaskAgnostic : 1;
uint8_t SEWLMULRatioOnly : 1;
+ uint8_t AltFmt : 1;
+ uint8_t TWiden : 3;
public:
VSETVLIInfo()
@@ -582,6 +607,8 @@ class VSETVLIInfo {
RISCVVType::VLMUL getVLMUL() const { return VLMul; }
bool getTailAgnostic() const { return TailAgnostic; }
bool getMaskAgnostic() const { return MaskAgnostic; }
+ bool getAltFmt() const { return AltFmt; }
+ unsigned getTWiden() const { return TWiden; }
bool hasNonZeroAVL(const LiveIntervals *LIS) const {
if (hasAVLImm())
@@ -643,21 +670,31 @@ class VSETVLIInfo {
SEW = RISCVVType::getSEW(VType);
TailAgnostic = RISCVVType::isTailAgnostic(VType);
MaskAgnostic = RISCVVType::isMaskAgnostic(VType);
+ AltFmt = RISCVVType::isAltFmt(VType);
+ TWiden =
+ RISCVVType::hasXSfmmWiden(VType) ? RISCVVType::getXSfmmWiden(VType) : 0;
}
- void setVTYPE(RISCVVType::VLMUL L, unsigned S, bool TA, bool MA) {
+ void setVTYPE(RISCVVType::VLMUL L, unsigned S, bool TA, bool MA, bool Altfmt,
+ unsigned W) {
assert(isValid() && !isUnknown() &&
"Can't set VTYPE for uninitialized or unknown");
VLMul = L;
SEW = S;
TailAgnostic = TA;
MaskAgnostic = MA;
+ AltFmt = Altfmt;
+ TWiden = W;
}
+ void setAltFmt(bool AF) { AltFmt = AF; }
+
void setVLMul(RISCVVType::VLMUL VLMul) { this->VLMul = VLMul; }
unsigned encodeVTYPE() const {
assert(isValid() && !isUnknown() && !SEWLMULRatioOnly &&
"Can't encode VTYPE for uninitialized or unknown");
+ if (TWiden != 0)
+ return RISCVVType::encodeXSfmmVType(SEW, TWiden, AltFmt);
return RISCVVType::encodeVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic);
}
@@ -670,9 +707,9 @@ class VSETVLIInfo {
"Can't compare VTYPE in unknown state");
assert(!SEWLMULRatioOnly && !Other.SEWLMULRatioOnly &&
"Can't compare when only LMUL/SEW ratio is valid.");
- return std::tie(VLMul, SEW, TailAgnostic, MaskAgnostic) ==
+ return std::tie(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt, TWiden) ==
std::tie(Other.VLMul, Other.SEW, Other.TailAgnostic,
- Other.MaskAgnostic);
+ Other.MaskAgnostic, Other.AltFmt, Other.TWiden);
}
unsigned getSEWLMULRatio() const {
@@ -821,7 +858,9 @@ class VSETVLIInfo {
<< "SEW=e" << (unsigned)SEW << ", "
<< "TailAgnostic=" << (bool)TailAgnostic << ", "
<< "MaskAgnostic=" << (bool)MaskAgnostic << ", "
- << "SEWLMULRatioOnly=" << (bool)SEWLMULRatioOnly << "}";
+ << "SEWLMULRatioOnly=" << (bool)SEWLMULRatioOnly << ", "
+ << "TWiden=" << (unsigned)TWiden << ", "
+ << "AltFmt=" << (bool)AltFmt << "}";
}
#endif
};
@@ -849,6 +888,11 @@ struct BlockData {
BlockData() = default;
};
+enum TKTMMode {
+ VSETTK = 0,
+ VSETTM = 1,
+};
+
class RISCVInsertVSETVLI : public MachineFunctionPass {
const RISCVSubtarget *ST;
const TargetInstrInfo *TII;
@@ -904,6 +948,7 @@ class RISCVInsertVSETVLI : public MachineFunctionPass {
VSETVLIInfo getInfoForVSETVLI(const MachineInstr &MI) const;
VSETVLIInfo computeInfoForInstr(const MachineInstr &MI) const;
void forwardVSETVLIAVL(VSETVLIInfo &Info) const;
+ bool insertVSETMTK(MachineBasicBlock &MBB, TKTMMode Mode) const;
};
} // end anonymous namespace
@@ -941,6 +986,18 @@ RISCVInsertVSETVLI::getInfoForVSETVLI(const MachineInstr &MI) const {
VSETVLIInfo NewInfo;
if (MI.getOpcode() == RISCV::PseudoVSETIVLI) {
NewInfo.setAVLImm(MI.getOperand(1).getImm());
+ } else if (RISCVInstrInfo::isXSfmmVectorConfigTNInstr(MI)) {
+ assert(MI.getOpcode() == RISCV::PseudoSF_VSETTNT ||
+ MI.getOpcode() == RISCV::PseudoSF_VSETTNTX0);
+ switch (MI.getOpcode()) {
+ case RISCV::PseudoSF_VSETTNTX0:
+ NewInfo.setAVLVLMAX();
+ break;
+ case RISCV::PseudoSF_VSETTNT:
+ Register ATNReg = MI.getOperand(1).getReg();
+ NewInfo.setAVLRegDef(getVNInfoFromReg(ATNReg, MI, LIS), ATNReg);
+ break;
+ }
} else {
assert(MI.getOpcode() == RISCV::PseudoVSETVLI ||
MI.getOpcode() == RISCV::PseudoVSETVLIX0);
@@ -1001,11 +1058,36 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
RISCVVType::VLMUL VLMul = RISCVII::getLMul(TSFlags);
+ bool AltFmt = RISCVII::getAltFmtType(TSFlags) == RISCVII::AltFmtType::AltFmt;
+ InstrInfo.setAltFmt(AltFmt);
+
unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm();
// A Log2SEW of 0 is an operation on mask registers only.
unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
+ if (RISCVII::hasTWidenOp(TSFlags)) {
+ assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
+
+ const MachineOperand &TWidenOp =
+ MI.getOperand(MI.getNumExplicitOperands() - 1);
+ unsigned TWiden = TWidenOp.getImm();
+
+ InstrInfo.setAVLVLMAX();
+ if (RISCVII::hasVLOp(TSFlags)) {
+ const MachineOperand &TNOp =
+ MI.getOperand(RISCVII::getTNOpNum(MI.getDesc()));
+
+ if (TNOp.getReg().isVirtual())
+ InstrInfo.setAVLRegDef(getVNInfoFromReg(TNOp.getReg(), MI, LIS),
+ TNOp.getReg());
+ }
+
+ InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt, TWiden);
+
+ return InstrInfo;
+ }
+
if (RISCVII::hasVLOp(TSFlags)) {
const MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
if (VLOp.isImm()) {
@@ -1041,7 +1123,9 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
assert(SEW == EEW && "Initial SEW doesn't match expected EEW");
}
#endif
- InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic);
+ // TODO: Propagate the twiden from previous vtype for potential reuse.
+ InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt,
+ /*TWiden*/ 0);
forwardVSETVLIAVL(InstrInfo);
@@ -1049,10 +1133,33 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
}
void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator InsertPt, DebugLoc DL,
- const VSETVLIInfo &Info, const VSETVLIInfo &PrevInfo) {
-
+ MachineBasicBlock::iterator InsertPt,
+ DebugLoc DL, const VSETVLIInfo &Info,
+ const VSETVLIInfo &PrevInfo) {
++NumInsertedVSETVL;
+
+ if (Info.getTWiden()) {
+ if (Info.hasAVLVLMAX()) {
+ Register DestReg = MRI->createVirtualRegister(&RISCV::GPRNoX0RegClass);
+ auto MI = BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoSF_VSETTNTX0))
+ .addReg(DestReg, RegState::Define | RegState::Dead)
+ .addReg(RISCV::X0, RegState::Kill)
+ .addImm(Info.encodeVTYPE());
+ if (LIS) {
+ LIS->InsertMachineInstrInMaps(*MI);
+ LIS->createAndComputeVirtRegInterval(DestReg);
+ }
+ } else {
+ auto MI = BuildMI(MBB, InsertPt, DL, TII->get(RISCV::PseudoSF_VSETTNT))
+ .addReg(RISCV::X0, RegState::Define | RegState::Dead)
+ .addReg(Info.getAVLReg())
+ .addImm(Info.encodeVTYPE());
+ if (LIS)
+ LIS->InsertMachineInstrInMaps(*MI);
+ }
+ return;
+ }
+
if (PrevInfo.isValid() && !PrevInfo.isUnknown()) {
// Use X0, X0 form if the AVL is the same and the SEW+LMUL gives the same
// VLMAX.
@@ -1194,7 +1301,8 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
// be coalesced into another vsetvli since we won't demand any fields.
VSETVLIInfo NewInfo; // Need a new VSETVLIInfo to clear SEWLMULRatioOnly
NewInfo.setAVLImm(1);
- NewInfo.setVTYPE(RISCVVType::LMUL_1, /*sew*/ 8, /*ta*/ true, /*ma*/ true);
+ NewInfo.setVTYPE(RISCVVType::LMUL_1, /*sew*/ 8, /*ta*/ true, /*ma*/ true,
+ /*AltFmt*/ false, /*W*/ 0);
Info = NewInfo;
return;
}
@@ -1236,7 +1344,9 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
(Demanded.TailPolicy ? IncomingInfo : Info).getTailAgnostic() ||
IncomingInfo.getTailAgnostic(),
(Demanded.MaskPolicy ? IncomingInfo : Info).getMaskAgnostic() ||
- IncomingInfo.getMaskAgnostic());
+ IncomingInfo.getMaskAgnostic(),
+ (Demanded.UseAltFmt ? IncomingInfo : Info).getAltFmt(),
+ Demanded.UseTWiden ? IncomingInfo.getTWiden() : 0);
// If we only knew the sew/lmul ratio previously, replace the VTYPE but keep
// the AVL.
@@ -1289,7 +1399,8 @@ bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB,
if (RISCVInstrInfo::isVectorConfigInstr(MI) ||
RISCVII::hasSEWOp(MI.getDesc().TSFlags) ||
- isVectorCopy(ST->getRegisterInfo(), MI))
+ isVectorCopy(ST->getRegisterInfo(), MI) ||
+ RISCVInstrInfo::isXSfmmVectorConfigInstr(MI))
HadVectorOp = true;
transferAfter(Info, MI);
@@ -1671,6 +1782,10 @@ void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
};
for (MachineInstr &MI : make_early_inc_range(reverse(MBB))) {
+ // TODO: Support XSfmm.
+ if (RISCVII::hasTWidenOp(MI.getDesc().TSFlags) ||
+ RISCVInstrInfo::isXSfmmVectorConfigInstr(MI))
+ continue;
if (!RISCVInstrInfo::isVectorConfigInstr(MI)) {
Used.doUnion(getDemanded(MI, ST));
@@ -1784,6 +1899,83 @@ void RISCVInsertVSETVLI::insertReadVL(MachineBasicBlock &MBB) {
}
}
+static void shrinkIntervalAndRemoveDeadMI(MachineOperand &MO,
+ LiveIntervals *LIS,
+ const TargetInstrInfo *TII) {
+ Register Reg = MO.getReg();
+ MO.setReg(RISCV::NoRegister);
+ MO.setIsKill(false);
+
+ if (!LIS)
+ return;
+
+ LiveInterval &LI = LIS->getInterval(Reg);
+
+ // Erase the AVL operand from the instruction.
+ SmallVector<MachineInstr *> DeadMIs;
+ LIS->shrinkToUses(&LI, &DeadMIs);
+ // TODO: Enable this once needVSETVLIPHI is supported.
+ // SmallVector<LiveInterval *> SplitLIs;
+ // LIS->splitSeparateComponents(LI, SplitLIs);
+
+ for (MachineInstr *DeadMI : DeadMIs) {
+ if (!TII->isAddImmediate(*DeadMI, Reg))
+ continue;
+ LIS->RemoveMachineInstrFromMaps(*DeadMI);
+ DeadMI->eraseFromParent();
+ }
+}
+
+bool RISCVInsertVSETVLI::insertVSETMTK(MachineBasicBlock &MBB,
+ TKTMMode Mode) const {
+
+ bool Changed = false;
+ for (auto &MI : MBB) {
+ uint64_t TSFlags = MI.getDesc().TSFlags;
+ if (RISCVInstrInfo::isXSfmmVectorConfigTMTKInstr(MI) ||
+ !RISCVII::hasSEWOp(TSFlags) || !RISCVII::hasTWidenOp(TSFlags))
+ continue;
+
+ VSETVLIInfo CurrInfo = computeInfoForInstr(MI);
+
+ if (Mode == VSETTK && !RISCVII::hasTKOp(TSFlags))
+ continue;
+
+ if (Mode == VSETTM && !RISCVII::hasTMOp(TSFlags))
+ continue;
+
+ unsigned OpNum = 0;
+ unsigned Opcode = 0;
+ switch (Mode) {
+ case VSETTK:
+ OpNum = RISCVII::getTKOpNum(MI.getDesc());
+ Opcode = RISCV::PseudoSF_VSETTK;
+ break;
+ case VSETTM:
+ OpNum = RISCVII::getTMOpNum(MI.getDesc());
+ Opcode = RISCV::PseudoSF_VSETTM;
+ break;
+ }
+
+ assert(OpNum && Opcode && "Invalid OpNum or Opcode");
+
+ const MachineOperand &Op = MI.getOperand(OpNum);
+
+ auto TmpMI = BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(Opcode))
+ .addReg(RISCV::X0, RegState::Define | RegState::Dead)
+ .addReg(Op.getReg())
+ .addImm(Log2_32(CurrInfo.getSEW()))
+ .addImm(Log2_32(CurrInfo.getTWiden()) + 1);
+
+ Changed = true;
+ if (LIS)
+ LIS->InsertMachineInstrInMaps(*TmpMI);
+
+ shrinkIntervalAndRemoveDeadMI(MI.getOperand(OpNum), LIS, TII);
+ }
+ return Changed;
+}
+
bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
// Skip if the vector extension is not enabled.
ST = &MF.getSubtarget<RISCVSubtarget>();
@@ -1861,6 +2053,11 @@ bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
for (MachineBasicBlock &MBB : MF)
insertReadVL(MBB);
+ for (MachineBasicBlock &MBB : MF) {
+ insertVSETMTK(MBB, VSETTM);
+ insertVSETMTK(MBB, VSETTK);
+ }
+
BlockInfo.clear();
return HaveVectorOp;
}
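
When the XSfmm state changes before a matmul pseudo, the net effect (a sketch
mirroring the CHECK lines in the MIR test below) is:

  // dead $x0 = PseudoSF_VSETTNT tn, <vtype imm>           ; from insertVSETVLI
  // dead $x0 = PseudoSF_VSETTM  tm, log2(SEW), log2(TWiden)+1
  // dead $x0 = PseudoSF_VSETTK  tk, log2(SEW), log2(TWiden)+1
  // PseudoSF_MM_F_F $t2, ..., $noreg, $noreg, $noreg, ...  ; tm/tn/tk operands
  //                                                        ; cleared afterwards
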
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index 2afd77a96373b..5b06303092c0d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -267,6 +267,22 @@ class RVInstCommon<dag outs, dag ins, string opcodestr, string argstr,
// operands' VLs.
bit ReadsPastVL = 0;
let TSFlags{26} = ReadsPastVL;
+
+ // 0 -> Don't care about altfmt bit in VTYPE.
+ // 1 -> Is not altfmt.
+ // 2 -> Is altfmt(BF16).
+ bits<2> AltFmtType = 0;
+ let TSFlags{28-27} = AltFmtType;
+
+ // XSfmmbase
+ bit HasTWidenOp = 0;
+ let TSFlags{29} = HasTWidenOp;
+
+ bit HasTmOp = 0;
+ let TSFlags{30} = HasTmOp;
+
+ bit HasTkOp = 0;
+ let TSFlags{31} = HasTkOp;
}
class RVInst<dag outs, dag ins, string opcodestr, string argstr,
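
A sanity sketch, not part of the patch: with ReadsPastVLShift == 26 (matching
TSFlags{26} above), the shifts added to RISCVBaseInfo.h line up with these bit
assignments:

  // Relative include path assumed, as used within lib/Target/RISCV.
  #include "MCTargetDesc/RISCVBaseInfo.h"

  static_assert(llvm::RISCVII::AltFmtTypeShift == 27, "TSFlags{28-27}");
  static_assert(llvm::RISCVII::HasTWidenOpShift == 29, "TSFlags{29}");
  static_assert(llvm::RISCVII::HasTMOpShift == 30, "TSFlags{30}");
  static_assert(llvm::RISCVII::HasTKOpShift == 31, "TSFlags{31}");
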
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 0ed97c61ec78a..c8df607f347db 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -2974,6 +2974,9 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
else
Ok = RISCVFPRndMode::isValidRoundingMode(Imm);
break;
+ case RISCVOp::OPERAND_XSFMM_VTYPE:
+ Ok = RISCVVType::isValidXSfmmVType(Imm);
+ break;
}
if (!Ok) {
ErrInfo = "Invalid immediate";
@@ -3639,6 +3642,11 @@ std::string RISCVInstrInfo::createMIROperandComment(
RISCVVType::printVType(Imm, OS);
break;
}
+ case RISCVOp::OPERAND_XSFMM_VTYPE: {
+ unsigned Imm = Op.getImm();
+ RISCVVType::printXSfmmVType(Imm, OS);
+ break;
+ }
case RISCVOp::OPERAND_SEW:
case RISCVOp::OPERAND_SEW_MASK: {
unsigned Log2SEW = Op.getImm();
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index b4be9e0c09b3e..80ae74bdd20cf 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -128,6 +128,9 @@ defvar TAIL_AGNOSTIC = 1;
defvar TU_MU = 0;
defvar TA_MU = 1;
defvar TA_MA = 3;
+defvar DONT_CARE_ALTFMT = 0;
+defvar IS_NOT_ALTFMT = 1;
+defvar IS_ALTFMT = 2;
//===----------------------------------------------------------------------===//
// Utilities.
@@ -159,7 +162,8 @@ class PseudoToVInst<string PseudoInst> {
["_M4", ""],
["_M8", ""],
["_SE", ""],
- ["_RM", ""]
+ ["_RM", ""],
+ ["_ALT", ""]
];
string VInst = !foldl(PseudoInst, AffixSubsts, Acc, AffixSubst,
!subst(AffixSubst[0], AffixSubst[1], Acc));
@@ -6392,7 +6396,7 @@ let Defs = [VXSAT] in {
// 13. Vector Floating-Point Instructions
//===----------------------------------------------------------------------===//
-let Predicates = [HasVInstructionsAnyF] in {
+let Predicates = [HasVInstructionsAnyF], AltFmtType = IS_NOT_ALTFMT in {
//===----------------------------------------------------------------------===//
// 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
//===----------------------------------------------------------------------===//
@@ -6561,7 +6565,7 @@ defm PseudoVFNCVT_F_F : VPseudoVNCVTD_W_RM;
defm PseudoVFNCVT_ROD_F_F : VPseudoVNCVTD_W;
} // mayRaiseFPException = true
-} // Predicates = [HasVInstructionsAnyF]
+} // Predicates = [HasVInstructionsAnyF], AltFmtType = IS_NOT_ALTFMT
//===----------------------------------------------------------------------===//
// 14. Vector Reduction Operations
@@ -6589,7 +6593,7 @@ defm PseudoVWREDSUM : VPseudoVWRED_VS;
}
} // Predicates = [HasVInstructions]
-let Predicates = [HasVInstructionsAnyF] in {
+let Predicates = [HasVInstructionsAnyF], AltFmtType = IS_NOT_ALTFMT in {
//===----------------------------------------------------------------------===//
// 14.3. Vector Single-Width Floating-Point Reduction Instructions
//===----------------------------------------------------------------------===//
@@ -6608,7 +6612,7 @@ defm PseudoVFWREDUSUM : VPseudoVFWRED_VS_RM;
defm PseudoVFWREDOSUM : VPseudoVFWREDO_VS_RM;
}
-} // Predicates = [HasVInstructionsAnyF]
+} // Predicates = [HasVInstructionsAnyF], AltFmtType = IS_NOT_ALTFMT
//===----------------------------------------------------------------------===//
// 15. Vector Mask Instructions
@@ -6699,7 +6703,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
// 16.2. Floating-Point Scalar Move Instructions
//===----------------------------------------------------------------------===//
-let Predicates = [HasVInstructionsAnyF] in {
+let Predicates = [HasVInstructionsAnyF], AltFmtType = IS_NOT_ALTFMT in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
foreach f = FPList in {
let HasSEWOp = 1, BaseInstr = VFMV_F_S in
@@ -6715,7 +6719,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
Sched<[WriteVMovSF, ReadVMovSF_V, ReadVMovSF_F]>;
}
}
-} // Predicates = [HasVInstructionsAnyF]
+} // Predicates = [HasVInstructionsAnyF], AltFmtType = IS_NOT_ALTFMT
//===----------------------------------------------------------------------===//
// 16.3. Vector Slide Instructions
@@ -6727,10 +6731,10 @@ let Predicates = [HasVInstructions] in {
defm PseudoVSLIDE1DOWN : VPseudoVSLD1_VX;
} // Predicates = [HasVInstructions]
-let Predicates = [HasVInstructionsAnyF] in {
+let Predicates = [HasVInstructionsAnyF], AltFmtType = IS_NOT_ALTFMT in {
defm PseudoVFSLIDE1UP : VPseudoVSLD1_VF<"@earlyclobber $rd">;
defm PseudoVFSLIDE1DOWN : VPseudoVSLD1_VF;
-} // Predicates = [HasVInstructionsAnyF]
+} // Predicates = [HasVInstructionsAnyF], AltFmtType = IS_NOT_ALTFMT
//===----------------------------------------------------------------------===//
// 16.4. Vector Register Gather Instructions
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
index b546339ce99e2..fe357e3084179 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -438,8 +438,10 @@ let Predicates = [HasVendorXSfvcp] in {
}
foreach f = FPList in {
foreach m = f.MxList in {
- defm f.FX # "V" : VPseudoVC_XV<m, f.fprclass, payload1>;
- defm f.FX # "VV" : VPseudoVC_XVV<m, f.fprclass, payload1>;
+ let AltFmtType = IS_NOT_ALTFMT in {
+ defm f.FX # "V" : VPseudoVC_XV<m, f.fprclass, payload1>;
+ defm f.FX # "VV" : VPseudoVC_XVV<m, f.fprclass, payload1>;
+ }
}
}
foreach m = MxListW in {
@@ -449,7 +451,8 @@ let Predicates = [HasVendorXSfvcp] in {
}
foreach f = FPListW in {
foreach m = f.MxList in
- defm f.FX # "VW" : VPseudoVC_XVW<m, f.fprclass, payload1>;
+ let AltFmtType = IS_NOT_ALTFMT in
+ defm f.FX # "VW" : VPseudoVC_XVW<m, f.fprclass, payload1>;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSfmm.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSfmm.td
index a5ee701386b6d..c405feeb7d6f5 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSfmm.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSfmm.td
@@ -277,3 +277,141 @@ let Uses = [FRM], mayRaiseFPException = true in {
} // Predicates = [HasVendorXSfmm32a8f]
} // DecoderNamespace = "XSfvector"
+
+class VPseudoSF_VTileLoad
+ : RISCVVPseudo<(outs), (ins GPR:$rs2, GPR:$rs1, AVL:$atn, ixlenimm:$sew,
+ ixlenimm:$twiden)> {
+ let mayLoad = 1;
+ let mayStore = 0;
+ let HasVLOp = 1; // Tn
+ let HasSEWOp = 1;
+ let HasTWidenOp = 1;
+ let hasSideEffects = 1;
+}
+
+class VPseudoSF_VTileStore
+ : RISCVVPseudo<(outs), (ins GPR:$rs2, GPR:$rs1, AVL:$atn, ixlenimm:$sew,
+ ixlenimm:$twiden)> {
+ let mayLoad = 0;
+ let mayStore = 1;
+ let HasVLOp = 1; // Tn
+ let HasSEWOp = 1;
+ let HasTWidenOp = 1;
+ let hasSideEffects = 1;
+}
+
+class VPseudoSF_VTileMove_V_T
+ : RISCVVPseudo<(outs VRM8:$vd), (ins GPR:$rs1, AVL:$atn, ixlenimm:$sew,
+ ixlenimm:$twiden)> {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let HasVLOp = 1; // Tn
+ let HasSEWOp = 1;
+ let HasTWidenOp = 1;
+ let hasSideEffects = 1;
+}
+
+class VPseudoSF_VTileMove_T_V
+ : RISCVVPseudo<(outs), (ins GPR:$rs1, VRM8:$vs2, AVL:$atn, ixlenimm:$sew,
+ ixlenimm:$twiden)> {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let HasVLOp = 1; // Tn
+ let HasSEWOp = 1;
+ let HasTWidenOp = 1;
+ let hasSideEffects = 1;
+}
+
+class VPseudoSF_MatMul<RegisterClass mtd_class>
+ : RISCVVPseudo<(outs),
+ (ins mtd_class:$rd, VRM8:$vs2, VRM8:$vs1, AVL:$atm, AVL:$atn,
+ AVL:$atk, ixlenimm:$sew, ixlenimm:$twiden)> {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let HasTmOp = 1;
+ let HasVLOp = 1; // Tn
+ let HasTkOp = 1;
+ let HasSEWOp = 1;
+ let HasTWidenOp = 1;
+ let hasSideEffects = 1;
+}
+
+class VPseudoSF_MatMul_FRM<RegisterClass mtd_class>
+ : RISCVVPseudo<(outs),
+ (ins mtd_class:$rd, VRM8:$vs2, VRM8:$vs1, ixlenimm:$frm,
+ AVL:$atm, AVL:$atn, AVL:$atk, ixlenimm:$sew,
+ ixlenimm:$twiden), []> {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let HasTmOp = 1;
+ let HasVLOp = 1; // Tn
+ let HasTkOp = 1;
+ let HasSEWOp = 1;
+ let HasRoundModeOp = 1;
+ let hasPostISelHook = 1;
+ let HasTWidenOp = 1;
+ let hasSideEffects = 1;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
+let Defs = [VL, VTYPE] in {
+ def PseudoSF_VSETTNT
+ : Pseudo<(outs GPR:$rd),
+ (ins GPRNoX0:$rs1, XSfmmVTypeOp:$vtypei), []>,
+ PseudoInstExpansion<(VSETVLI GPR:$rd, GPR:$rs1, VTypeIOp11:$vtypei)>,
+ Sched<[WriteVSETVLI, ReadVSETVLI]>;
+ def PseudoSF_VSETTNTX0
+ : Pseudo<(outs GPRNoX0:$rd),
+ (ins GPRX0:$rs1, XSfmmVTypeOp:$vtypei), []>,
+ PseudoInstExpansion<(VSETVLI GPR:$rd, GPR:$rs1, VTypeIOp11:$vtypei)>,
+ Sched<[WriteVSETVLI, ReadVSETVLI]>;
+ def PseudoSF_VSETTNTX0X0
+ : Pseudo<(outs GPRX0:$rd),
+ (ins GPRX0:$rs1, XSfmmVTypeOp:$vtypei), []>,
+ PseudoInstExpansion<(VSETVLI GPR:$rd, GPR:$rs1, VTypeIOp11:$vtypei)>,
+ Sched<[WriteVSETVLI, ReadVSETVLI]>;
+}
+
+let Defs = [VTYPE], Uses = [VTYPE], HasTWidenOp = 1, HasSEWOp = 1 in {
+ def PseudoSF_VSETTM
+ : Pseudo<(outs GPR:$rd), (ins GPR:$rs1, ixlenimm:$log2sew, ixlenimm:$twiden), []>,
+ PseudoInstExpansion<(SF_VSETTM GPR:$rd, GPR:$rs1)>,
+ Sched<[WriteVSETVLI, ReadVSETVLI]>;
+ def PseudoSF_VSETTK
+      : Pseudo<(outs GPR:$rd), (ins GPR:$rs1, ixlenimm:$log2sew, ixlenimm:$twiden), []>,
+ PseudoInstExpansion<(SF_VSETTK GPR:$rd, GPR:$rs1)>,
+ Sched<[WriteVSETVLI, ReadVSETVLI]>;
+}
+}
+
+foreach eew = [8, 16, 32, 64] in {
+ def PseudoSF_VLTE # eew : VPseudoSF_VTileLoad;
+ def PseudoSF_VSTE # eew : VPseudoSF_VTileStore;
+}
+
+def PseudoSF_VTMV_T_V : VPseudoSF_VTileMove_T_V;
+def PseudoSF_VTMV_V_T : VPseudoSF_VTileMove_V_T;
+
+foreach a = I8Encodes in
+ foreach b = I8Encodes in
+ def PseudoSF_MM_ # !toupper(a.Name) # _ # !toupper(b.Name)
+ : VPseudoSF_MatMul<TRM4>;
+
+let AltFmtType = IS_NOT_ALTFMT in
+ def PseudoSF_MM_F_F : VPseudoSF_MatMul_FRM<TRM2>;
+let AltFmtType = IS_ALTFMT in
+ def PseudoSF_MM_F_F_ALT : VPseudoSF_MatMul_FRM<TRM2>;
+
+foreach e1 = [5, 4] in
+ foreach e2 = [5, 4] in
+ def PseudoSF_MM_E # e1 # M # !sub(7, e1) # _E # e2 # M # !sub(7, e2)
+ : VPseudoSF_MatMul_FRM<TRM4>;
+
+let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
+ let HasVLOp = 1, HasTmOp = 1, HasTWidenOp = 1, HasSEWOp = 1 in
+ def PseudoSF_VTZERO_T
+ : RISCVVPseudo<(outs),
+ (ins TR:$rd, AVL:$atm, AVL:$atn, ixlenimm:$sew, ixlenimm:$twiden)>;
+ def PseudoSF_VTDISCARD : RISCVVPseudo<(outs), (ins), []>;
+}
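
Worked example for cross-checking against the RISCVBaseInfo.h accessors:
VPseudoSF_MatMul_FRM has 9 explicit operands, so

  //   $rd=0, $vs2=1, $vs1=2, $frm=3, $atm=4, $atn=5, $atk=6, $sew=7, $twiden=8
  //   getTMOpNum  = 9-5 = 4   getTNOpNum = 9-4 = 5   getTKOpNum = 9-3 = 6
  //   getFRMOpNum = getTMOpNum - 1 = 3
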
diff --git a/llvm/lib/Target/RISCV/RISCVInstrPredicates.td b/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
index 6d86aff581604..75d31abc30ca8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrPredicates.td
@@ -74,7 +74,41 @@ def isVectorConfigInstr
PseudoVSETVLI,
PseudoVSETVLIX0,
PseudoVSETVLIX0X0,
- PseudoVSETIVLI
+ PseudoVSETIVLI,
+ PseudoSF_VSETTNT,
+ PseudoSF_VSETTNTX0,
+ PseudoSF_VSETTNTX0X0
+ ]>>>;
+
+// Returns true if this is a PseudoSF_VSETTNT* instruction.
+def isXSfmmVectorConfigTNInstr
+ : TIIPredicate<"isXSfmmVectorConfigTNInstr",
+ MCReturnStatement<
+ CheckOpcode<[
+ PseudoSF_VSETTNT,
+ PseudoSF_VSETTNTX0,
+ PseudoSF_VSETTNTX0X0
+ ]>>>;
+
+// Returns true if this is PseudoSF_VSETTM or PseudoSF_VSETTK.
+def isXSfmmVectorConfigTMTKInstr
+ : TIIPredicate<"isXSfmmVectorConfigTMTKInstr",
+ MCReturnStatement<
+ CheckOpcode<[
+ PseudoSF_VSETTM,
+ PseudoSF_VSETTK
+ ]>>>;
+
+// Returns true if this is an XSfmm vector configuration instruction.
+def isXSfmmVectorConfigInstr
+ : TIIPredicate<"isXSfmmVectorConfigInstr",
+ MCReturnStatement<
+ CheckOpcode<[
+ PseudoSF_VSETTNT,
+ PseudoSF_VSETTNTX0,
+ PseudoSF_VSETTNTX0X0,
+ PseudoSF_VSETTM,
+ PseudoSF_VSETTK
]>>>;
// Return true if this is 'vsetvli x0, x0, vtype' which preserves
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
index 40b641680b2ce..e9f43b9a71648 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp
@@ -178,6 +178,10 @@ BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// Shadow stack pointer.
markSuperRegs(Reserved, RISCV::SSP);
+ // XSfmmbase
+ for (MCPhysReg Reg = RISCV::T0; Reg <= RISCV::T15; Reg++)
+ markSuperRegs(Reserved, Reg);
+
assert(checkAllSuperRegsMarked(Reserved));
return Reserved;
}
diff --git a/llvm/lib/TargetParser/RISCVTargetParser.cpp b/llvm/lib/TargetParser/RISCVTargetParser.cpp
index acf8e4cf7c6d2..5ea63a973ea37 100644
--- a/llvm/lib/TargetParser/RISCVTargetParser.cpp
+++ b/llvm/lib/TargetParser/RISCVTargetParser.cpp
@@ -228,6 +228,10 @@ void printVType(unsigned VType, raw_ostream &OS) {
OS << ", mu";
}
+void printXSfmmVType(unsigned VType, raw_ostream &OS) {
+ OS << "e" << getSEW(VType) << ", w" << getXSfmmWiden(VType);
+}
+
unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul) {
unsigned LMul;
bool Fractional;
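
Illustrative usage; the expected strings match the vtype comments in the MIR
test below:

  #include "llvm/Support/raw_ostream.h"
  #include "llvm/TargetParser/RISCVTargetParser.h"

  void demo() {
    llvm::RISCVVType::printXSfmmVType(1032, llvm::errs()); // "e16, w2"
    llvm::RISCVVType::printXSfmmVType(520, llvm::errs());  // "e16, w1"
    llvm::RISCVVType::printXSfmmVType(1536, llvm::errs()); // "e8, w4"
  }
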
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive-xsfmm-vset-insert.mir b/llvm/test/CodeGen/RISCV/rvv/sifive-xsfmm-vset-insert.mir
new file mode 100644
index 0000000000000..8b06886c12f21
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive-xsfmm-vset-insert.mir
@@ -0,0 +1,523 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc %s -o - -mtriple=riscv64 -mattr=+v \
+# RUN: -run-pass=phi-node-elimination,register-coalescer,riscv-insert-vsetvli | FileCheck %s
+
+--- |
+ define void @xsfmm_same_state(<vscale x 32 x half> %tile1, <vscale x 32 x half> %tile2, i64 noundef %tm, i64 noundef %tn, i64 noundef %tk) {
+ entry:
+ tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 2, <vscale x 32 x half> %tile1, <vscale x 32 x half> %tile2, i64 %tm, i64 %tn, i64 %tk, i64 2)
+ tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 2, <vscale x 32 x half> %tile1, <vscale x 32 x half> %tile2, i64 %tm, i64 %tn, i64 %tk, i64 2)
+ ret void
+ }
+
+ define void @xsfmm_different_state(<vscale x 32 x half> %tile1, <vscale x 32 x half> %tile2, i64 %tm, i64 %tn, i64 %tk) {
+ entry:
+ tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 2, <vscale x 32 x half> %tile1, <vscale x 32 x half> %tile2, i64 %tm, i64 %tn, i64 %tk, i64 2)
+ tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 2, <vscale x 32 x half> %tile1, <vscale x 32 x half> %tile2, i64 %tm, i64 %tn, i64 %tk, i64 4)
+ ret void
+ }
+
+ define void @xsfmm_different_state_bf(<vscale x 32 x half> %tile1, <vscale x 32 x bfloat> %tile2, i64 %tm, i64 %tn, i64 %tk) {
+ entry:
+ tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 2, <vscale x 32 x half> %tile1, <vscale x 32 x half> %tile1, i64 %tm, i64 %tn, i64 %tk, i64 2)
+ tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32bf16(i64 2, <vscale x 32 x bfloat> %tile2, <vscale x 32 x bfloat> %tile2, i64 %tm, i64 %tn, i64 %tk, i64 2)
+ tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 2, <vscale x 32 x half> %tile1, <vscale x 32 x half> %tile1, i64 %tm, i64 %tn, i64 %tk, i64 2)
+ ret void
+ }
+
+ define <vscale x 64 x i8> @interleave_rvv_and_xsfmm(<vscale x 64 x i8> %tile, i64 %vl, ptr %base) {
+ entry:
+ %0 = call <vscale x 64 x i8> @llvm.riscv.sf.vtmv.v.t.nxv64i8.i64(i64 1, i64 %vl)
+ %1 = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %tile, <vscale x 64 x i8> %0, i64 %vl)
+ call void @llvm.riscv.sf.vste16.i64(i64 1, ptr %base, i64 %vl)
+ ret <vscale x 64 x i8> %1
+ }
+
+ define <vscale x 64 x i8> @interleave_rvv_and_xsfmm2(<vscale x 64 x i8> %tile, i64 %vl, ptr %base) {
+ entry:
+ %0 = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %tile, <vscale x 64 x i8> %tile, i64 %vl)
+ %1 = call <vscale x 64 x i8> @llvm.riscv.sf.vtmv.v.t.nxv64i8.i64(i64 1, i64 %vl)
+ %2 = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> %tile, <vscale x 64 x i8> %0, i64 %vl)
+ call void @llvm.riscv.sf.vste16.i64(i64 1, ptr %base, i64 %vl)
+ ret <vscale x 64 x i8> %2
+ }
+
+ define void @consecutive_xsfmm(<vscale x 32 x half> %tile, i64 %tm, i64 %tn, i64 %tk, ptr %base) {
+ entry:
+ tail call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 0, <vscale x 32 x half> %tile, <vscale x 32 x half> %tile, i64 %tm, i64 %tn, i64 %tk, i64 2)
+ call void @llvm.riscv.sf.vste16.i64(i64 0, ptr %base, i64 %tn)
+ ret void
+ }
+
+ define i64 @vsettnt_max(i64 %vl) {
+ entry:
+ %0 = call i64 @llvm.riscv.sf.vsettm.i64(i64 %vl, i64 1, i64 2)
+ %1 = call i64 @llvm.riscv.sf.vsettnt_max.i64(i64 1, i64 2)
+ ret i64 %0
+ }
+
+ define i64 @single_vsettm(i64 %vl) {
+ entry:
+ %0 = call i64 @llvm.riscv.sf.vsettm.i64(i64 %vl, i64 1, i64 2)
+ ret i64 %0
+ }
+
+ define i64 @single_vsettn(i64 %vl) {
+ entry:
+ %0 = call i64 @llvm.riscv.sf.vsettn.i64(i64 %vl, i64 1, i64 2)
+ ret i64 %0
+ }
+
+ define i64 @single_vsettk(i64 %vl) {
+ entry:
+ %0 = call i64 @llvm.riscv.sf.vsettk.i64(i64 %vl, i64 1, i64 2)
+ ret i64 %0
+ }
+
+ define void @sf_vtzero(i64 %tm, i64 %tn) {
+ entry:
+ call void @llvm.riscv.sf.vtzero.i64(i64 1, i64 %tm, i64 %tn, i64 3, i64 4)
+ ret void
+ }
+
+ declare void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64, <vscale x 32 x half>, <vscale x 32 x half>, i64, i64, i64, i64)
+ declare void @llvm.riscv.sf.mm.f.f.i64.nxv32bf16(i64, <vscale x 32 x bfloat>, <vscale x 32 x bfloat>, i64, i64, i64, i64)
+ declare <vscale x 64 x i8> @llvm.riscv.sf.vtmv.v.t.nxv64i8.i64(i64, i64)
+ declare <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, i64)
+ declare void @llvm.riscv.sf.vste16.i64(i64, ptr, i64)
+ declare i64 @llvm.riscv.sf.vsettnt_max.i64(i64, i64)
+ declare i64 @llvm.riscv.sf.vsettm.i64(i64, i64, i64)
+ declare i64 @llvm.riscv.sf.vsettn.i64(i64, i64, i64)
+ declare i64 @llvm.riscv.sf.vsettk.i64(i64, i64, i64)
+ declare void @llvm.riscv.sf.vtzero.i64(i64, i64, i64, i64, i64)
+...
+---
+name: xsfmm_same_state
+alignment: 4
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vrm8 }
+ - { id: 1, class: vrm8 }
+ - { id: 2, class: gprnox0 }
+ - { id: 3, class: gprnox0 }
+ - { id: 4, class: gprnox0 }
+liveins:
+ - { reg: '$v8m8', virtual-reg: '%0' }
+ - { reg: '$v8m8', virtual-reg: '%1' }
+ - { reg: '$x10', virtual-reg: '%2' }
+ - { reg: '$x11', virtual-reg: '%3' }
+ - { reg: '$x12', virtual-reg: '%4' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8, $x10, $x11, $x12
+ ; CHECK-LABEL: name: xsfmm_same_state
+ ; CHECK: liveins: $v8m8, $v16m8, $x10, $x11, $x12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x12
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x10
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 1032 /* e16, w2 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY2]], 4, 2, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY]], 4, 2, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: PseudoSF_MM_F_F $t2, [[COPY4]], [[COPY3]], 7, $noreg, $noreg, $noreg, 4, 2, implicit $frm, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY2]], 4, 2, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY]], 4, 2, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: PseudoSF_MM_F_F $t2, [[COPY4]], [[COPY3]], 7, $noreg, $noreg, $noreg, 4, 2, implicit $frm, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: PseudoRET
+ %4:gprnox0 = COPY $x12
+ %3:gprnox0 = COPY $x11
+ %2:gprnox0 = COPY $x10
+ %1:vrm8 = COPY $v16m8
+ %0:vrm8 = COPY $v8m8
+ PseudoSF_MM_F_F $t2, %0:vrm8, %1:vrm8, 7, %2:gprnox0, %3:gprnox0, %4:gprnox0, 4, 2, implicit $frm
+ PseudoSF_MM_F_F $t2, %0:vrm8, %1:vrm8, 7, %2:gprnox0, %3:gprnox0, %4:gprnox0, 4, 2, implicit $frm
+ PseudoRET
+...
+---
+name: xsfmm_different_state
+alignment: 4
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vrm8 }
+ - { id: 1, class: vrm8 }
+ - { id: 2, class: gprnox0 }
+ - { id: 3, class: gprnox0 }
+ - { id: 4, class: gprnox0 }
+liveins:
+ - { reg: '$v8m8', virtual-reg: '%0' }
+ - { reg: '$v8m8', virtual-reg: '%1' }
+ - { reg: '$x10', virtual-reg: '%2' }
+ - { reg: '$x11', virtual-reg: '%3' }
+ - { reg: '$x12', virtual-reg: '%4' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8, $x10, $x11, $x12
+ ; CHECK-LABEL: name: xsfmm_different_state
+ ; CHECK: liveins: $v8m8, $v16m8, $x10, $x11, $x12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x12
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x10
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 1032 /* e16, w2 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY2]], 4, 2, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY]], 4, 2, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: PseudoSF_MM_F_F $t2, [[COPY4]], [[COPY3]], 7, $noreg, $noreg, $noreg, 4, 2, implicit $frm, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 1544 /* e16, w4 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY2]], 4, 3, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY]], 4, 3, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: PseudoSF_MM_F_F $t2, [[COPY4]], [[COPY3]], 7, $noreg, $noreg, $noreg, 4, 4, implicit $frm, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: PseudoRET
+ %4:gprnox0 = COPY $x12
+ %3:gprnox0 = COPY $x11
+ %2:gprnox0 = COPY $x10
+ %1:vrm8 = COPY $v16m8
+ %0:vrm8 = COPY $v8m8
+ PseudoSF_MM_F_F $t2, %0:vrm8, %1:vrm8, 7, %2:gprnox0, %3:gprnox0, %4:gprnox0, 4, 2, implicit $frm
+ PseudoSF_MM_F_F $t2, %0:vrm8, %1:vrm8, 7, %2:gprnox0, %3:gprnox0, %4:gprnox0, 4, 4, implicit $frm
+ PseudoRET
+...
+---
+name: xsfmm_different_state_bf
+alignment: 4
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vrm8 }
+ - { id: 1, class: vrm8 }
+ - { id: 2, class: gprnox0 }
+ - { id: 3, class: gprnox0 }
+ - { id: 4, class: gprnox0 }
+liveins:
+ - { reg: '$v8m8', virtual-reg: '%0' }
+ - { reg: '$v8m8', virtual-reg: '%1' }
+ - { reg: '$x10', virtual-reg: '%2' }
+ - { reg: '$x11', virtual-reg: '%3' }
+ - { reg: '$x12', virtual-reg: '%4' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $v16m8, $x10, $x11, $x12
+ ; CHECK-LABEL: name: xsfmm_different_state_bf
+ ; CHECK: liveins: $v8m8, $v16m8, $x10, $x11, $x12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x12
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x10
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vrm8 = COPY $v16m8
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 1032 /* e16, w2 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY2]], 4, 2, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY]], 4, 2, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: PseudoSF_MM_F_F $t2, [[COPY4]], [[COPY4]], 7, $noreg, $noreg, $noreg, 4, 2, implicit $frm, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 1288 /* e16, w2 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY2]], 4, 2, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY]], 4, 2, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: PseudoSF_MM_F_F_ALT $t2, [[COPY3]], [[COPY3]], 7, $noreg, $noreg, $noreg, 4, 2, implicit $frm, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 1032 /* e16, w2 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY2]], 4, 2, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY]], 4, 2, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: PseudoSF_MM_F_F $t2, [[COPY4]], [[COPY4]], 7, $noreg, $noreg, $noreg, 4, 2, implicit $frm, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: PseudoRET
+ %4:gprnox0 = COPY $x12
+ %3:gprnox0 = COPY $x11
+ %2:gprnox0 = COPY $x10
+ %1:vrm8 = COPY $v16m8
+ %0:vrm8 = COPY $v8m8
+ PseudoSF_MM_F_F $t2, %0:vrm8, %0:vrm8, 7, %2:gprnox0, %3:gprnox0, %4:gprnox0, 4, 2, implicit $frm
+ PseudoSF_MM_F_F_ALT $t2, %1:vrm8, %1:vrm8, 7, %2:gprnox0, %3:gprnox0, %4:gprnox0, 4, 2, implicit $frm
+ PseudoSF_MM_F_F $t2, %0:vrm8, %0:vrm8, 7, %2:gprnox0, %3:gprnox0, %4:gprnox0, 4, 2, implicit $frm
+ PseudoRET
+...
+---
+name: interleave_rvv_and_xsfmm
+alignment: 4
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vrm8 }
+ - { id: 1, class: gprnox0 }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+ - { id: 4, class: vrm8 }
+ - { id: 5, class: vrm8 }
+liveins:
+ - { reg: '$v8m8', virtual-reg: '%0' }
+ - { reg: '$x10', virtual-reg: '%1' }
+ - { reg: '$x11', virtual-reg: '%2' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $x10, $x11
+ ; CHECK-LABEL: name: interleave_rvv_and_xsfmm
+ ; CHECK: liveins: $v8m8, $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x10
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 512 /* e8, w1 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: [[PseudoSF_VTMV_V_T:%[0-9]+]]:vrm8 = PseudoSF_VTMV_V_T [[ADDI]], $noreg, 3, 1, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY1]], 195 /* e8, m8, ta, ma */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 $noreg, [[COPY2]], [[PseudoSF_VTMV_V_T]], $noreg, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: PseudoSF_VSTE16 [[ADDI]], [[COPY]], $noreg, 4, 1, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_]], implicit $vtype
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %2:gpr = COPY $x11
+ %1:gprnox0 = COPY $x10
+ %0:vrm8 = COPY $v8m8
+ %3:gpr = ADDI $x0, 1
+ %4:vrm8 = PseudoSF_VTMV_V_T %3:gpr, %1:gprnox0, 3, 1
+ %5:vrm8 = PseudoVADD_VV_M8 $noreg, %0:vrm8, killed %4:vrm8, %1:gprnox0, 3, 0
+ PseudoSF_VSTE16 %3:gpr, %2:gpr, %1:gprnox0, 4, 1
+ $v8m8 = COPY %5:vrm8
+ PseudoRET implicit $v8m8
+...
+---
+name: interleave_rvv_and_xsfmm2
+alignment: 4
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vrm8 }
+ - { id: 1, class: gprnox0 }
+ - { id: 2, class: gpr }
+ - { id: 3, class: gpr }
+ - { id: 4, class: vrm8 }
+ - { id: 5, class: vrm8 }
+liveins:
+ - { reg: '$v8m8', virtual-reg: '%0' }
+ - { reg: '$x10', virtual-reg: '%1' }
+ - { reg: '$x11', virtual-reg: '%2' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $x10, $x11
+ ; CHECK-LABEL: name: interleave_rvv_and_xsfmm2
+ ; CHECK: liveins: $v8m8, $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x10
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI $x0, 1
+ ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY1]], 195 /* e8, m8, ta, ma */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: [[PseudoVADD_VV_M8_:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 $noreg, [[COPY2]], [[COPY2]], $noreg, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 512 /* e8, w1 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: dead [[PseudoSF_VTMV_V_T:%[0-9]+]]:vrm8 = PseudoSF_VTMV_V_T [[ADDI]], $noreg, 3, 1, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY1]], 195 /* e8, m8, ta, ma */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: [[PseudoVADD_VV_M8_1:%[0-9]+]]:vrm8 = PseudoVADD_VV_M8 $noreg, [[PseudoVADD_VV_M8_]], [[PseudoVADD_VV_M8_]], $noreg, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: PseudoSF_VSTE16 [[ADDI]], [[COPY]], $noreg, 4, 1, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: $v8m8 = COPY [[PseudoVADD_VV_M8_1]], implicit $vtype
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %2:gpr = COPY $x11
+ %1:gprnox0 = COPY $x10
+ %0:vrm8 = COPY $v8m8
+ %3:gpr = ADDI $x0, 1
+ %4:vrm8 = PseudoVADD_VV_M8 $noreg, %0:vrm8, killed %0:vrm8, %1:gprnox0, 3, 0
+ %5:vrm8 = PseudoSF_VTMV_V_T %3:gpr, %1:gprnox0, 3, 1
+ %6:vrm8 = PseudoVADD_VV_M8 $noreg, %4:vrm8, killed %4:vrm8, %1:gprnox0, 3, 0
+ PseudoSF_VSTE16 %3:gpr, %2:gpr, %1:gprnox0, 4, 1
+ $v8m8 = COPY %6:vrm8
+ PseudoRET implicit $v8m8
+...
+---
+name: consecutive_xsfmm
+alignment: 4
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: vrm8 }
+ - { id: 1, class: gprnox0 }
+ - { id: 2, class: gprnox0 }
+ - { id: 3, class: gprnox0 }
+ - { id: 4, class: gprnox0 }
+liveins:
+ - { reg: '$v8m8', virtual-reg: '%0' }
+ - { reg: '$x10', virtual-reg: '%1' }
+ - { reg: '$x11', virtual-reg: '%2' }
+ - { reg: '$x12', virtual-reg: '%3' }
+ - { reg: '$x13', virtual-reg: '%4' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ liveins: $v8m8, $x10, $x11, $x12, $x13
+ ; CHECK-LABEL: name: consecutive_xsfmm
+ ; CHECK: liveins: $v8m8, $x10, $x11, $x12, $x13
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vrm8 = COPY $v8m8
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x10
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gprnox0 = COPY $x12
+ ; CHECK-NEXT: dead [[COPY4:%[0-9]+]]:gprnox0 = COPY $x13
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY2]], 1032 /* e16, w2 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY1]], 4, 2, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTK [[COPY3]], 4, 2, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: PseudoSF_MM_F_F $t2, [[COPY]], [[COPY]], 7, $noreg, $noreg, $noreg, 4, 2, implicit $frm, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY3]], 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: PseudoSF_VSTE16 [[COPY1]], [[COPY2]], $noreg, 4, 1, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: PseudoRET
+ %0:vrm8 = COPY $v8m8
+ %1:gprnox0 = COPY $x10
+ %2:gprnox0 = COPY $x11
+ %3:gprnox0 = COPY $x12
+ %4:gprnox0 = COPY $x13
+ PseudoSF_MM_F_F $t2, %0:vrm8, %0:vrm8, 7, %1:gprnox0, %2:gprnox0, %3:gprnox0, 4, 2, implicit $frm
+ PseudoSF_VSTE16 %1:gprnox0, %2:gprnox0, %3:gprnox0, 4, 1
+ PseudoRET
+...
+---
+name: vsettnt_max
+alignment: 4
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprnox0 }
+liveins:
+ - { reg: '$x10', virtual-reg: '%0' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ liveins: $x10
+ ; CHECK-LABEL: name: vsettnt_max
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+ ; CHECK-NEXT: dead [[PseudoSF_VSETTNTX0_:%[0-9]+]]:gpr = PseudoSF_VSETTNTX0 killed $x0, 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: dead [[PseudoSF_VSETTK:%[0-9]+]]:gprnox0 = PseudoSF_VSETTK [[COPY]], 4, 1, implicit-def $vtype, implicit $vtype, implicit $vtype
+ ; CHECK-NEXT: dead [[PseudoSF_VSETTNTX0_1:%[0-9]+]]:gprnox0 = PseudoSF_VSETTNTX0 $x0, 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: [[PseudoSF_VSETTM:%[0-9]+]]:gprnox0 = PseudoSF_VSETTM [[COPY]], 4, 1, implicit-def $vtype, implicit $vtype, implicit $vtype
+ ; CHECK-NEXT: $x10 = COPY [[PseudoSF_VSETTM]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprnox0 = COPY $x10
+ %1:gprnox0 = PseudoSF_VSETTK %0:gprnox0, 4, 1, implicit-def $vtype, implicit $vtype
+ %2:gprnox0 = PseudoSF_VSETTNTX0 $x0, 520, implicit-def $vl, implicit-def $vtype, implicit $vtype
+ %3:gprnox0 = PseudoSF_VSETTM %0:gprnox0, 4, 1, implicit-def $vtype, implicit $vtype
+ $x10 = COPY %3:gprnox0
+ PseudoRET implicit $x10
+...
+---
+name: single_vsettm
+alignment: 4
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprnox0 }
+liveins:
+ - { reg: '$x10', virtual-reg: '%0' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ liveins: $x10
+ ; CHECK-LABEL: name: single_vsettm
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+ ; CHECK-NEXT: dead [[PseudoSF_VSETTNTX0_:%[0-9]+]]:gpr = PseudoSF_VSETTNTX0 killed $x0, 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: [[PseudoSF_VSETTM:%[0-9]+]]:gprnox0 = PseudoSF_VSETTM [[COPY]], 4, 1, implicit-def $vtype, implicit $vtype, implicit $vtype
+ ; CHECK-NEXT: $x10 = COPY [[PseudoSF_VSETTM]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprnox0 = COPY $x10
+ %1:gprnox0 = PseudoSF_VSETTM %0:gprnox0, 4, 1, implicit-def $vtype, implicit $vtype
+ $x10 = COPY %1:gprnox0
+ PseudoRET implicit $x10
+...
+---
+name: single_vsettn
+alignment: 4
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprnox0 }
+liveins:
+ - { reg: '$x10', virtual-reg: '%0' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ liveins: $x10
+ ; CHECK-LABEL: name: single_vsettn
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+ ; CHECK-NEXT: [[PseudoSF_VSETTNT:%[0-9]+]]:gprnox0 = PseudoSF_VSETTNT [[COPY]], 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: $x10 = COPY [[PseudoSF_VSETTNT]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprnox0 = COPY $x10
+ %1:gprnox0 = PseudoSF_VSETTNT %0:gprnox0, 520, implicit-def $vl, implicit-def $vtype, implicit $vtype
+ $x10 = COPY %1:gprnox0
+ PseudoRET implicit $x10
+...
+---
+name: single_vsettk
+alignment: 4
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprnox0 }
+liveins:
+ - { reg: '$x10', virtual-reg: '%0' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ liveins: $x10
+ ; CHECK-LABEL: name: single_vsettk
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+ ; CHECK-NEXT: dead [[PseudoSF_VSETTNTX0_:%[0-9]+]]:gpr = PseudoSF_VSETTNTX0 killed $x0, 520 /* e16, w1 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: [[PseudoSF_VSETTK:%[0-9]+]]:gprnox0 = PseudoSF_VSETTK [[COPY]], 4, 1, implicit-def $vtype, implicit $vtype, implicit $vtype
+ ; CHECK-NEXT: $x10 = COPY [[PseudoSF_VSETTK]]
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %0:gprnox0 = COPY $x10
+ %1:gprnox0 = PseudoSF_VSETTK %0:gprnox0, 4, 1, implicit-def $vtype, implicit $vtype
+ $x10 = COPY %1:gprnox0
+ PseudoRET implicit $x10
+...
+---
+name: sf_vtzero
+alignment: 4
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gprnox0 }
+ - { id: 1, class: gprnox0 }
+liveins:
+ - { reg: '$x10', virtual-reg: '%0' }
+ - { reg: '$x11', virtual-reg: '%1' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ bb.0.entry:
+ liveins: $x10, $x11
+ ; CHECK-LABEL: name: sf_vtzero
+ ; CHECK: liveins: $x10, $x11
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTNT [[COPY1]], 1536 /* e8, w4 */, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoSF_VSETTM [[COPY]], 3, 3, implicit-def $vtype, implicit $vtype
+ ; CHECK-NEXT: PseudoSF_VTZERO_T $t1, $noreg, $noreg, 3, 4, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: PseudoRET
+ %0:gprnox0 = COPY $x10
+ %1:gprnox0 = COPY $x11
+ PseudoSF_VTZERO_T $t1, %0:gprnox0, %1:gprnox0, 3, 4
+ PseudoRET
+...
From 6bcdbaa773bd3a73914457dc06382460d7926174 Mon Sep 17 00:00:00 2001
From: Brandon Wu <songwu0813 at gmail.com>
Date: Thu, 5 Jun 2025 02:12:05 -0700
Subject: [PATCH 2/2] [RISCV] Support XSfmm LLVM IR and CodeGen
Co-authored-by: Piyou Chen <piyou.chen at sifive.com>
---
llvm/include/llvm/IR/IntrinsicsRISCVXsf.td | 94 +++++++++
llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp | 6 +
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 183 ++++++++++++++++++
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h | 1 +
llvm/lib/Target/RISCV/RISCVInstrInfoXSfmm.td | 51 +++++
.../CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll | 18 ++
.../RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll | 20 ++
.../RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll | 20 ++
.../RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll | 20 ++
.../RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll | 20 ++
.../CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll | 52 +++++
.../CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll | 20 ++
.../CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll | 20 ++
.../CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll | 20 ++
.../CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll | 20 ++
.../CodeGen/RISCV/rvv/sifive_sf_vlte16.ll | 23 +++
.../CodeGen/RISCV/rvv/sifive_sf_vlte32.ll | 23 +++
.../CodeGen/RISCV/rvv/sifive_sf_vlte64.ll | 23 +++
.../test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll | 23 +++
.../CodeGen/RISCV/rvv/sifive_sf_vsettk.ll | 23 +++
.../CodeGen/RISCV/rvv/sifive_sf_vsettm.ll | 23 +++
.../CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll | 72 +++++++
.../CodeGen/RISCV/rvv/sifive_sf_vste16.ll | 23 +++
.../CodeGen/RISCV/rvv/sifive_sf_vste32.ll | 23 +++
.../CodeGen/RISCV/rvv/sifive_sf_vste64.ll | 23 +++
.../test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll | 23 +++
.../CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll | 22 +++
.../CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll | 114 +++++++++++
.../CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll | 114 +++++++++++
.../CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll | 24 +++
30 files changed, 1141 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll
create mode 100644 llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
index bf20080229aa4..4a0272ca0a0a3 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
@@ -180,4 +180,98 @@ let TargetPrefix = "riscv" in {
// XSfvfnrclipxfqf
defm int_riscv_sf_vfnrclip_x_f_qf : RISCVSFCustomVFNRCLIP;
defm int_riscv_sf_vfnrclip_xu_f_qf : RISCVSFCustomVFNRCLIP;
+
+ // XSfmm
+ // Output: (output_len)
+ // Input: (input_len, vsew, twiden)
+ class RISCVSFVSet
+ : DefaultAttrsIntrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>, IntrNoMem]>;
+
+ // Input: (tss, base, tn)
+ class RISCVSFTileLoad
+ : DefaultAttrsIntrinsic<[],
+ [llvm_anyint_ty, llvm_ptr_ty, LLVMMatchType<0>],
+ [NoCapture<ArgIndex<1>>, IntrHasSideEffects]>,
+ RISCVVIntrinsic;
+
+ // Input: (tss, base, tn)
+ class RISCVSFTileStore
+ : DefaultAttrsIntrinsic<[],
+ [llvm_anyint_ty, llvm_ptr_ty, LLVMMatchType<0>],
+ [NoCapture<ArgIndex<1>>, IntrWriteMem,
+ IntrHasSideEffects]>,
+ RISCVVIntrinsic;
+
+ // Output: ()
+ // Input: (mtd, mat1, mat2, tm, tn, tk, twiden)
+ class RISCVSFCustomMatMul<bit is_float = false>
+ : DefaultAttrsIntrinsic<[], [llvm_anyint_ty, llvm_anyvector_ty,
+ !if(is_float, LLVMMatchType<1>,
+ llvm_anyvector_ty),
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrHasSideEffects,
+ ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<6>>]>,
+ RISCVVIntrinsic;
+
+ def int_riscv_sf_vsettnt : RISCVSFVSet;
+ def int_riscv_sf_vsettm : RISCVSFVSet;
+ def int_riscv_sf_vsettk : RISCVSFVSet;
+
+ def int_riscv_sf_vlte8 : RISCVSFTileLoad;
+ def int_riscv_sf_vlte16 : RISCVSFTileLoad;
+ def int_riscv_sf_vlte32 : RISCVSFTileLoad;
+ def int_riscv_sf_vlte64 : RISCVSFTileLoad;
+ def int_riscv_sf_vste8 : RISCVSFTileStore;
+ def int_riscv_sf_vste16 : RISCVSFTileStore;
+ def int_riscv_sf_vste32 : RISCVSFTileStore;
+ def int_riscv_sf_vste64 : RISCVSFTileStore;
+
+ // Output: (vd)
+ // Input: (tss, tn)
+ def int_riscv_sf_vtmv_v_t
+ : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+ [llvm_anyint_ty, LLVMMatchType<1>],
+ [IntrNoMem, IntrHasSideEffects]>,
+ RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
+ // Output: ()
+ // Input: (tss, vs2, tn)
+ def int_riscv_sf_vtmv_t_v
+ : DefaultAttrsIntrinsic<[], [LLVMMatchType<1>, llvm_anyvector_ty,
+ llvm_anyint_ty], [IntrNoMem, IntrHasSideEffects]>,
+ RISCVVIntrinsic {
+ let VLOperand = 2;
+ }
+
+ foreach a = ["u", "s"] in {
+ foreach b = ["u", "s"] in {
+ def int_riscv_sf_mm_ # a # _ # b : RISCVSFCustomMatMul;
+ }
+ }
+
+ def int_riscv_sf_mm_f_f : RISCVSFCustomMatMul<true>;
+ foreach e1 = [5, 4] in
+ foreach e2 = [5, 4] in
+ def int_riscv_sf_mm_e # e1 # m # !sub(7, e1) # _e # e2 # m # !sub(7, e2)
+ : RISCVSFCustomMatMul<true>;
+
+ // Output: ()
+  // Input: (mtd, tm, tn, log2sew, twiden)
+ def int_riscv_sf_vtzero_t
+ : DefaultAttrsIntrinsic<[],
+                          [llvm_anyint_ty, LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ [ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<3>>,
+ ImmArg<ArgIndex<4>>, IntrNoMem, IntrHasSideEffects]>,
+ RISCVVIntrinsic;
+
+ // Output: ()
+ // Input: ()
+ def int_riscv_sf_vtdiscard
+ : DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>,
+ RISCVVIntrinsic;
} // TargetPrefix = "riscv"
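For reference, a minimal IR-level sketch of how the new vset and matmul
intrinsics compose (assuming rv64, so the iXLen overload is i64; the function
name and all operand values are illustrative, with operand order following the
class comments above):

  declare i64 @llvm.riscv.sf.vsettnt.i64(i64, i64, i64)
  declare void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64, <vscale x 32 x half>, <vscale x 32 x half>, i64, i64, i64, i64)

  define void @xsfmm_sketch(<vscale x 32 x half> %a, <vscale x 32 x half> %b, i64 %tm, i64 %tn, i64 %tk) {
  entry:
    ; Set tn with vsew=1 (e16) and twiden=2 (w2); both are immediates.
    %tn.out = call i64 @llvm.riscv.sf.vsettnt.i64(i64 %tn, i64 1, i64 2)
    ; mtd=0 targets tile mt0; the trailing i64 2 is the twiden immediate again.
    call void @llvm.riscv.sf.mm.f.f.i64.nxv32f16(i64 0, <vscale x 32 x half> %a, <vscale x 32 x half> %b, i64 %tm, i64 %tn, i64 %tk, i64 2)
    ret void
  }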
diff --git a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
index 66ca43604670f..de433e407a48f 100644
--- a/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
+++ b/llvm/lib/Target/RISCV/RISCVAsmPrinter.cpp
@@ -1100,6 +1100,12 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
--NumOps;
if (RISCVII::hasRoundModeOp(TSFlags))
--NumOps;
+ if (RISCVII::hasTWidenOp(TSFlags))
+ --NumOps;
+ if (RISCVII::hasTMOp(TSFlags))
+ --NumOps;
+ if (RISCVII::hasTKOp(TSFlags))
+ --NumOps;
bool hasVLOutput = RISCVInstrInfo::isFaultOnlyFirstLoad(*MI);
for (unsigned OpNo = 0; OpNo != NumOps; ++OpNo) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index dda6023b37f7b..9f93f4ff1faaf 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -516,6 +516,44 @@ void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
CurDAG->getMachineNode(Opcode, DL, XLenVT, VLOperand, VTypeIOp));
}
+void RISCVDAGToDAGISel::selectXSfmmVSET(SDNode *Node) {
+ if (!Subtarget->hasVendorXSfmmbase())
+ return;
+
+ assert(Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN && "Unexpected opcode");
+
+ SDLoc DL(Node);
+ MVT XLenVT = Subtarget->getXLenVT();
+
+ unsigned IntNo = Node->getConstantOperandVal(0);
+
+ assert((IntNo == Intrinsic::riscv_sf_vsettnt ||
+ IntNo == Intrinsic::riscv_sf_vsettm ||
+ IntNo == Intrinsic::riscv_sf_vsettk) &&
+ "Unexpected XSfmm vset intrinsic");
+
+ unsigned SEW = RISCVVType::decodeVSEW(Node->getConstantOperandVal(2));
+ unsigned Widen = RISCVVType::decodeTWiden(Node->getConstantOperandVal(3));
+ unsigned PseudoOpCode =
+ IntNo == Intrinsic::riscv_sf_vsettnt ? RISCV::PseudoSF_VSETTNT
+ : IntNo == Intrinsic::riscv_sf_vsettm ? RISCV::PseudoSF_VSETTM
+ : RISCV::PseudoSF_VSETTK;
+
+ if (IntNo == Intrinsic::riscv_sf_vsettnt) {
+ unsigned VTypeI = RISCVVType::encodeXSfmmVType(SEW, Widen, 0);
+ SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
+
+ ReplaceNode(Node, CurDAG->getMachineNode(PseudoOpCode, DL, XLenVT,
+ Node->getOperand(1), VTypeIOp));
+ } else {
+ SDValue Log2SEW = CurDAG->getTargetConstant(Log2_32(SEW), DL, XLenVT);
+ SDValue TWiden = CurDAG->getTargetConstant(Widen, DL, XLenVT);
+ ReplaceNode(Node,
+ CurDAG->getMachineNode(PseudoOpCode, DL, XLenVT,
+ Node->getOperand(1), Log2SEW, TWiden));
+ }
+}
+
bool RISCVDAGToDAGISel::tryShrinkShlLogicImm(SDNode *Node) {
MVT VT = Node->getSimpleValueType(0);
unsigned Opcode = Node->getOpcode();
@@ -936,6 +974,11 @@ bool RISCVDAGToDAGISel::tryIndexedLoad(SDNode *Node) {
return true;
}
+static Register getTileReg(uint64_t TileNum) {
+ assert(TileNum <= 15 && "Invalid tile number");
+ return RISCV::T0 + TileNum;
+}
+
void RISCVDAGToDAGISel::selectSF_VC_X_SE(SDNode *Node) {
if (!Subtarget->hasVInstructions())
return;
@@ -2130,6 +2173,10 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_vsetvli:
case Intrinsic::riscv_vsetvlimax:
return selectVSETVLI(Node);
+ case Intrinsic::riscv_sf_vsettnt:
+ case Intrinsic::riscv_sf_vsettm:
+ case Intrinsic::riscv_sf_vsettk:
+ return selectXSfmmVSET(Node);
}
break;
}
@@ -2553,6 +2600,142 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
case Intrinsic::riscv_sf_vc_i_se:
selectSF_VC_X_SE(Node);
return;
+ case Intrinsic::riscv_sf_vlte8:
+ case Intrinsic::riscv_sf_vlte16:
+ case Intrinsic::riscv_sf_vlte32:
+ case Intrinsic::riscv_sf_vlte64: {
+ unsigned Log2SEW;
+ unsigned PseudoInst;
+ switch (IntNo) {
+ case Intrinsic::riscv_sf_vlte8:
+ PseudoInst = RISCV::PseudoSF_VLTE8;
+ Log2SEW = 3;
+ break;
+ case Intrinsic::riscv_sf_vlte16:
+ PseudoInst = RISCV::PseudoSF_VLTE16;
+ Log2SEW = 4;
+ break;
+ case Intrinsic::riscv_sf_vlte32:
+ PseudoInst = RISCV::PseudoSF_VLTE32;
+ Log2SEW = 5;
+ break;
+ case Intrinsic::riscv_sf_vlte64:
+ PseudoInst = RISCV::PseudoSF_VLTE64;
+ Log2SEW = 6;
+ break;
+ }
+
+ SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
+ SDValue TWidenOp = CurDAG->getTargetConstant(1, DL, XLenVT);
+ SDValue Operands[] = {Node->getOperand(2),
+ Node->getOperand(3),
+ Node->getOperand(4),
+ SEWOp,
+ TWidenOp,
+ Node->getOperand(0)};
+
+ MachineSDNode *TileLoad =
+ CurDAG->getMachineNode(PseudoInst, DL, Node->getVTList(), Operands);
+ if (auto *MemOp = dyn_cast<MemSDNode>(Node))
+ CurDAG->setNodeMemRefs(TileLoad, {MemOp->getMemOperand()});
+
+ ReplaceNode(Node, TileLoad);
+ return;
+ }
+ case Intrinsic::riscv_sf_mm_s_s:
+ case Intrinsic::riscv_sf_mm_s_u:
+ case Intrinsic::riscv_sf_mm_u_s:
+ case Intrinsic::riscv_sf_mm_u_u:
+ case Intrinsic::riscv_sf_mm_e5m2_e5m2:
+ case Intrinsic::riscv_sf_mm_e5m2_e4m3:
+ case Intrinsic::riscv_sf_mm_e4m3_e5m2:
+ case Intrinsic::riscv_sf_mm_e4m3_e4m3:
+ case Intrinsic::riscv_sf_mm_f_f: {
+ bool HasFRM = false;
+ unsigned PseudoInst;
+ switch (IntNo) {
+ case Intrinsic::riscv_sf_mm_s_s:
+ PseudoInst = RISCV::PseudoSF_MM_S_S;
+ break;
+ case Intrinsic::riscv_sf_mm_s_u:
+ PseudoInst = RISCV::PseudoSF_MM_S_U;
+ break;
+ case Intrinsic::riscv_sf_mm_u_s:
+ PseudoInst = RISCV::PseudoSF_MM_U_S;
+ break;
+ case Intrinsic::riscv_sf_mm_u_u:
+ PseudoInst = RISCV::PseudoSF_MM_U_U;
+ break;
+ case Intrinsic::riscv_sf_mm_e5m2_e5m2:
+ PseudoInst = RISCV::PseudoSF_MM_E5M2_E5M2;
+ HasFRM = true;
+ break;
+ case Intrinsic::riscv_sf_mm_e5m2_e4m3:
+ PseudoInst = RISCV::PseudoSF_MM_E5M2_E4M3;
+ HasFRM = true;
+ break;
+ case Intrinsic::riscv_sf_mm_e4m3_e5m2:
+ PseudoInst = RISCV::PseudoSF_MM_E4M3_E5M2;
+ HasFRM = true;
+ break;
+ case Intrinsic::riscv_sf_mm_e4m3_e4m3:
+ PseudoInst = RISCV::PseudoSF_MM_E4M3_E4M3;
+ HasFRM = true;
+ break;
+ case Intrinsic::riscv_sf_mm_f_f:
+ if (Node->getOperand(3).getValueType().getScalarType() == MVT::bf16)
+ PseudoInst = RISCV::PseudoSF_MM_F_F_ALT;
+ else
+ PseudoInst = RISCV::PseudoSF_MM_F_F;
+ HasFRM = true;
+ break;
+ }
+ uint64_t TileNum = Node->getConstantOperandVal(2);
+ SDValue Op1 = Node->getOperand(3);
+ SDValue Op2 = Node->getOperand(4);
+ MVT VT = Op1->getSimpleValueType(0);
+ unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+ SDValue TmOp = Node->getOperand(5);
+ SDValue TnOp = Node->getOperand(6);
+ SDValue TkOp = Node->getOperand(7);
+ SDValue TWidenOp = Node->getOperand(8);
+ SDValue Chain = Node->getOperand(0);
+
+ // sf.mm.f.f with sew=32, twiden=2 is invalid
+ if (IntNo == Intrinsic::riscv_sf_mm_f_f && Log2SEW == 5 &&
+ TWidenOp->getAsZExtVal() == 2)
+ reportFatalUsageError("sf.mm.f.f doesn't support (sew=32, twiden=2)");
+
+ SmallVector<SDValue, 10> Operands(
+ {CurDAG->getRegister(getTileReg(TileNum), XLenVT), Op1, Op2});
+ if (HasFRM)
+ Operands.push_back(
+ CurDAG->getTargetConstant(RISCVFPRndMode::DYN, DL, XLenVT));
+ Operands.append({TmOp, TnOp, TkOp,
+ CurDAG->getTargetConstant(Log2SEW, DL, XLenVT), TWidenOp,
+ Chain});
+
+ auto *NewNode =
+ CurDAG->getMachineNode(PseudoInst, DL, Node->getVTList(), Operands);
+
+ ReplaceNode(Node, NewNode);
+ return;
+ }
+ case Intrinsic::riscv_sf_vtzero_t: {
+ uint64_t TileNum = Node->getConstantOperandVal(2);
+ SDValue Tm = Node->getOperand(3);
+ SDValue Tn = Node->getOperand(4);
+ SDValue Log2SEW = Node->getOperand(5);
+ SDValue TWiden = Node->getOperand(6);
+ SDValue Chain = Node->getOperand(0);
+ auto *NewNode = CurDAG->getMachineNode(
+ RISCV::PseudoSF_VTZERO_T, DL, Node->getVTList(),
+ {CurDAG->getRegister(getTileReg(TileNum), XLenVT), Tm, Tn, Log2SEW,
+ TWiden, Chain});
+
+ ReplaceNode(Node, NewNode);
+ return;
+ }
}
break;
}
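The reportFatalUsageError guard in the sf.mm selection above corresponds to IR
of the following shape; this is a sketch of the rejected case (rv64 mangling,
hypothetical function name), where f32 elements (sew=32) are combined with a
twiden immediate of 2:

  declare void @llvm.riscv.sf.mm.f.f.i64.nxv16f32(i64, <vscale x 16 x float>, <vscale x 16 x float>, i64, i64, i64, i64)

  define void @bad_widen(<vscale x 16 x float> %a, <vscale x 16 x float> %b, i64 %tm, i64 %tn, i64 %tk) {
  entry:
    ; f32 elements give sew=32; twiden=2 makes this the unsupported
    ; (sew=32, twiden=2) combination, diagnosed during instruction selection.
    call void @llvm.riscv.sf.mm.f.f.i64.nxv16f32(i64 0, <vscale x 16 x float> %a, <vscale x 16 x float> %b, i64 %tm, i64 %tn, i64 %tk, i64 2)
    ret void
  }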
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index cf2f763abc063..fb5678f6e8272 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -167,6 +167,7 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
void selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked, bool IsOrdered);
void selectVSETVLI(SDNode *Node);
+ void selectXSfmmVSET(SDNode *Node);
void selectSF_VC_X_SE(SDNode *Node);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSfmm.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSfmm.td
index c405feeb7d6f5..b170803a5909d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSfmm.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSfmm.td
@@ -415,3 +415,54 @@ let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
(ins TR:$rd, AVL:$atm, AVL:$atn, ixlenimm:$sew, ixlenimm:$twiden)>;
def PseudoSF_VTDISCARD : RISCVVPseudo<(outs), (ins), []>;
}
+
+class VPatXSfmmTileStore<string intrinsic_name,
+ string inst_name,
+ int log2sew> :
+ Pat<(!cast<Intrinsic>(intrinsic_name)
+ (XLenVT GPR:$rs2),
+ (XLenVT GPR:$rs1),
+ (XLenVT AVL:$tn)),
+ (!cast<Instruction>(inst_name)
+ (XLenVT GPR:$rs2),
+ (XLenVT GPR:$rs1),
+ GPR:$tn, log2sew, 1)>;
+
+class VPatXSfmmTileMove_T_V<string intrinsic_name,
+ string inst_name,
+ ValueType reg_type,
+ int log2sew> :
+ Pat<(!cast<Intrinsic>(intrinsic_name)
+ (XLenVT GPR:$rs1),
+ (reg_type VRM8:$vs2),
+ (XLenVT AVL:$atn)),
+ (!cast<Instruction>(inst_name)
+ (XLenVT GPR:$rs1),
+ (reg_type VRM8:$vs2),
+ GPR:$atn, log2sew, 1)>;
+
+class VPatXSfmmTileMove_V_T<string intrinsic_name,
+ string inst_name,
+ ValueType result_type,
+ int log2sew> :
+ Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+ (XLenVT GPR:$rs1),
+ (XLenVT AVL:$atn))),
+ (!cast<Instruction>(inst_name)
+ (XLenVT GPR:$rs1),
+ GPR:$atn, log2sew, 1)>;
+
+class VPatXSfmmVTDiscard<string intrinsic_name,
+ string inst_name> :
+ Pat<(!cast<Intrinsic>(intrinsic_name)),
+ (!cast<Instruction>(inst_name))>;
+
+foreach eew = [8, 16, 32, 64] in
+ def : VPatXSfmmTileStore<"int_riscv_sf_vste" # eew, "PseudoSF_VSTE" # eew, !logtwo(eew)>;
+
+foreach vti = [VI8M8, VI16M8, VI32M8, VI64M8, VF16M8, VF32M8, VF64M8, VBF16M8] in {
+ def : VPatXSfmmTileMove_T_V<"int_riscv_sf_vtmv_t_v", "PseudoSF_VTMV_T_V", vti.Vector, vti.Log2SEW>;
+ def : VPatXSfmmTileMove_V_T<"int_riscv_sf_vtmv_v_t", "PseudoSF_VTMV_V_T", vti.Vector, vti.Log2SEW>;
+}
+
+def : VPatXSfmmVTDiscard<"int_riscv_sf_vtdiscard", "PseudoSF_VTDISCARD">;
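A brief IR sketch of the tile-move intrinsics these patterns match (rv64
mangling; the function name and values are illustrative), moving a vector into
a tile and back, which selects to the PseudoSF_VTMV_* instructions above:

  declare void @llvm.riscv.sf.vtmv.t.v.nxv16f32.i64(i64, <vscale x 16 x float>, i64)
  declare <vscale x 16 x float> @llvm.riscv.sf.vtmv.v.t.nxv16f32.i64(i64, i64)

  define <vscale x 16 x float> @move_roundtrip(i64 %tss, <vscale x 16 x float> %v, i64 %tn) {
  entry:
    ; Vector -> tile, then tile -> vector, both with the same tile slice %tss.
    call void @llvm.riscv.sf.vtmv.t.v.nxv16f32.i64(i64 %tss, <vscale x 16 x float> %v, i64 %tn)
    %r = call <vscale x 16 x float> @llvm.riscv.sf.vtmv.v.t.nxv16f32.i64(i64 %tss, i64 %tn)
    ret <vscale x 16 x float> %r
  }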
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll b/llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll
new file mode 100644
index 0000000000000..d9a49a1b6b6ea
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive-O0-ATM-ATK.ll
@@ -0,0 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v -O0 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-RV64
+
+define void @matmul() {
+; CHECK-RV64-LABEL: matmul:
+; CHECK-RV64: # %bb.0: # %entry
+; CHECK-RV64-NEXT: li a0, 0
+; CHECK-RV64-NEXT: vsetvli zero, a0, 512
+; CHECK-RV64-NEXT: sf.vsettm zero, a0
+; CHECK-RV64-NEXT: sf.vtzero.t mt0
+; CHECK-RV64-NEXT: ret
+entry:
+ call void @llvm.riscv.sf.vtzero.t.i64(i64 0, i64 0, i64 0, i64 3, i64 1)
+ ret void
+}
+
+; Function Attrs: nocallback nofree nosync nounwind willreturn
+declare void @llvm.riscv.sf.vtzero.t.i64(i64 immarg, i64, i64, i64 immarg, i64 immarg) #0
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll
new file mode 100644
index 0000000000000..9b9a849cd7262
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e4m3.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.mm.e4m3.e4m3.iXLen.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_mm_e4m3_e4m3_w4_u8m8_u8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) {
+; CHECK-LABEL: test_sf_mm_e4m3_e4m3_w4_u8m8_u8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e8, w4
+; CHECK-NEXT: sf.vsettm zero, a1
+; CHECK-NEXT: sf.vsettk zero, a3
+; CHECK-NEXT: sf.mm.e4m3.e4m3 mt0, v8, v16
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.mm.e4m3.e4m3.iXLen.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll
new file mode 100644
index 0000000000000..b63974f04a66e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e4m3_e5m2.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.mm.e4m3.e5m2.iXLen.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_mm_e4m3_e5m2_w4_u8m8_u8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) {
+; CHECK-LABEL: test_sf_mm_e4m3_e5m2_w4_u8m8_u8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e8, w4
+; CHECK-NEXT: sf.vsettm zero, a1
+; CHECK-NEXT: sf.vsettk zero, a3
+; CHECK-NEXT: sf.mm.e4m3.e5m2 mt0, v8, v16
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.mm.e4m3.e5m2.iXLen.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll
new file mode 100644
index 0000000000000..62d629b1b1f1d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e4m3.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.mm.e5m2.e4m3.iXLen.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_mm_e5m2_e4m3_w4_u8m8_u8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) {
+; CHECK-LABEL: test_sf_mm_e5m2_e4m3_w4_u8m8_u8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e8, w4
+; CHECK-NEXT: sf.vsettm zero, a1
+; CHECK-NEXT: sf.vsettk zero, a3
+; CHECK-NEXT: sf.mm.e5m2.e4m3 mt0, v8, v16
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.mm.e5m2.e4m3.iXLen.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll
new file mode 100644
index 0000000000000..7a90c97bcf0be
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_e5m2_e5m2.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.mm.e5m2.e5m2.iXLen.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_mm_e5m2_e5m2_w4_u8m8_u8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) {
+; CHECK-LABEL: test_sf_mm_e5m2_e5m2_w4_u8m8_u8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e8, w4
+; CHECK-NEXT: sf.vsettm zero, a1
+; CHECK-NEXT: sf.vsettk zero, a3
+; CHECK-NEXT: sf.mm.e5m2.e5m2 mt0, v8, v16
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.mm.e5m2.e5m2.iXLen.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll
new file mode 100644
index 0000000000000..29451c60b9248
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_f_f.ll
@@ -0,0 +1,52 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+xsfmm32a32f -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+xsfmm32a32f -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.mm.f.f.iXLen.nxv32f16(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_mm_f_f_w2_f16m8(iXLen %mtd, <vscale x 32 x half> %v1, <vscale x 32 x half> %v2, iXLen %tm, iXLen %tn, iXLen %tk) {
+; CHECK-LABEL: test_sf_mm_f_f_w2_f16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e16, w2
+; CHECK-NEXT: sf.vsettm zero, a1
+; CHECK-NEXT: sf.vsettk zero, a3
+; CHECK-NEXT: sf.mm.f.f mt0, v8, v16
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.mm.f.f.iXLen.nxv32f16(iXLen 0, <vscale x 32 x half> %v1, <vscale x 32 x half> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 2)
+ ret void
+}
+
+declare void @llvm.riscv.sf.mm.f.f.iXLen.nxv16f32(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_mm_f_f_w1_f32m8(iXLen %mtd, <vscale x 16 x float> %v1, <vscale x 16 x float> %v2, iXLen %tm, iXLen %tn, iXLen %tk) {
+; CHECK-LABEL: test_sf_mm_f_f_w1_f32m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e32, w1
+; CHECK-NEXT: sf.vsettm zero, a1
+; CHECK-NEXT: sf.vsettk zero, a3
+; CHECK-NEXT: sf.mm.f.f mt0, v8, v16
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.mm.f.f.iXLen.nxv16f32(iXLen 0, <vscale x 16 x float> %v1, <vscale x 16 x float> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 1)
+ ret void
+}
+
+declare void @llvm.riscv.sf.mm.f.f.iXLen.nxv8f64(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_mm_f_f_w1_f64m8(iXLen %mtd, <vscale x 8 x double> %v1, <vscale x 8 x double> %v2, iXLen %tm, iXLen %tn, iXLen %tk) {
+; CHECK-LABEL: test_sf_mm_f_f_w1_f64m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e64, w1
+; CHECK-NEXT: sf.vsettm zero, a1
+; CHECK-NEXT: sf.vsettk zero, a3
+; CHECK-NEXT: sf.mm.f.f mt0, v8, v16
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.mm.f.f.iXLen.nxv8f64(iXLen 0, <vscale x 8 x double> %v1, <vscale x 8 x double> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 1)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll
new file mode 100644
index 0000000000000..6a4b29ff0e786
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_s.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8i \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.mm.s.s.iXLen.nxv64i8.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_mm_s_s_w4_i8m8_i8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) {
+; CHECK-LABEL: test_sf_mm_s_s_w4_i8m8_i8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e8, w4
+; CHECK-NEXT: sf.vsettm zero, a1
+; CHECK-NEXT: sf.vsettk zero, a3
+; CHECK-NEXT: sf.mm.s.s mt0, v8, v16
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.mm.s.s.iXLen.nxv64i8.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll
new file mode 100644
index 0000000000000..79239b01cd1d4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_s_u.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8i \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.mm.s.u.iXLen.nxv64i8.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_mm_s_u_w4_i8m8_i8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) {
+; CHECK-LABEL: test_sf_mm_s_u_w4_i8m8_i8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e8, w4
+; CHECK-NEXT: sf.vsettm zero, a1
+; CHECK-NEXT: sf.vsettk zero, a3
+; CHECK-NEXT: sf.mm.s.u mt0, v8, v16
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.mm.s.u.iXLen.nxv64i8.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll
new file mode 100644
index 0000000000000..b0d039bb194a4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_s.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8i \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.mm.u.s.iXLen.nxv64i8.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_mm_u_s_w4_i8m8_i8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) {
+; CHECK-LABEL: test_sf_mm_u_s_w4_i8m8_i8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e8, w4
+; CHECK-NEXT: sf.vsettm zero, a1
+; CHECK-NEXT: sf.vsettk zero, a3
+; CHECK-NEXT: sf.mm.u.s mt0, v8, v16
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.mm.u.s.iXLen.nxv64i8.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll
new file mode 100644
index 0000000000000..913c277655e43
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_mm_u_u.ll
@@ -0,0 +1,20 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfmm32a8i \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfmm32a8i \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.mm.u.u.iXLen.nxv64i8.nxv64i8(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen, iXLen, iXLen)
+
+define void @test_sf_mm_u_u_w4_i8m8_i8m8(iXLen %mtd, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk) {
+; CHECK-LABEL: test_sf_mm_u_u_w4_i8m8_i8m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e8, w4
+; CHECK-NEXT: sf.vsettm zero, a1
+; CHECK-NEXT: sf.vsettk zero, a3
+; CHECK-NEXT: sf.mm.u.u mt0, v8, v16
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.mm.u.u.iXLen.nxv64i8.nxv64i8(iXLen 0, <vscale x 64 x i8> %v1, <vscale x 64 x i8> %v2, iXLen %tm, iXLen %tn, iXLen %tk, iXLen 4)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll
new file mode 100644
index 0000000000000..8048dec110a5f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte16.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.vlte16.iXLen(iXLen, ptr, iXLen)
+
+define dso_local void @test_sf_vlte16(iXLen %tss, ptr %base, iXLen %vl) {
+; CHECK-LABEL: test_sf_vlte16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e16, w1
+; CHECK-NEXT: sf.vlte16 a0, (a1)
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vlte16.iXLen(iXLen %tss, ptr %base, iXLen %vl)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll
new file mode 100644
index 0000000000000..a526dc8471b1a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte32.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.vlte32.iXLen(iXLen, ptr, iXLen)
+
+define dso_local void @test_sf_vlte32(iXLen %tss, ptr %base, iXLen %vl) {
+; CHECK-LABEL: test_sf_vlte32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e32, w1
+; CHECK-NEXT: sf.vlte32 a0, (a1)
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vlte32.iXLen(iXLen %tss, ptr %base, iXLen %vl)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll
new file mode 100644
index 0000000000000..ed0c48ac467e6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte64.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.vlte64.iXLen(iXLen, ptr, iXLen)
+
+define dso_local void @test_sf_vlte64(iXLen %tss, ptr %base, iXLen %vl) {
+; CHECK-LABEL: test_sf_vlte64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e64, w1
+; CHECK-NEXT: sf.vlte64 a0, (a1)
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vlte64.iXLen(iXLen %tss, ptr %base, iXLen %vl)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll
new file mode 100644
index 0000000000000..67b3ed2ec55ab
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vlte8.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.vlte8.iXLen(iXLen, ptr, iXLen)
+
+define dso_local void @test_sf_vlte8(iXLen %tss, ptr %base, iXLen %vl) {
+; CHECK-LABEL: test_sf_vlte8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e8, w1
+; CHECK-NEXT: sf.vlte8 a0, (a1)
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vlte8.iXLen(iXLen %tss, ptr %base, iXLen %vl)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll
new file mode 100644
index 0000000000000..4da37fad1b536
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettk.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare iXLen @llvm.riscv.sf.vsettk.iXLen(iXLen, iXLen, iXLen)
+
+define iXLen @test_sf_vsettk(iXLen %tk) {
+; CHECK-LABEL: test_sf_vsettk:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt a1, zero, e16, w2
+; CHECK-NEXT: sf.vsettk a0, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call iXLen @llvm.riscv.sf.vsettk.iXLen(iXLen %tk, iXLen 1, iXLen 2)
+ ret iXLen %0
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll
new file mode 100644
index 0000000000000..143c26cc8cff1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettm.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare iXLen @llvm.riscv.sf.vsettm.iXLen(iXLen, iXLen, iXLen)
+
+define iXLen @test_sf_vsettm(iXLen %tm) {
+; CHECK-LABEL: test_sf_vsettm:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt a1, zero, e8, w4
+; CHECK-NEXT: sf.vsettm a0, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call iXLen @llvm.riscv.sf.vsettm.iXLen(iXLen %tm, iXLen 0, iXLen 3)
+ ret iXLen %0
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll
new file mode 100644
index 0000000000000..48fa1bc8f6cbe
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vsettnt.ll
@@ -0,0 +1,72 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen, iXLen, iXLen)
+
+define iXLen @test_sf_vsettnt_e8w1(iXLen %tn) {
+; CHECK-LABEL: test_sf_vsettnt_e8w1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt a0, a0, e8, w1
+; CHECK-NEXT: ret
+ entry:
+ %0 = call iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen %tn, iXLen 0, iXLen 1)
+ ret iXLen %0
+}
+
+define iXLen @test_sf_vsettnt_e8w2(iXLen %tn) {
+; CHECK-LABEL: test_sf_vsettnt_e8w2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt a0, a0, e8, w2
+; CHECK-NEXT: ret
+ entry:
+ %0 = call iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen %tn, iXLen 0, iXLen 2)
+ ret iXLen %0
+}
+
+define iXLen @test_sf_vsettnt_e8w4(iXLen %tn) {
+; CHECK-LABEL: test_sf_vsettnt_e8w4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt a0, a0, e8, w4
+; CHECK-NEXT: ret
+ entry:
+ %0 = call iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen %tn, iXLen 0, iXLen 3)
+ ret iXLen %0
+}
+
+define iXLen @test_sf_vsettnt_e16w1(iXLen %tn) {
+; CHECK-LABEL: test_sf_vsettnt_e16w1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt a0, a0, e16, w1
+; CHECK-NEXT: ret
+ entry:
+ %0 = call iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen %tn, iXLen 1, iXLen 1)
+ ret iXLen %0
+}
+
+define iXLen @test_sf_vsettnt_e16w2(iXLen %tn) {
+; CHECK-LABEL: test_sf_vsettnt_e16w2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt a0, a0, e16, w2
+; CHECK-NEXT: ret
+ entry:
+ %0 = call iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen %tn, iXLen 1, iXLen 2)
+ ret iXLen %0
+}
+
+define iXLen @test_sf_vsettnt_e16w4(iXLen %tn) {
+; CHECK-LABEL: test_sf_vsettnt_e16w4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt a0, a0, e16, w4
+; CHECK-NEXT: ret
+ entry:
+ %0 = call iXLen @llvm.riscv.sf.vsettnt.iXLen(iXLen %tn, iXLen 1, iXLen 3)
+ ret iXLen %0
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll
new file mode 100644
index 0000000000000..7a76151e01cc5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste16.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.vste16.iXLen(iXLen, ptr, iXLen)
+
+define dso_local void @test_sf_vste16(iXLen %tss, ptr %base, iXLen %vl) {
+; CHECK-LABEL: test_sf_vste16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e16, w1
+; CHECK-NEXT: sf.vste16 a0, (a1)
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vste16.iXLen(iXLen %tss, ptr %base, iXLen %vl)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll
new file mode 100644
index 0000000000000..8ff6e6af3b02d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste32.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.vste32.iXLen(iXLen, ptr, iXLen)
+
+define dso_local void @test_sf_vste32(iXLen %tss, ptr %base, iXLen %vl) {
+; CHECK-LABEL: test_sf_vste32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e32, w1
+; CHECK-NEXT: sf.vste32 a0, (a1)
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vste32.iXLen(iXLen %tss, ptr %base, iXLen %vl)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll
new file mode 100644
index 0000000000000..53990e4dd2483
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste64.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.vste64.iXLen(iXLen, ptr, iXLen)
+
+define dso_local void @test_sf_vste64(iXLen %tss, ptr %base, iXLen %vl) {
+; CHECK-LABEL: test_sf_vste64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e64, w1
+; CHECK-NEXT: sf.vste64 a0, (a1)
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vste64.iXLen(iXLen %tss, ptr %base, iXLen %vl)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll
new file mode 100644
index 0000000000000..09b72594ac7c6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vste8.ll
@@ -0,0 +1,23 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.vste8.iXLen(iXLen, ptr, iXLen)
+
+define dso_local void @test_sf_vste8(iXLen %tss, ptr %base, iXLen %vl) {
+; CHECK-LABEL: test_sf_vste8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a2, e8, w1
+; CHECK-NEXT: sf.vste8 a0, (a1)
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vste8.iXLen(iXLen %tss, ptr %base, iXLen %vl)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll
new file mode 100644
index 0000000000000..394eb60f73743
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtdiscard.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.vtdiscard()
+
+define dso_local void @test_sf_vtdiscard() {
+; CHECK-LABEL: test_sf_vtdiscard:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vtdiscard
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vtdiscard()
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll
new file mode 100644
index 0000000000000..66c9d26c209f0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_t_v.ll
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.vtmv.t.v.nxv32bf16.iXLen(iXLen, <vscale x 32 x bfloat>, iXLen)
+
+define void @test_sf_vtmv_t_v_bf16m8(iXLen %tss, <vscale x 32 x bfloat> %src, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_t_v_bf16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e16, w1
+; CHECK-NEXT: sf.vtmv.t.v a0, v8
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vtmv.t.v.nxv32bf16.iXLen(iXLen %tss, <vscale x 32 x bfloat> %src, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vtmv.t.v.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, iXLen)
+
+define void @test_sf_vtmv_t_v_f16(iXLen %tss, <vscale x 32 x half> %src, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_t_v_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e16, w1
+; CHECK-NEXT: sf.vtmv.t.v a0, v8
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vtmv.t.v.nxv32f16.iXLen(iXLen %tss, <vscale x 32 x half> %src, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vtmv.t.v.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, iXLen)
+
+define void @test_sf_vtmv_t_v_f32(iXLen %tss, <vscale x 16 x float> %src, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_t_v_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e32, w1
+; CHECK-NEXT: sf.vtmv.t.v a0, v8
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vtmv.t.v.nxv16f32.iXLen(iXLen %tss, <vscale x 16 x float> %src, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vtmv.t.v.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, iXLen)
+
+define void @test_sf_vtmv_t_v_f64(iXLen %tss, <vscale x 8 x double> %src, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_t_v_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e64, w1
+; CHECK-NEXT: sf.vtmv.t.v a0, v8
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vtmv.t.v.nxv8f64.iXLen(iXLen %tss, <vscale x 8 x double> %src, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vtmv.t.v.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, iXLen)
+
+define void @test_sf_vtmv_t_v_i8(iXLen %tss, <vscale x 64 x i8> %src, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_t_v_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e8, w1
+; CHECK-NEXT: sf.vtmv.t.v a0, v8
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vtmv.t.v.nxv64i8.iXLen(iXLen %tss, <vscale x 64 x i8> %src, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vtmv.t.v.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, iXLen)
+
+define void @test_sf_vtmv_t_v_i16(iXLen %tss, <vscale x 32 x i16> %src, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_t_v_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e16, w1
+; CHECK-NEXT: sf.vtmv.t.v a0, v8
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vtmv.t.v.nxv32i16.iXLen(iXLen %tss, <vscale x 32 x i16> %src, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vtmv.t.v.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, iXLen)
+
+define void @test_sf_vtmv_t_v_i32(iXLen %tss, <vscale x 16 x i32> %src, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_t_v_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e32, w1
+; CHECK-NEXT: sf.vtmv.t.v a0, v8
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vtmv.t.v.nxv16i32.iXLen(iXLen %tss, <vscale x 16 x i32> %src, iXLen %vl)
+ ret void
+}
+
+declare void @llvm.riscv.sf.vtmv.t.v.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, iXLen)
+
+define void @test_sf_vtmv_t_v_i64(iXLen %tss, <vscale x 8 x i64> %src, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_t_v_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e64, w1
+; CHECK-NEXT: sf.vtmv.t.v a0, v8
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vtmv.t.v.nxv8i64.iXLen(iXLen %tss, <vscale x 8 x i64> %src, iXLen %vl)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll
new file mode 100644
index 0000000000000..0dcc2ab5b9a0d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtmv_v_t.ll
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 32 x bfloat> @llvm.riscv.sf.vtmv.v.t.nxv32bf16.iXLen(iXLen, iXLen)
+
+define <vscale x 32 x bfloat> @test_sf_vtmv_v_t_bf16m8(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_bf16m8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e16, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 32 x bfloat> @llvm.riscv.sf.vtmv.v.t.nxv32bf16.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 32 x bfloat> %0
+}
+
+declare <vscale x 32 x half> @llvm.riscv.sf.vtmv.v.t.nxv32f16.iXLen(iXLen, iXLen)
+
+define <vscale x 32 x half> @test_sf_vtmv_v_t_f16(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e16, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 32 x half> @llvm.riscv.sf.vtmv.v.t.nxv32f16.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 32 x half> %0
+}
+
+declare <vscale x 16 x float> @llvm.riscv.sf.vtmv.v.t.nxv16f32.iXLen(iXLen, iXLen)
+
+define <vscale x 16 x float> @test_sf_vtmv_v_t_f32(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e32, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 16 x float> @llvm.riscv.sf.vtmv.v.t.nxv16f32.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 16 x float> %0
+}
+
+declare <vscale x 8 x double> @llvm.riscv.sf.vtmv.v.t.nxv8f64.iXLen(iXLen, iXLen)
+
+define <vscale x 8 x double> @test_sf_vtmv_v_t_f64(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e64, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 8 x double> @llvm.riscv.sf.vtmv.v.t.nxv8f64.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 8 x double> %0
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.sf.vtmv.v.t.nxv64i8.iXLen(iXLen, iXLen)
+
+define <vscale x 64 x i8> @test_sf_vtmv_v_t_i8(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e8, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 64 x i8> @llvm.riscv.sf.vtmv.v.t.nxv64i8.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 64 x i8> %0
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.sf.vtmv.v.t.nxv32i16.iXLen(iXLen, iXLen)
+
+define <vscale x 32 x i16> @test_sf_vtmv_v_t_i16(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e16, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 32 x i16> @llvm.riscv.sf.vtmv.v.t.nxv32i16.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 32 x i16> %0
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.sf.vtmv.v.t.nxv16i32.iXLen(iXLen, iXLen)
+
+define <vscale x 16 x i32> @test_sf_vtmv_v_t_i32(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e32, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 16 x i32> @llvm.riscv.sf.vtmv.v.t.nxv16i32.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 16 x i32> %0
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.sf.vtmv.v.t.nxv8i64.iXLen(iXLen, iXLen)
+
+define <vscale x 8 x i64> @test_sf_vtmv_v_t_i64(iXLen %tss, iXLen %vl) {
+; CHECK-LABEL: test_sf_vtmv_v_t_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e64, w1
+; CHECK-NEXT: sf.vtmv.v.t v8, a0
+; CHECK-NEXT: ret
+ entry:
+ %0 = call <vscale x 8 x i64> @llvm.riscv.sf.vtmv.v.t.nxv8i64.iXLen(iXLen %tss, iXLen %vl)
+ ret <vscale x 8 x i64> %0
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll
new file mode 100644
index 0000000000000..bbccb026f161b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sifive_sf_vtzero_t.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -mattr=+zvfh -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+experimental-zvfbfmin -mattr=+xsfmmbase \
+; RUN: -mattr=+xsfmm32a -mattr=+xsfmm32a8f -mattr=+xsfmm32a4i -mattr=+xsfmm64a64f \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+declare void @llvm.riscv.sf.vtzero.t.iXLen(iXLen, iXLen, iXLen, iXLen, iXLen)
+define void @test_sf_vtzero_t(iXLen %tm, iXLen %tn) {
+; CHECK-LABEL: test_sf_vtzero_t:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sf.vsettnt zero, a1, e8, w4
+; CHECK-NEXT: sf.vsettm zero, a0
+; CHECK-NEXT: sf.vtzero.t mt0
+; CHECK-NEXT: ret
+ entry:
+ call void @llvm.riscv.sf.vtzero.t.iXLen(iXLen 0, iXLen %tm, iXLen %tn, iXLen 3, iXLen 4)
+ ret void
+}
+
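As a closing sketch (rv64 mangling; the function name and tile-state values
are illustrative), a tile load/store round trip built from the intrinsics
tested above; the vset insertion pass supplies the sf.vsettnt configuration
before each tile access:

  declare void @llvm.riscv.sf.vlte16.i64(i64, ptr, i64)
  declare void @llvm.riscv.sf.vste16.i64(i64, ptr, i64)

  define void @copy_tile_row(i64 %tss, ptr %src, ptr %dst, i64 %tn) {
  entry:
    ; Load a row of e16 elements into the tile slice, then store it back out.
    call void @llvm.riscv.sf.vlte16.i64(i64 %tss, ptr %src, i64 %tn)
    call void @llvm.riscv.sf.vste16.i64(i64 %tss, ptr %dst, i64 %tn)
    ret void
  }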