[llvm] [RISCV] Rename -enable-p-ext-codegen to -riscv-enable-p-ext-simd-codegen. (PR #172790)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Wed Dec 17 22:22:48 PST 2025
https://github.com/topperc updated https://github.com/llvm/llvm-project/pull/172790
>From f93a945a05180c49e949541e7834a76b616fff16 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 17 Dec 2025 20:22:49 -0800
Subject: [PATCH] [RISCV] Rename -enable-p-ext-codegen to
 -riscv-enable-p-ext-simd-codegen.
Make it clear this only applies to SIMD code and that it belongs to
RISC-V.
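
For reference, the renamed option is an llc/backend flag; an invocation exercising the P-extension SIMD path would look like the RUN lines updated in the tests below (the input file name here is just a placeholder):

  llc -mtriple=riscv64 -mattr=+experimental-p -riscv-enable-p-ext-simd-codegen -verify-machineinstrs input.ll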
---
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 10 +++++-----
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 18 +++++++++---------
llvm/lib/Target/RISCV/RISCVSubtarget.cpp | 12 ++++++------
llvm/lib/Target/RISCV/RISCVSubtarget.h | 2 +-
.../Target/RISCV/RISCVTargetTransformInfo.cpp | 6 +++---
llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll | 4 ++--
llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll | 2 +-
7 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 4e390a14dd8d9..68b8d68341d3c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -51,7 +51,7 @@ void RISCVDAGToDAGISel::PreprocessISelDAG() {
SDValue Result;
switch (N->getOpcode()) {
case ISD::SPLAT_VECTOR: {
- if (Subtarget->enablePExtCodeGen())
+ if (Subtarget->enablePExtSIMDCodeGen())
break;
// Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
// SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
@@ -1049,7 +1049,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
else if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
Imm = SignExtend64<32>(Imm);
- if (Subtarget->enablePExtCodeGen() && isApplicableToPLI(Imm) &&
+ if (Subtarget->enablePExtSIMDCodeGen() && isApplicableToPLI(Imm) &&
hasAllWUsers(Node)) {
// If it's 4 packed 8-bit integers or 2 packed signed 16-bit integers, we
// can simply copy lower 32 bits to higher 32 bits to make it able to
@@ -1877,7 +1877,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
return;
}
case RISCVISD::PPACK_DH: {
- assert(Subtarget->enablePExtCodeGen() && Subtarget->isRV32());
+ assert(Subtarget->enablePExtSIMDCodeGen() && Subtarget->isRV32());
SDValue Val0 = Node->getOperand(0);
SDValue Val1 = Node->getOperand(1);
@@ -2722,7 +2722,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
CurDAG->RemoveDeadNode(Node);
return;
}
- if (Subtarget->enablePExtCodeGen()) {
+ if (Subtarget->enablePExtSIMDCodeGen()) {
bool Is32BitCast =
(VT == MVT::i32 && (SrcVT == MVT::v4i8 || SrcVT == MVT::v2i16)) ||
(SrcVT == MVT::i32 && (VT == MVT::v4i8 || VT == MVT::v2i16));
@@ -2740,7 +2740,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
break;
}
case ISD::SCALAR_TO_VECTOR:
- if (Subtarget->enablePExtCodeGen()) {
+ if (Subtarget->enablePExtSIMDCodeGen()) {
MVT SrcVT = Node->getOperand(0).getSimpleValueType();
if ((VT == MVT::v2i32 && SrcVT == MVT::i64) ||
(VT == MVT::v4i8 && SrcVT == MVT::i32)) {
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d6f8afafc4357..5a3f59c4b1baa 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -287,7 +287,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
}
// fixed vector is stored in GPRs for P extension packed operations
- if (Subtarget.enablePExtCodeGen()) {
+ if (Subtarget.enablePExtSIMDCodeGen()) {
if (Subtarget.is64Bit()) {
addRegisterClass(MVT::v2i32, &RISCV::GPRRegClass);
addRegisterClass(MVT::v4i16, &RISCV::GPRRegClass);
@@ -502,7 +502,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::FTRUNC, ISD::FRINT, ISD::FROUND,
ISD::FROUNDEVEN, ISD::FCANONICALIZE};
- if (Subtarget.enablePExtCodeGen()) {
+ if (Subtarget.enablePExtSIMDCodeGen()) {
setTargetDAGCombine(ISD::TRUNCATE);
setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
setTruncStoreAction(MVT::v4i16, MVT::v4i8, Expand);
@@ -1851,7 +1851,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
TargetLoweringBase::LegalizeTypeAction
RISCVTargetLowering::getPreferredVectorAction(MVT VT) const {
- if (Subtarget.is64Bit() && Subtarget.enablePExtCodeGen())
+ if (Subtarget.is64Bit() && Subtarget.enablePExtSIMDCodeGen())
if (VT == MVT::v2i16 || VT == MVT::v4i8)
return TypeWidenVector;
@@ -4517,7 +4517,7 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
SDLoc DL(Op);
- if (Subtarget.isRV32() && Subtarget.enablePExtCodeGen()) {
+ if (Subtarget.isRV32() && Subtarget.enablePExtSIMDCodeGen()) {
if (VT != MVT::v4i8)
return SDValue();
@@ -7791,7 +7791,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
}
- if (Subtarget.enablePExtCodeGen()) {
+ if (Subtarget.enablePExtSIMDCodeGen()) {
bool Is32BitCast =
(VT == MVT::i32 && (Op0VT == MVT::v4i8 || Op0VT == MVT::v2i16)) ||
(Op0VT == MVT::i32 && (VT == MVT::v4i8 || VT == MVT::v2i16));
@@ -8476,7 +8476,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
auto *Store = cast<StoreSDNode>(Op);
SDValue StoredVal = Store->getValue();
EVT VT = StoredVal.getValueType();
- if (Subtarget.enablePExtCodeGen()) {
+ if (Subtarget.enablePExtSIMDCodeGen()) {
if (VT == MVT::v2i16 || VT == MVT::v4i8) {
SDValue DL(Op);
SDValue Cast = DAG.getBitcast(MVT::i32, StoredVal);
@@ -8676,7 +8676,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
case ISD::SRL:
case ISD::SRA:
if (Op.getSimpleValueType().isFixedLengthVector()) {
- if (Subtarget.enablePExtCodeGen()) {
+ if (Subtarget.enablePExtSIMDCodeGen()) {
// We have patterns for scalar/immediate shift amount, so no lowering
// needed.
if (Op.getOperand(1)->getOpcode() == ISD::SPLAT_VECTOR)
@@ -10823,7 +10823,7 @@ SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
return DAG.getNode(RISCVISD::FMV_H_X, DL, EltVT, IntExtract);
}
- if (Subtarget.enablePExtCodeGen() && VecVT.isFixedLengthVector()) {
+ if (Subtarget.enablePExtSIMDCodeGen() && VecVT.isFixedLengthVector()) {
if (VecVT != MVT::v4i16 && VecVT != MVT::v2i16 && VecVT != MVT::v8i8 &&
VecVT != MVT::v4i8 && VecVT != MVT::v2i32)
return SDValue();
@@ -16489,7 +16489,7 @@ static SDValue performTRUNCATECombine(SDNode *N, SelectionDAG &DAG,
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
- if (VT.isFixedLengthVector() && Subtarget.enablePExtCodeGen())
+ if (VT.isFixedLengthVector() && Subtarget.enablePExtSIMDCodeGen())
return combinePExtTruncate(N, DAG, Subtarget);
// Pre-promote (i1 (truncate (srl X, Y))) on RV64 with Zbs without zero
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
index f86265a21d17e..b2cbd334b218e 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
@@ -69,10 +69,10 @@ static cl::opt<bool> UseMIPSCCMovInsn("use-riscv-mips-ccmov",
cl::desc("Use 'mips.ccmov' instruction"),
cl::init(true), cl::Hidden);
-static cl::opt<bool> EnablePExtCodeGen(
- "enable-p-ext-codegen",
- cl::desc("Turn on P Extension codegen(This is a temporary switch where "
- "only partial codegen is currently supported)"),
+static cl::opt<bool> EnablePExtSIMDCodeGen(
+ "riscv-enable-p-ext-simd-codegen",
+ cl::desc("Turn on P Extension SIMD codegen(This is a temporary switch "
+ "where only partial codegen is currently supported)"),
cl::init(false), cl::Hidden);
void RISCVSubtarget::anchor() {}
@@ -153,8 +153,8 @@ bool RISCVSubtarget::useConstantPoolForLargeInts() const {
return !RISCVDisableUsingConstantPoolForLargeInts;
}
-bool RISCVSubtarget::enablePExtCodeGen() const {
- return HasStdExtP && EnablePExtCodeGen;
+bool RISCVSubtarget::enablePExtSIMDCodeGen() const {
+ return HasStdExtP && EnablePExtSIMDCodeGen;
}
unsigned RISCVSubtarget::getMaxBuildIntsCost() const {
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index ae6ca970c0c49..ef95ed6338ab9 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -328,7 +328,7 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo {
}
}
- bool enablePExtCodeGen() const;
+ bool enablePExtSIMDCodeGen() const;
// Returns VLEN divided by DLEN. Where DLEN is the datapath width of the
// vector hardware implementation which may be less than VLEN.
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index bb469e98e794e..fa4043ab3d888 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -972,7 +972,7 @@ InstructionCost RISCVTTIImpl::getScalarizationOverhead(
// TODO: Add proper cost model for P extension fixed vectors (e.g., v4i16)
// For now, skip all fixed vector cost analysis when P extension is available
// to avoid crashes in getMinRVVVectorSizeInBits()
- if (ST->enablePExtCodeGen() && isa<FixedVectorType>(Ty)) {
+ if (ST->enablePExtSIMDCodeGen() && isa<FixedVectorType>(Ty)) {
return 1; // Treat as single instruction cost for now
}
@@ -1697,7 +1697,7 @@ InstructionCost RISCVTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
// TODO: Add proper cost model for P extension fixed vectors (e.g., v4i16)
// For now, skip all fixed vector cost analysis when P extension is available
// to avoid crashes in getMinRVVVectorSizeInBits()
- if (ST->enablePExtCodeGen() &&
+ if (ST->enablePExtSIMDCodeGen() &&
(isa<FixedVectorType>(Dst) || isa<FixedVectorType>(Src))) {
return 1; // Treat as single instruction cost for now
}
@@ -2403,7 +2403,7 @@ InstructionCost RISCVTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
// TODO: Add proper cost model for P extension fixed vectors (e.g., v4i16)
// For now, skip all fixed vector cost analysis when P extension is available
// to avoid crashes in getMinRVVVectorSizeInBits()
- if (ST->enablePExtCodeGen() && isa<FixedVectorType>(Val)) {
+ if (ST->enablePExtSIMDCodeGen() && isa<FixedVectorType>(Val)) {
return 1; // Treat as single instruction cost for now
}
diff --git a/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll b/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll
index adc8de106fc00..2836cda16b6d9 100644
--- a/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvp-ext-rv32.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-p -enable-p-ext-codegen -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECK-RV32 %s
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-p -enable-p-ext-codegen -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECK-RV64 %s
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-p -riscv-enable-p-ext-simd-codegen -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECK-RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-p -riscv-enable-p-ext-simd-codegen -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,CHECK-RV64 %s
; Test basic add/sub operations for v2i16
define void @test_padd_h(ptr %ret_ptr, ptr %a_ptr, ptr %b_ptr) {
diff --git a/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll b/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll
index 4adbded1eb82b..dfa1b242e656f 100644
--- a/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvp-ext-rv64.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-p -enable-p-ext-codegen -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-p -riscv-enable-p-ext-simd-codegen -verify-machineinstrs < %s | FileCheck %s
; Test basic add/sub operations for v4i16
define void @test_padd_h(ptr %ret_ptr, ptr %a_ptr, ptr %b_ptr) {