[llvm] [RISCV] Remove riscv-experimental-rv64-legal-i32. (PR #102509)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 8 10:50:11 PDT 2024
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Craig Topper (topperc)
<details>
<summary>Changes</summary>
This has received no development work in a while and is slowly bit-rotting as new extensions are added.
At the moment, I don't think this is viable without adding a new invariant that 32-bit values are always kept in sign-extended form, as Mips64 does. We are heavily dependent on computeKnownBits and ComputeNumSignBits in SelectionDAG to remove sign extends created for ABI reasons. If we can't propagate sign-bit information through 64-bit values in SelectionDAG, we can't effectively clean up those extends.
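For illustration, here is a minimal standalone sketch (not code from this patch, and the helper name `foldRedundantSExtInReg` is made up for the example) of the kind of SelectionDAG combine that relies on ComputeNumSignBits to drop a `sign_extend_inreg` once the value is provably already sign-extended; this is the sort of cleanup that stops working if sign-bit information cannot be propagated through 64-bit values:

```cpp
// Sketch only: fold a redundant sign_extend_inreg using ComputeNumSignBits.
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

static SDValue foldRedundantSExtInReg(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG && "expected sext_inreg");
  SDValue Src = N->getOperand(0);
  EVT NarrowVT = cast<VTSDNode>(N->getOperand(1))->getVT();

  unsigned WideBits = Src.getScalarValueSizeInBits();
  unsigned NarrowBits = NarrowVT.getScalarSizeInBits();

  // sext_inreg forces the top (WideBits - NarrowBits) bits to be copies of
  // bit (NarrowBits - 1), so the result has at least
  // WideBits - NarrowBits + 1 sign bits. If the source already has that many
  // known sign bits, the node is a no-op and the source can be used directly.
  if (DAG.ComputeNumSignBits(Src) >= WideBits - NarrowBits + 1)
    return Src;

  return SDValue(); // Not provably redundant; keep the extension.
}
```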
---
Patch is 618.63 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/102509.diff
31 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+26-284)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/alu32.ll (-276)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/condops.ll (-2284)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/div.ll (-696)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/imm.ll (-2741)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/mem.ll (-92)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/mem64.ll (-341)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/rem.ll (-390)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64xtheadbb.ll (-877)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zba.ll (-1937)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb-intrinsic.ll (-77)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb-zbkb.ll (-575)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbb.ll (-1051)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbc-intrinsic.ll (-42)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbc-zbkc-intrinsic.ll (-67)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbkb-intrinsic.ll (-73)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbkb.ll (-370)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/rv64zbs.ll (-1159)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/sadd_sat.ll (-151)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/sadd_sat_plus.ll (-185)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/ssub_sat.ll (-151)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/ssub_sat_plus.ll (-185)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat.ll (-120)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/uadd_sat_plus.ll (-141)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat.ll (-113)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/usub_sat_plus.ll (-131)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/vararg.ll (-1391)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/xaluo.ll (-2609)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmac.ll (-123)
- (removed) llvm/test/CodeGen/RISCV/rv64-legal-i32/xtheadmemidx.ll (-717)
- (modified) llvm/test/CodeGen/RISCV/shl-cttz.ll (+108-226)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b7a1a27a0f69c7..a9f6f1a463796d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -76,10 +76,6 @@ static cl::opt<int>
"use for creating a floating-point immediate value"),
cl::init(2));
-static cl::opt<bool>
- RV64LegalI32("riscv-experimental-rv64-legal-i32", cl::ReallyHidden,
- cl::desc("Make i32 a legal type for SelectionDAG on RV64."));
-
RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
const RISCVSubtarget &STI)
: TargetLowering(TM), Subtarget(STI) {
@@ -119,8 +115,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
// Set up the register classes.
addRegisterClass(XLenVT, &RISCV::GPRRegClass);
- if (Subtarget.is64Bit() && RV64LegalI32)
- addRegisterClass(MVT::i32, &RISCV::GPRRegClass);
if (Subtarget.hasStdExtZfhmin())
addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
@@ -243,12 +237,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::BR_CC, MVT::i32, Expand);
setOperationAction(ISD::BRCOND, MVT::Other, Custom);
setOperationAction(ISD::SELECT_CC, XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
setCondCodeAction(ISD::SETGT, XLenVT, Custom);
setCondCodeAction(ISD::SETGE, XLenVT, Expand);
@@ -259,15 +249,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setCondCodeAction(ISD::SETLE, XLenVT, Expand);
}
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::SETCC, MVT::i32, Promote);
-
setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);
setOperationAction(ISD::VASTART, MVT::Other, Custom);
setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::VAARG, MVT::i32, Promote);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
@@ -280,33 +265,20 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
if (Subtarget.is64Bit()) {
setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
- if (!RV64LegalI32) {
- setOperationAction(ISD::LOAD, MVT::i32, Custom);
- setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
- MVT::i32, Custom);
- setOperationAction({ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT},
- MVT::i32, Custom);
- if (!Subtarget.hasStdExtZbb())
- setOperationAction({ISD::SADDSAT, ISD::SSUBSAT}, MVT::i32, Custom);
- } else {
- setOperationAction(ISD::SSUBO, MVT::i32, Custom);
- if (Subtarget.hasStdExtZbb()) {
- setOperationAction({ISD::SADDSAT, ISD::SSUBSAT}, MVT::i32, Custom);
- setOperationAction({ISD::UADDSAT, ISD::USUBSAT}, MVT::i32, Custom);
- }
- }
+ setOperationAction(ISD::LOAD, MVT::i32, Custom);
+ setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
+ MVT::i32, Custom);
+ setOperationAction({ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT},
+ MVT::i32, Custom);
+ if (!Subtarget.hasStdExtZbb())
+ setOperationAction({ISD::SADDSAT, ISD::SSUBSAT}, MVT::i32, Custom);
setOperationAction(ISD::SADDO, MVT::i32, Custom);
}
if (!Subtarget.hasStdExtZmmul()) {
setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU}, XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::MUL, MVT::i32, Promote);
} else if (Subtarget.is64Bit()) {
setOperationAction(ISD::MUL, MVT::i128, Custom);
- if (!RV64LegalI32)
- setOperationAction(ISD::MUL, MVT::i32, Custom);
- else
- setOperationAction(ISD::SMULO, MVT::i32, Custom);
+ setOperationAction(ISD::MUL, MVT::i32, Custom);
} else {
setOperationAction(ISD::MUL, MVT::i64, Custom);
}
@@ -314,20 +286,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
if (!Subtarget.hasStdExtM()) {
setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM},
XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, MVT::i32,
- Promote);
} else if (Subtarget.is64Bit()) {
- if (!RV64LegalI32)
- setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM},
- {MVT::i8, MVT::i16, MVT::i32}, Custom);
- }
-
- if (RV64LegalI32 && Subtarget.is64Bit()) {
- setOperationAction({ISD::MULHS, ISD::MULHU}, MVT::i32, Expand);
- setOperationAction(
- {ISD::SDIVREM, ISD::UDIVREM, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, MVT::i32,
- Expand);
+ setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM},
+ {MVT::i8, MVT::i16, MVT::i32}, Custom);
}
setOperationAction(
@@ -338,7 +299,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
Custom);
if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) {
- if (!RV64LegalI32 && Subtarget.is64Bit())
+ if (Subtarget.is64Bit())
setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
} else if (Subtarget.hasVendorXTHeadBb()) {
if (Subtarget.is64Bit())
@@ -348,8 +309,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::ROTL, XLenVT, Expand);
} else {
setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Expand);
}
// With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
@@ -359,13 +318,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
Subtarget.hasVendorXTHeadBb())
? Legal
: Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::BSWAP, MVT::i32,
- (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb() ||
- Subtarget.hasVendorXTHeadBb())
- ? Promote
- : Expand);
-
if (Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit()) {
setOperationAction(ISD::BITREVERSE, XLenVT, Legal);
@@ -379,42 +331,24 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
(Subtarget.hasVendorXCValu() && !Subtarget.is64Bit())) {
setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT,
Legal);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, MVT::i32,
- Promote);
}
if (Subtarget.hasStdExtZbb() ||
(Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit())) {
- if (Subtarget.is64Bit()) {
- if (RV64LegalI32)
- setOperationAction(ISD::CTTZ, MVT::i32, Legal);
- else
- setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Custom);
- }
+ if (Subtarget.is64Bit())
+ setOperationAction({ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF}, MVT::i32, Custom);
} else {
setOperationAction({ISD::CTTZ, ISD::CTPOP}, XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction({ISD::CTTZ, ISD::CTPOP}, MVT::i32, Expand);
}
if (Subtarget.hasStdExtZbb() || Subtarget.hasVendorXTHeadBb() ||
(Subtarget.hasVendorXCVbitmanip() && !Subtarget.is64Bit())) {
// We need the custom lowering to make sure that the resulting sequence
// for the 32bit case is efficient on 64bit targets.
- if (Subtarget.is64Bit()) {
- if (RV64LegalI32) {
- setOperationAction(ISD::CTLZ, MVT::i32,
- Subtarget.hasStdExtZbb() ? Legal : Promote);
- if (!Subtarget.hasStdExtZbb())
- setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Promote);
- } else
- setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, MVT::i32, Custom);
- }
+ if (Subtarget.is64Bit())
+ setOperationAction({ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF}, MVT::i32, Custom);
} else {
setOperationAction(ISD::CTLZ, XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::CTLZ, MVT::i32, Expand);
}
if (Subtarget.hasVendorXCValu() && !Subtarget.is64Bit()) {
@@ -422,15 +356,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
} else if (Subtarget.hasShortForwardBranchOpt()) {
// We can use PseudoCCSUB to implement ABS.
setOperationAction(ISD::ABS, XLenVT, Legal);
- } else if (!RV64LegalI32 && Subtarget.is64Bit()) {
+ } else if (Subtarget.is64Bit()) {
setOperationAction(ISD::ABS, MVT::i32, Custom);
}
- if (!Subtarget.hasVendorXTHeadCondMov()) {
+ if (!Subtarget.hasVendorXTHeadCondMov())
setOperationAction(ISD::SELECT, XLenVT, Custom);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::SELECT, MVT::i32, Promote);
- }
static const unsigned FPLegalNodeTypes[] = {
ISD::FMINNUM, ISD::FMAXNUM, ISD::LRINT,
@@ -614,11 +545,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
XLenVT, Legal);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT,
- ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
- MVT::i32, Legal);
-
setOperationAction(ISD::GET_ROUNDING, XLenVT, Custom);
setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
}
@@ -673,8 +599,6 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setBooleanVectorContents(ZeroOrOneBooleanContent);
setOperationAction(ISD::VSCALE, XLenVT, Custom);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::VSCALE, MVT::i32, Custom);
// RVV intrinsics may have illegal operands.
// We also need to custom legalize vmv.x.s.
@@ -1413,11 +1337,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
}
}
- if (Subtarget.hasStdExtA()) {
+ if (Subtarget.hasStdExtA())
setOperationAction(ISD::ATOMIC_LOAD_SUB, XLenVT, Expand);
- if (RV64LegalI32 && Subtarget.is64Bit())
- setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
- }
if (Subtarget.hasForcedAtomics()) {
// Force __sync libcalls to be emitted for atomic rmw/cas operations.
@@ -2340,9 +2261,6 @@ MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
MVT PartVT = TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
- if (RV64LegalI32 && Subtarget.is64Bit() && PartVT == MVT::i32)
- return MVT::i64;
-
return PartVT;
}
@@ -2364,12 +2282,6 @@ unsigned RISCVTargetLowering::getVectorTypeBreakdownForCallingConv(
unsigned NumRegs = TargetLowering::getVectorTypeBreakdownForCallingConv(
Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
- if (RV64LegalI32 && Subtarget.is64Bit() && IntermediateVT == MVT::i32)
- IntermediateVT = MVT::i64;
-
- if (RV64LegalI32 && Subtarget.is64Bit() && RegisterVT == MVT::i32)
- RegisterVT = MVT::i64;
-
return NumRegs;
}
@@ -5681,78 +5593,6 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
return Op;
}
-static SDValue lowerSADDSAT_SSUBSAT(SDValue Op, SelectionDAG &DAG) {
- assert(Op.getValueType() == MVT::i32 && RV64LegalI32 &&
- "Unexpected custom legalisation");
-
- // With Zbb, we can widen to i64 and smin/smax with INT32_MAX/MIN.
- bool IsAdd = Op.getOpcode() == ISD::SADDSAT;
- SDLoc DL(Op);
- SDValue LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(0));
- SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue Result =
- DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
-
- APInt MinVal = APInt::getSignedMinValue(32).sext(64);
- APInt MaxVal = APInt::getSignedMaxValue(32).sext(64);
- SDValue SatMin = DAG.getConstant(MinVal, DL, MVT::i64);
- SDValue SatMax = DAG.getConstant(MaxVal, DL, MVT::i64);
- Result = DAG.getNode(ISD::SMIN, DL, MVT::i64, Result, SatMax);
- Result = DAG.getNode(ISD::SMAX, DL, MVT::i64, Result, SatMin);
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Result);
-}
-
-static SDValue lowerUADDSAT_USUBSAT(SDValue Op, SelectionDAG &DAG) {
- assert(Op.getValueType() == MVT::i32 && RV64LegalI32 &&
- "Unexpected custom legalisation");
-
- // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
- // sign extend allows overflow of the lower 32 bits to be detected on
- // the promoted size.
- SDLoc DL(Op);
- SDValue LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(0));
- SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue WideOp = DAG.getNode(Op.getOpcode(), DL, MVT::i64, LHS, RHS);
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, WideOp);
-}
-
-// Custom lower i32 SADDO/SSUBO with RV64LegalI32 so we take advantage of addw.
-static SDValue lowerSADDO_SSUBO(SDValue Op, SelectionDAG &DAG) {
- assert(Op.getValueType() == MVT::i32 && RV64LegalI32 &&
- "Unexpected custom legalisation");
- if (isa<ConstantSDNode>(Op.getOperand(1)))
- return SDValue();
-
- bool IsAdd = Op.getOpcode() == ISD::SADDO;
- SDLoc DL(Op);
- SDValue LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(0));
- SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue WideOp =
- DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
- SDValue Res = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, WideOp);
- SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, WideOp,
- DAG.getValueType(MVT::i32));
- SDValue Ovf = DAG.getSetCC(DL, Op.getValue(1).getValueType(), WideOp, SExt,
- ISD::SETNE);
- return DAG.getMergeValues({Res, Ovf}, DL);
-}
-
-// Custom lower i32 SMULO with RV64LegalI32 so we take advantage of mulw.
-static SDValue lowerSMULO(SDValue Op, SelectionDAG &DAG) {
- assert(Op.getValueType() == MVT::i32 && RV64LegalI32 &&
- "Unexpected custom legalisation");
- SDLoc DL(Op);
- SDValue LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(0));
- SDValue RHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
- SDValue Res = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
- SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Mul,
- DAG.getValueType(MVT::i32));
- SDValue Ovf = DAG.getSetCC(DL, Op.getValue(1).getValueType(), Mul, SExt,
- ISD::SETNE);
- return DAG.getMergeValues({Res, Ovf}, DL);
-}
-
SDValue RISCVTargetLowering::LowerIS_FPCLASS(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
@@ -6267,11 +6107,6 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
return lowerFRAMEADDR(Op, DAG);
case ISD::RETURNADDR:
return lowerRETURNADDR(Op, DAG);
- case ISD::SADDO:
- case ISD::SSUBO:
- return lowerSADDO_SSUBO(Op, DAG);
- case ISD::SMULO:
- return lowerSMULO(Op, DAG);
case ISD::SHL_PARTS:
return lowerShiftLeftParts(Op, DAG);
case ISD::SRA_PARTS:
@@ -6710,7 +6545,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
RTLIB::getFPROUND(Op.getOperand(0).getValueType(), MVT::bf16);
SDValue Res =
makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
- if (Subtarget.is64Bit() && !RV64LegalI32)
+ if (Subtarget.is64Bit())
return DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Res);
return DAG.getBitcast(MVT::i32, Res);
}
@@ -6739,7 +6574,7 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
RTLIB::getFPROUND(Op.getOperand(0).getValueType(), MVT::f16);
SDValue Res =
makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
- if (Subtarget.is64Bit() && !RV64LegalI32)
+ if (Subtarget.is64Bit())
return DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Res);
return DAG.getBitcast(MVT::i32, Res);
}
@@ -7033,13 +6868,9 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
return lowerToScalableOp(Op, DAG);
case ISD::UADDSAT:
case ISD::USUBSAT:
- if (!Op.getValueType().isVector())
- return lowerUADDSAT_USUBSAT(Op, DAG);
return lowerToScalableOp(Op, DAG);
case ISD::SADDSAT:
case ISD::SSUBSAT:
- if (!Op.getValueType().isVector())
- return lowerSADDSAT_SSUBSAT(Op, DAG);
return lowerToScalableOp(Op, DAG);
case ISD::ABDS:
case ISD::ABDU: {
@@ -9098,13 +8929,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::riscv_sm3p1: Opc = RISCVISD::SM3P1; break;
}
- if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
- SDValue NewOp =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp);
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
- }
-
return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
}
case Intrinsic::riscv_sm4ks:
@@ -9112,16 +8936,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
unsigned Opc =
IntNo == Intrinsic::riscv_sm4ks ? RISCVISD::SM4KS : RISCVISD::SM4ED;
- if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
- SDValue NewOp0 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue NewOp1 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2));
- SDValue Res =
- DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, Op.getOperand(3));
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
- }
-
return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2),
Op.getOperand(3));
}
@@ -9131,63 +8945,21 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
IntNo == Intrinsic::riscv_zip ? RISCVISD::ZIP : RISCVISD::UNZIP;
return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1));
}
- case Intrinsic::riscv_mopr: {
- if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
- SDValue NewOp =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue Res = DAG.getNode(
- RISCVISD::MOPR, DL, MVT::i64, NewOp,
- DAG.getTargetConstant(Op.getConstantOperandVal(2), DL, MVT::i64));
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
- }
+ case Intrinsic::riscv_mopr:
return DAG.getNode(RISCVISD::MOPR, DL, XLenVT, Op.getOperand(1),
Op.getOperand(2));
- }
case Intrinsic::riscv_moprr: {
- if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
- SDValue NewOp0 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue NewOp1 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2));
- SDValue Res = DAG.getNode(
- RISCVISD::MOPRR, DL, MVT::i64, NewOp0, NewOp1,
- DAG.getTargetConstant(Op.getConstantOperandVal(3), DL, MVT::i64));
- return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res);
- }
return DAG.getNode(RISCVISD::MOPRR, DL, XLenVT, Op.getOperand(1),
Op.getOperand(2), Op.getOperand(3));
}
case Intrinsic::riscv_clmul:
- if (RV64LegalI32 && Subtarget.is64Bit() && Op.getValueType() == MVT::i32) {
- SDValue NewOp0 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(1));
- SDValue NewOp1 =
- DAG.getNode(ISD::ANY_EXTEND, D...
[truncated]
``````````
</details>
https://github.com/llvm/llvm-project/pull/102509
More information about the llvm-commits mailing list