[llvm] dbf44c2 - [AArch64] Remove unused tablegen classes and code cleanup. NFC
David Green via llvm-commits
llvm-commits at lists.llvm.org
Thu Oct 2 04:40:21 PDT 2025
Author: David Green
Date: 2025-10-02T12:40:15+01:00
New Revision: dbf44c28234670361017429871286324ced1e5f3
URL: https://github.com/llvm/llvm-project/commit/dbf44c28234670361017429871286324ced1e5f3
DIFF: https://github.com/llvm/llvm-project/commit/dbf44c28234670361017429871286324ced1e5f3.diff
LOG: [AArch64] Remove unused tablegen classes and code cleanup. NFC
Added:
Modified:
llvm/include/llvm/IR/IntrinsicsAArch64.td
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/lib/Target/AArch64/AArch64InstrFormats.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index 7c9aef52b3acf..fbc92d77da1ab 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -130,8 +130,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
class AdvSIMD_1VectorArg_Expand_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
- class AdvSIMD_1VectorArg_Long_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
class AdvSIMD_1IntArg_Narrow_Intrinsic
: DefaultAttrsIntrinsic<[llvm_any_ty], [llvm_any_ty], [IntrNoMem]>;
class AdvSIMD_1VectorArg_Narrow_Intrinsic
@@ -150,9 +148,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
class AdvSIMD_2VectorArg_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem]>;
- class AdvSIMD_2VectorArg_Compare_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
- [IntrNoMem]>;
class AdvSIMD_2Arg_FloatCompare_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
[IntrNoMem]>;
@@ -160,10 +155,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMTruncatedType<0>, LLVMTruncatedType<0>],
[IntrNoMem]>;
- class AdvSIMD_2VectorArg_Wide_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, LLVMTruncatedType<0>],
- [IntrNoMem]>;
class AdvSIMD_2VectorArg_Narrow_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMExtendedType<0>, LLVMExtendedType<0>],
@@ -172,10 +163,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyint_ty],
[LLVMExtendedType<0>, llvm_i32_ty],
[IntrNoMem]>;
- class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty],
- [IntrNoMem]>;
class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMTruncatedType<0>],
@@ -184,10 +171,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMTruncatedType<0>, llvm_i32_ty],
[IntrNoMem]>;
- class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMOneNthElementsVectorType<0, 2>, llvm_anyvector_ty],
- [IntrNoMem]>;
class AdvSIMD_2VectorArg_Lane_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, llvm_anyint_ty, llvm_i32_ty],
@@ -205,14 +188,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
[IntrNoMem]>;
- class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMOneNthElementsVectorType<0, 2>, llvm_anyvector_ty,
- LLVMMatchType<1>], [IntrNoMem]>;
- class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMOneNthElementsVectorType<0, 2>, llvm_anyvector_ty, llvm_i32_ty],
- [IntrNoMem]>;
class AdvSIMD_CvtFxToFP_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
[IntrNoMem]>;
@@ -238,11 +213,6 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
[LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
[IntrNoMem]>;
- class AdvSIMD_FML_Intrinsic
- : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
- [IntrNoMem]>;
-
class AdvSIMD_BF16FML_Intrinsic
: DefaultAttrsIntrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v8bf16_ty, llvm_v8bf16_ty],
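(For context: helper classes like the ones removed above only describe intrinsic signatures, so they are live only while some intrinsic definition still derives from them. A minimal sketch, using a hypothetical intrinsic name that is not in-tree, of the kind of use that would have kept one of the removed classes alive:)

  // Hypothetical example only -- this def does not exist in the tree.
  // A signature helper such as AdvSIMD_1VectorArg_Long_Intrinsic is only
  // needed while some intrinsic derives from it, e.g.:
  def int_aarch64_neon_example_long
      : AdvSIMD_1VectorArg_Long_Intrinsic;
  // With no remaining defs of this form, the class is dead and the commit
  // deletes it, which is why the change is NFC.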
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index debe4d9f4aeb8..50a0562edaff1 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6618,7 +6618,6 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
"llvm.eh.recoverfp must take a function as the first argument");
return IncomingFPOp;
}
-
case Intrinsic::aarch64_neon_vsri:
case Intrinsic::aarch64_neon_vsli:
case Intrinsic::aarch64_sve_sri:
@@ -15155,9 +15154,7 @@ static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {
: Shift.getOperand(1);
unsigned Inst = IsShiftRight ? AArch64ISD::VSRI : AArch64ISD::VSLI;
- SDValue ResultSLI = DAG.getNode(Inst, DL, VT, X, Y, Imm);
-
- return ResultSLI;
+ return DAG.getNode(Inst, DL, VT, X, Y, Imm);
}
static SDValue tryLowerToBSL(SDValue N, SelectionDAG &DAG) {
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index f07d3514d1a99..6ef0a95d7406d 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -10176,28 +10176,6 @@ multiclass SIMDScalarLShiftBHSD<bit U, bits<5> opc, string asm,
(!cast<Instruction>(NAME # "d") FPR64:$Rn, vecshiftL64:$imm)>;
}
-multiclass SIMDScalarRShiftBHSD<bit U, bits<5> opc, string asm> {
- def b : BaseSIMDScalarShift<U, opc, {0,0,0,1,?,?,?},
- FPR8, FPR8, vecshiftR8, asm, []> {
- let Inst{18-16} = imm{2-0};
- }
-
- def h : BaseSIMDScalarShift<U, opc, {0,0,1,?,?,?,?},
- FPR16, FPR16, vecshiftR16, asm, []> {
- let Inst{19-16} = imm{3-0};
- }
-
- def s : BaseSIMDScalarShift<U, opc, {0,1,?,?,?,?,?},
- FPR32, FPR32, vecshiftR32, asm, []> {
- let Inst{20-16} = imm{4-0};
- }
-
- def d : BaseSIMDScalarShift<U, opc, {1,?,?,?,?,?,?},
- FPR64, FPR64, vecshiftR64, asm, []> {
- let Inst{21-16} = imm{5-0};
- }
-}
-
//----------------------------------------------------------------------------
// AdvSIMD vector x indexed element
//----------------------------------------------------------------------------