[llvm] [AArch64] Remove trailing whitespace in IntrinsicsAArch64.td (NFC) (PR #164267)
Benjamin Maxwell via llvm-commits
llvm-commits at lists.llvm.org
Mon Oct 20 08:07:33 PDT 2025
https://github.com/MacDue created https://github.com/llvm/llvm-project/pull/164267
>From 639dc8dbb238d56cdfaf21d5b6206a51bad2a5a0 Mon Sep 17 00:00:00 2001
From: Benjamin Maxwell <benjamin.maxwell at arm.com>
Date: Mon, 20 Oct 2025 15:04:35 +0000
Subject: [PATCH] [AArch64] Remove trailing whitespace in IntrinsicsAArch64.td
(NFC)
---
llvm/include/llvm/IR/IntrinsicsAArch64.td | 40 +++++++++++------------
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index b0269eec3347a..b81edc385cd43 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -522,7 +522,7 @@ let TargetPrefix = "aarch64" in {
def int_aarch64_neon_vcmla_rot90 : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_neon_vcmla_rot180 : AdvSIMD_3VectorArg_Intrinsic;
def int_aarch64_neon_vcmla_rot270 : AdvSIMD_3VectorArg_Intrinsic;
-
+
// FP8 fscale
def int_aarch64_neon_fp8_fscale : DefaultAttrsIntrinsic<
[llvm_anyvector_ty],
@@ -1467,7 +1467,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
LLVMSubdivide2VectorType<0>,
llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<3>>]>;
-
+
class SVE2_1VectorArgIndexed_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>,
@@ -1482,7 +1482,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
llvm_i32_ty,
llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<3>>, ImmArg<ArgIndex<4>>]>;
-
+
class SVE2_1VectorArg_Pred_Intrinsic
: DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
[llvm_anyvector_ty],
@@ -1492,7 +1492,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
: DefaultAttrsIntrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
[llvm_anyvector_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<1>>]>;
-
+
class SVE2_Pred_1VectorArgIndexed_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>,
@@ -3353,11 +3353,11 @@ let TargetPrefix = "aarch64" in {
: DefaultAttrsIntrinsic<[llvm_nxv8bf16_ty],
[llvm_nxv4f32_ty, llvm_nxv4f32_ty],
[IntrNoMem]>;
-
+
class SVE2_CVT_WIDENING_VG2_Intrinsic
: DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
[LLVMSubdivide2VectorType<0>], [IntrNoMem]>;
-
+
class SVE2_CVT_VG4_SINGLE_Intrinsic
: DefaultAttrsIntrinsic<[LLVMSubdivide4VectorType<0>],
@@ -3740,7 +3740,7 @@ let TargetPrefix = "aarch64" in {
llvm_anyvector_ty, LLVMMatchType<0>,
LLVMMatchType<0>, LLVMMatchType<0>],
[IntrInaccessibleMemOnly, IntrWriteMem]>;
-
+
class SME2_Add_Sub_Write_VG4_Multi_Multi_Intrinsic
: DefaultAttrsIntrinsic<[],
[llvm_i32_ty,
@@ -3887,7 +3887,7 @@ let TargetPrefix = "aarch64" in {
def int_aarch64_sme_luti4_lane_zt
: DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_i32_ty, llvm_nxv16i8_ty, llvm_i32_ty],
[ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, IntrInaccessibleMemOnly, IntrReadMem]>;
-
+
// Lookup table expand two registers
//
def int_aarch64_sme_luti2_lane_zt_x2
@@ -3896,7 +3896,7 @@ let TargetPrefix = "aarch64" in {
def int_aarch64_sme_luti4_lane_zt_x2
: DefaultAttrsIntrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], [llvm_i32_ty, llvm_nxv16i8_ty, llvm_i32_ty],
[ImmArg<ArgIndex<0>>, ImmArg<ArgIndex<2>>, IntrInaccessibleMemOnly, IntrReadMem]>;
-
+
//
// Lookup table expand four registers
//
@@ -3914,7 +3914,7 @@ let TargetPrefix = "aarch64" in {
[llvm_i32_ty, llvm_nxv16i8_ty, llvm_nxv16i8_ty],
[ImmArg<ArgIndex<0>>, IntrInaccessibleMemOnly, IntrReadMem]>;
-
+
//
// Register scaling
//
@@ -3962,7 +3962,7 @@ def int_aarch64_sve_extq : AdvSIMD_2VectorArgIndexed_Intrinsic;
//
// SVE2.1 - Move predicate to/from vector
//
-def int_aarch64_sve_pmov_to_pred_lane : SVE2_1VectorArgIndexed_Pred_Intrinsic;
+def int_aarch64_sve_pmov_to_pred_lane : SVE2_1VectorArgIndexed_Pred_Intrinsic;
def int_aarch64_sve_pmov_to_pred_lane_zero : SVE2_1VectorArg_Pred_Intrinsic;
@@ -4004,10 +4004,10 @@ let TargetPrefix = "aarch64" in {
: DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
[llvm_anyvector_ty, LLVMMatchType<0>],
[IntrReadMem, IntrInaccessibleMemOnly]>;
-
+
def int_aarch64_sve_fp8_cvtn : SVE2_FP8_Narrow_Cvt;
def int_aarch64_sve_fp8_cvtnb : SVE2_FP8_Narrow_Cvt;
-
+
def int_aarch64_sve_fp8_cvtnt
: DefaultAttrsIntrinsic<[llvm_nxv16i8_ty],
[llvm_nxv16i8_ty, llvm_anyvector_ty, LLVMMatchType<0>],
@@ -4019,32 +4019,32 @@ let TargetPrefix = "aarch64" in {
[LLVMMatchType<0>,
llvm_nxv16i8_ty, llvm_nxv16i8_ty],
[IntrReadMem, IntrInaccessibleMemOnly]>;
-
+
class SVE2_FP8_FMLA_FDOT_Lane
: DefaultAttrsIntrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>,
llvm_nxv16i8_ty, llvm_nxv16i8_ty, llvm_i32_ty],
[IntrReadMem, IntrInaccessibleMemOnly, ImmArg<ArgIndex<3>>]>;
-
+
def int_aarch64_sve_fp8_fdot : SVE2_FP8_FMLA_FDOT;
def int_aarch64_sve_fp8_fdot_lane : SVE2_FP8_FMLA_FDOT_Lane;
// Fused multiply-add
def int_aarch64_sve_fp8_fmlalb : SVE2_FP8_FMLA_FDOT;
def int_aarch64_sve_fp8_fmlalb_lane : SVE2_FP8_FMLA_FDOT_Lane;
-
+
def int_aarch64_sve_fp8_fmlalt : SVE2_FP8_FMLA_FDOT;
def int_aarch64_sve_fp8_fmlalt_lane : SVE2_FP8_FMLA_FDOT_Lane;
-
+
def int_aarch64_sve_fp8_fmlallbb : SVE2_FP8_FMLA_FDOT;
def int_aarch64_sve_fp8_fmlallbb_lane : SVE2_FP8_FMLA_FDOT_Lane;
-
+
def int_aarch64_sve_fp8_fmlallbt : SVE2_FP8_FMLA_FDOT;
def int_aarch64_sve_fp8_fmlallbt_lane : SVE2_FP8_FMLA_FDOT_Lane;
-
+
def int_aarch64_sve_fp8_fmlalltb : SVE2_FP8_FMLA_FDOT;
def int_aarch64_sve_fp8_fmlalltb_lane : SVE2_FP8_FMLA_FDOT_Lane;
-
+
def int_aarch64_sve_fp8_fmlalltt : SVE2_FP8_FMLA_FDOT;
def int_aarch64_sve_fp8_fmlalltt_lane : SVE2_FP8_FMLA_FDOT_Lane;