[llvm] [AArch64][WIP] Improve codegen for aarch64.sme.cnts* when not in streaming mode (PR #154761)

Kerry McLaughlin via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 21 06:37:27 PDT 2025


https://github.com/kmclaughlin-arm created https://github.com/llvm/llvm-project/pull/154761

None

>From f563d0ef2ec3133495b1ffd266fe6cc4ae9bee82 Mon Sep 17 00:00:00 2001
From: Kerry McLaughlin <kerry.mclaughlin at arm.com>
Date: Wed, 20 Aug 2025 09:33:17 +0000
Subject: [PATCH 1/2] RDSVL tests

---
 .../CodeGen/AArch64/sme-intrinsics-rdsvl.ll   | 49 +++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/llvm/test/CodeGen/AArch64/sme-intrinsics-rdsvl.ll b/llvm/test/CodeGen/AArch64/sme-intrinsics-rdsvl.ll
index 5d10d7e13da14..b799f98981520 100644
--- a/llvm/test/CodeGen/AArch64/sme-intrinsics-rdsvl.ll
+++ b/llvm/test/CodeGen/AArch64/sme-intrinsics-rdsvl.ll
@@ -40,6 +40,55 @@ define i64 @sme_cntsd() {
   ret i64 %v
 }
 
+define i64 @sme_cntsb_mul() {
+; CHECK-LABEL: sme_cntsb_mul:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdsvl x8, #1
+; CHECK-NEXT:    lsl x0, x8, #1
+; CHECK-NEXT:    ret
+  %v = call i64 @llvm.aarch64.sme.cntsb()
+  %res = mul i64 %v, 2
+  ret i64 %res
+}
+
+define i64 @sme_cntsh_mul() {
+; CHECK-LABEL: sme_cntsh_mul:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdsvl x8, #1
+; CHECK-NEXT:    lsr x8, x8, #1
+; CHECK-NEXT:    add x0, x8, x8, lsl #2
+; CHECK-NEXT:    ret
+  %v = call i64 @llvm.aarch64.sme.cntsh()
+  %res = mul i64 %v, 5
+  ret i64 %res
+}
+
+define i64 @sme_cntsw_mul() {
+; CHECK-LABEL: sme_cntsw_mul:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdsvl x8, #1
+; CHECK-NEXT:    lsr x8, x8, #2
+; CHECK-NEXT:    lsl x9, x8, #3
+; CHECK-NEXT:    sub x0, x9, x8
+; CHECK-NEXT:    ret
+  %v = call i64 @llvm.aarch64.sme.cntsw()
+  %res = mul i64 %v, 7
+  ret i64 %res
+}
+
+define i64 @sme_cntsd_mul() {
+; CHECK-LABEL: sme_cntsd_mul:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    rdsvl x8, #1
+; CHECK-NEXT:    lsr x8, x8, #3
+; CHECK-NEXT:    add x8, x8, x8, lsl #1
+; CHECK-NEXT:    lsl x0, x8, #2
+; CHECK-NEXT:    ret
+  %v = call i64 @llvm.aarch64.sme.cntsd()
+  %res = mul i64 %v, 12
+  ret i64 %res
+}
+
 declare i64 @llvm.aarch64.sme.cntsb()
 declare i64 @llvm.aarch64.sme.cntsh()
 declare i64 @llvm.aarch64.sme.cntsw()

>From 93178f803e624e2c8f1f4fadba43b6f239ed652e Mon Sep 17 00:00:00 2001
From: Kerry McLaughlin <kerry.mclaughlin at arm.com>
Date: Wed, 13 Aug 2025 14:13:12 +0000
Subject: [PATCH 2/2] [AArch64][SME] Improve codegen for aarch64.sme.cnts* when
 not in streaming mode

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 36 ++++++++++---------
 .../lib/Target/AArch64/AArch64SMEInstrInfo.td | 23 ++++++++++++
 .../CodeGen/AArch64/sme-intrinsics-rdsvl.ll   | 20 +++++------
 3 files changed, 51 insertions(+), 28 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d168cc8d1bd06..e8886fbf7af4e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -6231,25 +6231,26 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   case Intrinsic::aarch64_sve_clz:
     return DAG.getNode(AArch64ISD::CTLZ_MERGE_PASSTHRU, DL, Op.getValueType(),
                        Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
-  case Intrinsic::aarch64_sme_cntsb:
-    return DAG.getNode(AArch64ISD::RDSVL, DL, Op.getValueType(),
-                       DAG.getConstant(1, DL, MVT::i32));
+  case Intrinsic::aarch64_sme_cntsb: {
+    SDValue Cntd = DAG.getNode(
+        ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
+        DAG.getConstant(Intrinsic::aarch64_sme_cntsd, DL, MVT::i64));
+    return DAG.getNode(ISD::MUL, DL, MVT::i64, Cntd,
+                       DAG.getConstant(8, DL, MVT::i64));
+  }
   case Intrinsic::aarch64_sme_cntsh: {
-    SDValue One = DAG.getConstant(1, DL, MVT::i32);
-    SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, DL, Op.getValueType(), One);
-    return DAG.getNode(ISD::SRL, DL, Op.getValueType(), Bytes, One);
+    SDValue Cntd = DAG.getNode(
+        ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
+        DAG.getConstant(Intrinsic::aarch64_sme_cntsd, DL, MVT::i64));
+    return DAG.getNode(ISD::MUL, DL, MVT::i64, Cntd,
+                       DAG.getConstant(4, DL, MVT::i64));
   }
   case Intrinsic::aarch64_sme_cntsw: {
-    SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, DL, Op.getValueType(),
-                                DAG.getConstant(1, DL, MVT::i32));
-    return DAG.getNode(ISD::SRL, DL, Op.getValueType(), Bytes,
-                       DAG.getConstant(2, DL, MVT::i32));
-  }
-  case Intrinsic::aarch64_sme_cntsd: {
-    SDValue Bytes = DAG.getNode(AArch64ISD::RDSVL, DL, Op.getValueType(),
-                                DAG.getConstant(1, DL, MVT::i32));
-    return DAG.getNode(ISD::SRL, DL, Op.getValueType(), Bytes,
-                       DAG.getConstant(3, DL, MVT::i32));
+    SDValue Cntd = DAG.getNode(
+        ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
+        DAG.getConstant(Intrinsic::aarch64_sme_cntsd, DL, MVT::i64));
+    return DAG.getNode(ISD::MUL, DL, MVT::i64, Cntd,
+                       DAG.getConstant(2, DL, MVT::i64));
   }
   case Intrinsic::aarch64_sve_cnt: {
     SDValue Data = Op.getOperand(3);
@@ -19174,6 +19175,9 @@ static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
        if (ConstValue.sge(1) && ConstValue.sle(16))
          return SDValue();
 
+  if (getIntrinsicID(N0.getNode()) == Intrinsic::aarch64_sme_cntsd)
+    return SDValue();
+
   // Multiplication of a power of two plus/minus one can be done more
   // cheaply as shift+add/sub. For now, this is true unilaterally. If
   // future CPUs have a cheaper MADD instruction, this may need to be
diff --git a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
index 125225df15464..9da2046fb6176 100644
--- a/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SMEInstrInfo.td
@@ -127,12 +127,35 @@ def : Pat<(AArch64_requires_za_save), (RequiresZASavePseudo)>;
 def SDT_AArch64RDSVL  : SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisInt<1>]>;
 def AArch64rdsvl : SDNode<"AArch64ISD::RDSVL", SDT_AArch64RDSVL>;
 
+def sme_cntsb_imm : ComplexPattern<i64, 1, "SelectRDVLImm<1, 31, 8>">;
+def sme_cntsh_imm : ComplexPattern<i64, 1, "SelectRDVLImm<1, 31, 4>">;
+def sme_cntsw_imm : ComplexPattern<i64, 1, "SelectRDVLImm<1, 31, 2>">;
+def sme_cntsd_imm : ComplexPattern<i64, 1, "SelectRDVLImm<1, 31, 1>">;
+
 let Predicates = [HasSMEandIsNonStreamingSafe] in {
 def RDSVLI_XI  : sve_int_read_vl_a<0b0, 0b11111, "rdsvl", /*streaming_sve=*/0b1>;
 def ADDSPL_XXI : sve_int_arith_vl<0b1, "addspl", /*streaming_sve=*/0b1>;
 def ADDSVL_XXI : sve_int_arith_vl<0b0, "addsvl", /*streaming_sve=*/0b1>;
 
 def : Pat<(AArch64rdsvl (i32 simm6_32b:$imm)), (RDSVLI_XI simm6_32b:$imm)>;
+
+// cnts[bhwd]() * imm: every cnts* is lowered to (mul cntsd, k), so fold the combined multiply into RDSVL #n plus a shift.
+def : Pat<(i64 (mul (int_aarch64_sme_cntsd), (sme_cntsb_imm i64:$imm))),
+          (RDSVLI_XI (!cast<SDNodeXForm>("trunc_imm") $imm))>;
+def : Pat<(i64 (mul (int_aarch64_sme_cntsd), (sme_cntsh_imm i64:$imm))),
+          (UBFMXri (RDSVLI_XI (!cast<SDNodeXForm>("trunc_imm") $imm)), 1, 63)>;
+def : Pat<(i64 (mul (int_aarch64_sme_cntsd), (sme_cntsw_imm i64:$imm))),
+          (UBFMXri (RDSVLI_XI (!cast<SDNodeXForm>("trunc_imm") $imm)), 2, 63)>;
+def : Pat<(i64 (mul (int_aarch64_sme_cntsd), (sme_cntsd_imm i64:$imm))),
+          (UBFMXri (RDSVLI_XI (!cast<SDNodeXForm>("trunc_imm") $imm)), 3, 63)>;
+
+// Plain cnts[bhw]() calls: (mul cntsd, 2/4/8) is canonicalized to shl, so match those shifts directly.
+def : Pat<(i64 (shl (int_aarch64_sme_cntsd), (i64 1))), (UBFMXri (RDSVLI_XI 1), 2, 63)>;
+def : Pat<(i64 (shl (int_aarch64_sme_cntsd), (i64 2))), (UBFMXri (RDSVLI_XI 1), 1, 63)>;
+def : Pat<(i64 (shl (int_aarch64_sme_cntsd), (i64 3))), (RDSVLI_XI 1)>;
+
+// Generic pattern for cntsd (RDSVL #1 >> 3)
+def : Pat<(i64 (int_aarch64_sme_cntsd)), (UBFMXri (RDSVLI_XI 1), 3, 63)>;
 }
 
 let Predicates = [HasSME] in {
diff --git a/llvm/test/CodeGen/AArch64/sme-intrinsics-rdsvl.ll b/llvm/test/CodeGen/AArch64/sme-intrinsics-rdsvl.ll
index b799f98981520..8253db1d488e7 100644
--- a/llvm/test/CodeGen/AArch64/sme-intrinsics-rdsvl.ll
+++ b/llvm/test/CodeGen/AArch64/sme-intrinsics-rdsvl.ll
@@ -44,7 +44,8 @@ define i64 @sme_cntsb_mul() {
 ; CHECK-LABEL: sme_cntsb_mul:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    rdsvl x8, #1
-; CHECK-NEXT:    lsl x0, x8, #1
+; CHECK-NEXT:    lsr x8, x8, #3
+; CHECK-NEXT:    lsl x0, x8, #4
 ; CHECK-NEXT:    ret
   %v = call i64 @llvm.aarch64.sme.cntsb()
   %res = mul i64 %v, 2
@@ -54,9 +55,8 @@ define i64 @sme_cntsb_mul() {
 define i64 @sme_cntsh_mul() {
 ; CHECK-LABEL: sme_cntsh_mul:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdsvl x8, #1
-; CHECK-NEXT:    lsr x8, x8, #1
-; CHECK-NEXT:    add x0, x8, x8, lsl #2
+; CHECK-NEXT:    rdsvl x8, #5
+; CHECK-NEXT:    lsr x0, x8, #1
 ; CHECK-NEXT:    ret
   %v = call i64 @llvm.aarch64.sme.cntsh()
   %res = mul i64 %v, 5
@@ -66,10 +66,8 @@ define i64 @sme_cntsh_mul() {
 define i64 @sme_cntsw_mul() {
 ; CHECK-LABEL: sme_cntsw_mul:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdsvl x8, #1
-; CHECK-NEXT:    lsr x8, x8, #2
-; CHECK-NEXT:    lsl x9, x8, #3
-; CHECK-NEXT:    sub x0, x9, x8
+; CHECK-NEXT:    rdsvl x8, #7
+; CHECK-NEXT:    lsr x0, x8, #2
 ; CHECK-NEXT:    ret
   %v = call i64 @llvm.aarch64.sme.cntsw()
   %res = mul i64 %v, 7
@@ -79,10 +77,8 @@ define i64 @sme_cntsw_mul() {
 define i64 @sme_cntsd_mul() {
 ; CHECK-LABEL: sme_cntsd_mul:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    rdsvl x8, #1
-; CHECK-NEXT:    lsr x8, x8, #3
-; CHECK-NEXT:    add x8, x8, x8, lsl #1
-; CHECK-NEXT:    lsl x0, x8, #2
+; CHECK-NEXT:    rdsvl x8, #3
+; CHECK-NEXT:    lsr x0, x8, #1
 ; CHECK-NEXT:    ret
   %v = call i64 @llvm.aarch64.sme.cntsd()
   %res = mul i64 %v, 12



More information about the llvm-commits mailing list