[llvm] [AArch64][GlobalISel] Add patterns for scalar sqdmlal/sqdmlsl (PR #187246)

Joshua Rodriguez via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 1 06:14:53 PDT 2026


https://github.com/JoshdRod updated https://github.com/llvm/llvm-project/pull/187246

>From b65505066fb9a466849a32ac9f43aba6dfaba8fc Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <josh.rodriguez at arm.com>
Date: Wed, 18 Mar 2026 12:07:08 +0000
Subject: [PATCH 1/7] [AArch64][GlobalISel] Add patterns for scalar
 sqdmlal/sqdmlsl

---
 llvm/lib/Target/AArch64/AArch64InstrInfo.td   |  12 ++
 .../AArch64/GISel/AArch64RegisterBankInfo.cpp |   1 +
 llvm/test/CodeGen/AArch64/arm64-int-neon.ll   |   3 +-
 llvm/test/CodeGen/AArch64/arm64-vmul.ll       | 130 ++++++++++++------
 4 files changed, 100 insertions(+), 46 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 2ecfdcf1508ac..37e2330ee3fb5 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -6638,10 +6638,18 @@ def : Pat<(f64 (AArch64sqadd FPR64:$Rd,
                 (AArch64sqdmull FPR32:$Rn, FPR32:$Rm))),
           (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
 
+def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
+                (int_aarch64_neon_sqdmulls_scalar FPR32:$Rn, FPR32:$Rm))),
+          (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
+
 def : Pat<(f64 (AArch64sqsub FPR64:$Rd,
                (AArch64sqdmull FPR32:$Rn, FPR32:$Rm))),
           (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
 
+def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
+                (int_aarch64_neon_sqdmulls_scalar FPR32:$Rn, FPR32:$Rm))),
+          (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
+
 //===----------------------------------------------------------------------===//
 // Advanced SIMD two scalar instructions.
 //===----------------------------------------------------------------------===//
@@ -9055,6 +9063,10 @@ def : Pat<(f64 (AArch64sqdmull FPR32:$Rn,
                                                            VectorIndexS:$idx)))))),
           (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
 
+def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
+                                          (i32 FPR32:$Rm)),
+          (SQDMULLi32 FPR32:$Rn, FPR32:$Rm)>;
+
 //----------------------------------------------------------------------------
 // AdvSIMD scalar shift instructions
 //----------------------------------------------------------------------------
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
index 2657a89f9d9cf..de0577a893005 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64RegisterBankInfo.cpp
@@ -625,6 +625,7 @@ static bool isFPIntrinsic(const MachineRegisterInfo &MRI,
   case Intrinsic::aarch64_neon_sqadd:
   case Intrinsic::aarch64_neon_uqsub:
   case Intrinsic::aarch64_neon_sqsub:
+  case Intrinsic::aarch64_neon_sqdmulls_scalar:
   case Intrinsic::aarch64_neon_srshl:
   case Intrinsic::aarch64_neon_urshl:
   case Intrinsic::aarch64_neon_sqshl:
diff --git a/llvm/test/CodeGen/AArch64/arm64-int-neon.ll b/llvm/test/CodeGen/AArch64/arm64-int-neon.ll
index b1e6750ac85b5..b72707e6fd074 100644
--- a/llvm/test/CodeGen/AArch64/arm64-int-neon.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-int-neon.ll
@@ -2,8 +2,7 @@
 ; RUN: llc < %s -mtriple aarch64-unknown-unknown -mattr=+fprcvt,+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
 ; RUN: llc < %s -mtriple aarch64-unknown-unknown -global-isel -global-isel-abort=2 -mattr=+fprcvt,+fullfp16 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
 
-; CHECK-GI:    warning: Instruction selection used fallback path for test_sqdmulls_scalar
-; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for test_sqdmulh_scalar
+; CHECK-GI:    warning: Instruction selection used fallback path for test_sqdmulh_scalar
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for test_sqabs_s32
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for test_sqabs_s64
 ; CHECK-GI-NEXT:    warning: Instruction selection used fallback path for test_sqneg_s32
diff --git a/llvm/test/CodeGen/AArch64/arm64-vmul.ll b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
index 7b639822a0dc0..a365495009a17 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vmul.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
@@ -19,9 +19,6 @@
 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmla_indexed_scalar_4s_strict
 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmla_indexed_scalar_2d_strict
 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmulh_lane_1s
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for test_vqdmulls_lane_s32
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmlal_lane_1d
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmlsl_lane_1d
 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for scalar_fmls_from_extract_v4f32
 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for scalar_fmls_from_extract_v2f32
 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for scalar_fmls_from_extract_v2f64
@@ -30,8 +27,6 @@
 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_with_fneg_before_extract_v4f32
 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_with_fneg_before_extract_v4f32_1
 ; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fmls_with_fneg_before_extract_v2f64
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmlal_d
-; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqdmlsl_d
 
 define <8 x i16> @smull8h(ptr %A, ptr %B) nounwind {
 ; CHECK-LABEL: smull8h:
@@ -1794,13 +1789,22 @@ define i32 @sqsub_lane1_sqdmull4s(i32 %A, <4 x i16> %B, <4 x i16> %C) nounwind {
 }
 
 define i64 @test_vqdmulls_lane_s32(i32 noundef %a, <2 x i32> noundef %b) {
-; CHECK-LABEL: test_vqdmulls_lane_s32:
-; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    fmov s1, w0
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    sqdmull d0, s1, v0.s[1]
-; CHECK-NEXT:    fmov x0, d0
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: test_vqdmulls_lane_s32:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    fmov s1, w0
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    sqdmull d0, s1, v0.s[1]
+; CHECK-SD-NEXT:    fmov x0, d0
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: test_vqdmulls_lane_s32:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    fmov s1, w0
+; CHECK-GI-NEXT:    mov s0, v0.s[1]
+; CHECK-GI-NEXT:    sqdmull d0, s1, s0
+; CHECK-GI-NEXT:    fmov x0, d0
+; CHECK-GI-NEXT:    ret
 entry:
   %vget_lane = extractelement <2 x i32> %b, i64 1
   %vqdmulls_s32.i = tail call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %a, i32 %vget_lane)
@@ -1808,14 +1812,24 @@ entry:
 }
 
 define i64 @sqdmlal_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind {
-; CHECK-LABEL: sqdmlal_lane_1d:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov s1, w1
-; CHECK-NEXT:    fmov d2, x0
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    sqdmlal d2, s1, v0.s[1]
-; CHECK-NEXT:    fmov x0, d2
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: sqdmlal_lane_1d:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmov s1, w1
+; CHECK-SD-NEXT:    fmov d2, x0
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    sqdmlal d2, s1, v0.s[1]
+; CHECK-SD-NEXT:    fmov x0, d2
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sqdmlal_lane_1d:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    fmov s1, w1
+; CHECK-GI-NEXT:    fmov d2, x0
+; CHECK-GI-NEXT:    mov s0, v0.s[1]
+; CHECK-GI-NEXT:    sqdmlal d2, s1, s0
+; CHECK-GI-NEXT:    fmov x0, d2
+; CHECK-GI-NEXT:    ret
   %rhs = extractelement <2 x i32> %C, i32 1
   %prod = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %B, i32 %rhs)
   %res = call i64 @llvm.aarch64.neon.sqadd.i64(i64 %A, i64 %prod)
@@ -1825,14 +1839,24 @@ declare i64 @llvm.aarch64.neon.sqdmulls.scalar(i32, i32)
 declare i64 @llvm.aarch64.neon.sqadd.i64(i64, i64)
 
 define i64 @sqdmlsl_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind {
-; CHECK-LABEL: sqdmlsl_lane_1d:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov s1, w1
-; CHECK-NEXT:    fmov d2, x0
-; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT:    sqdmlsl d2, s1, v0.s[1]
-; CHECK-NEXT:    fmov x0, d2
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: sqdmlsl_lane_1d:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmov s1, w1
+; CHECK-SD-NEXT:    fmov d2, x0
+; CHECK-SD-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-SD-NEXT:    sqdmlsl d2, s1, v0.s[1]
+; CHECK-SD-NEXT:    fmov x0, d2
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sqdmlsl_lane_1d:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-GI-NEXT:    fmov s1, w1
+; CHECK-GI-NEXT:    fmov d2, x0
+; CHECK-GI-NEXT:    mov s0, v0.s[1]
+; CHECK-GI-NEXT:    sqdmlsl d2, s1, s0
+; CHECK-GI-NEXT:    fmov x0, d2
+; CHECK-GI-NEXT:    ret
   %rhs = extractelement <2 x i32> %C, i32 1
   %prod = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %B, i32 %rhs)
   %res = call i64 @llvm.aarch64.neon.sqsub.i64(i64 %A, i64 %prod)
@@ -3216,14 +3240,23 @@ define i32 @sqdmlal_s(i16 %A, i16 %B, i32 %C) nounwind {
 }
 
 define i64 @sqdmlal_d(i32 %A, i32 %B, i64 %C) nounwind {
-; CHECK-LABEL: sqdmlal_d:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov s0, w1
-; CHECK-NEXT:    fmov s1, w0
-; CHECK-NEXT:    fmov d2, x2
-; CHECK-NEXT:    sqdmlal d2, s1, s0
-; CHECK-NEXT:    fmov x0, d2
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: sqdmlal_d:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmov s0, w1
+; CHECK-SD-NEXT:    fmov s1, w0
+; CHECK-SD-NEXT:    fmov d2, x2
+; CHECK-SD-NEXT:    sqdmlal d2, s1, s0
+; CHECK-SD-NEXT:    fmov x0, d2
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sqdmlal_d:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov s0, w0
+; CHECK-GI-NEXT:    fmov s1, w1
+; CHECK-GI-NEXT:    fmov d2, x2
+; CHECK-GI-NEXT:    sqdmlal d2, s0, s1
+; CHECK-GI-NEXT:    fmov x0, d2
+; CHECK-GI-NEXT:    ret
   %tmp4 = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %A, i32 %B)
   %tmp5 = call i64 @llvm.aarch64.neon.sqadd.i64(i64 %C, i64 %tmp4)
   ret i64 %tmp5
@@ -3256,14 +3289,23 @@ define i32 @sqdmlsl_s(i16 %A, i16 %B, i32 %C) nounwind {
 }
 
 define i64 @sqdmlsl_d(i32 %A, i32 %B, i64 %C) nounwind {
-; CHECK-LABEL: sqdmlsl_d:
-; CHECK:       // %bb.0:
-; CHECK-NEXT:    fmov s0, w1
-; CHECK-NEXT:    fmov s1, w0
-; CHECK-NEXT:    fmov d2, x2
-; CHECK-NEXT:    sqdmlsl d2, s1, s0
-; CHECK-NEXT:    fmov x0, d2
-; CHECK-NEXT:    ret
+; CHECK-SD-LABEL: sqdmlsl_d:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmov s0, w1
+; CHECK-SD-NEXT:    fmov s1, w0
+; CHECK-SD-NEXT:    fmov d2, x2
+; CHECK-SD-NEXT:    sqdmlsl d2, s1, s0
+; CHECK-SD-NEXT:    fmov x0, d2
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sqdmlsl_d:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    fmov s0, w0
+; CHECK-GI-NEXT:    fmov s1, w1
+; CHECK-GI-NEXT:    fmov d2, x2
+; CHECK-GI-NEXT:    sqdmlsl d2, s0, s1
+; CHECK-GI-NEXT:    fmov x0, d2
+; CHECK-GI-NEXT:    ret
   %tmp4 = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %A, i32 %B)
   %tmp5 = call i64 @llvm.aarch64.neon.sqsub.i64(i64 %C, i64 %tmp4)
   ret i64 %tmp5

>From 16a3e0a90c81491786d0e5eac9ab0f31d902568d Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <josh.rodriguez at arm.com>
Date: Thu, 19 Mar 2026 13:43:45 +0000
Subject: [PATCH 2/7] [AArch64][GlobalISel] Select lane index sqdmlal when
 vector_extract of v4i32 present

SQDMLALv1i64_indexed takes in an index of a vector as its final operand, meaning it doesn't need to extract the element in a separate instruction.

This only works when the vector to extract from is a v4i32. Currently, extracting from a v2i32 doesn't work, and I'm unsure why.
---
 llvm/lib/Target/AArch64/AArch64InstrInfo.td | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 37e2330ee3fb5..a93823f80b471 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -9067,6 +9067,12 @@ def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                           (i32 FPR32:$Rm)),
           (SQDMULLi32 FPR32:$Rn, FPR32:$Rm)>;
 
+def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
+                (int_aarch64_neon_sqdmulls_scalar FPR32:$Rn,
+                                          (vector_extract (v4i32 V128:$Vm),
+                                                           VectorIndexS:$idx)))),
+          (SQDMLALv1i64_indexed FPR64:$Rd, FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
+
 //----------------------------------------------------------------------------
 // AdvSIMD scalar shift instructions
 //----------------------------------------------------------------------------

>From d482215696b8f51b57473d40a0dd283a7dd1e50f Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <josh.rodriguez at arm.com>
Date: Tue, 24 Mar 2026 11:57:16 +0000
Subject: [PATCH 3/7] [AArch64][GlobalISel] Add test for v4i32 vector extract
 sqdmlal/sqdmlsl

1. Tests only test v4i32 versions of the intrinsic, as v2i32 currently doesn't work.
2. GlobalISel currently generates poor code in the sqdmlsl case. To fix, the SQDMLALv1i64_indexed pattern needs to be copied over for sqdmlsl.
---
 llvm/test/CodeGen/AArch64/arm64-vmul.ll | 48 +++++++++++++++++++++----
 1 file changed, 42 insertions(+), 6 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/arm64-vmul.ll b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
index a365495009a17..d6f96a2184d6e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vmul.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
@@ -1811,8 +1811,8 @@ entry:
   ret i64 %vqdmulls_s32.i
 }
 
-define i64 @sqdmlal_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind {
-; CHECK-SD-LABEL: sqdmlal_lane_1d:
+define i64 @sqdmlal_lane_1d_v2i32(i64 %A, i32 %B, <2 x i32> %C) nounwind {
+; CHECK-SD-LABEL: sqdmlal_lane_1d_v2i32:
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    fmov s1, w1
 ; CHECK-SD-NEXT:    fmov d2, x0
@@ -1821,7 +1821,7 @@ define i64 @sqdmlal_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind {
 ; CHECK-SD-NEXT:    fmov x0, d2
 ; CHECK-SD-NEXT:    ret
 ;
-; CHECK-GI-LABEL: sqdmlal_lane_1d:
+; CHECK-GI-LABEL: sqdmlal_lane_1d_v2i32:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-GI-NEXT:    fmov s1, w1
@@ -1838,8 +1838,8 @@ define i64 @sqdmlal_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind {
 declare i64 @llvm.aarch64.neon.sqdmulls.scalar(i32, i32)
 declare i64 @llvm.aarch64.neon.sqadd.i64(i64, i64)
 
-define i64 @sqdmlsl_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind {
-; CHECK-SD-LABEL: sqdmlsl_lane_1d:
+define i64 @sqdmlsl_lane_1d_v2i32(i64 %A, i32 %B, <2 x i32> %C) nounwind {
+; CHECK-SD-LABEL: sqdmlsl_lane_1d_v2i32:
 ; CHECK-SD:       // %bb.0:
 ; CHECK-SD-NEXT:    fmov s1, w1
 ; CHECK-SD-NEXT:    fmov d2, x0
@@ -1848,7 +1848,7 @@ define i64 @sqdmlsl_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind {
 ; CHECK-SD-NEXT:    fmov x0, d2
 ; CHECK-SD-NEXT:    ret
 ;
-; CHECK-GI-LABEL: sqdmlsl_lane_1d:
+; CHECK-GI-LABEL: sqdmlsl_lane_1d_v2i32:
 ; CHECK-GI:       // %bb.0:
 ; CHECK-GI-NEXT:    // kill: def $d0 killed $d0 def $q0
 ; CHECK-GI-NEXT:    fmov s1, w1
@@ -1864,6 +1864,42 @@ define i64 @sqdmlsl_lane_1d(i64 %A, i32 %B, <2 x i32> %C) nounwind {
 }
 declare i64 @llvm.aarch64.neon.sqsub.i64(i64, i64)
 
+define i64 @sqdmlal_lane_1d_v4i32(i64 %A, i32 %B, <4 x i32> %C) nounwind {
+; CHECK-LABEL: sqdmlal_lane_1d_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s1, w1
+; CHECK-NEXT:    fmov d2, x0
+; CHECK-NEXT:    sqdmlal d2, s1, v0.s[1]
+; CHECK-NEXT:    fmov x0, d2
+; CHECK-NEXT:    ret
+  %rhs = extractelement <4 x i32> %C, i32 1
+  %prod = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %B, i32 %rhs)
+  %res = call i64 @llvm.aarch64.neon.sqadd.i64(i64 %A, i64 %prod)
+  ret i64 %res
+}
+
+define i64 @sqdmlsl_lane_1d_v4i32(i64 %A, i32 %B, <4 x i32> %C) nounwind {
+; CHECK-SD-LABEL: sqdmlsl_lane_1d_v4i32:
+; CHECK-SD:       // %bb.0:
+; CHECK-SD-NEXT:    fmov s1, w1
+; CHECK-SD-NEXT:    fmov d2, x0
+; CHECK-SD-NEXT:    sqdmlsl d2, s1, v0.s[1]
+; CHECK-SD-NEXT:    fmov x0, d2
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: sqdmlsl_lane_1d_v4i32:
+; CHECK-GI:       // %bb.0:
+; CHECK-GI-NEXT:    mov s0, v0.s[1]
+; CHECK-GI-NEXT:    fmov s1, w1
+; CHECK-GI-NEXT:    fmov d2, x0
+; CHECK-GI-NEXT:    sqdmlsl d2, s1, s0
+; CHECK-GI-NEXT:    fmov x0, d2
+; CHECK-GI-NEXT:    ret
+  %rhs = extractelement <4 x i32> %C, i32 1
+  %prod = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %B, i32 %rhs)
+  %res = call i64 @llvm.aarch64.neon.sqsub.i64(i64 %A, i64 %prod)
+  ret i64 %res
+}
 
 define <4 x i32> @umlal_lane_4s(<4 x i16> %A, <4 x i16> %B, <4 x i32> %C) nounwind {
 ; CHECK-LABEL: umlal_lane_4s:

>From a5b9abc09c9c4263bc8656c8b35ab6dba5d504e9 Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <josh.rodriguez at arm.com>
Date: Tue, 24 Mar 2026 14:22:01 +0000
Subject: [PATCH 4/7] [AArch64][GlobalISel] Select SQDMLSLv1i64_indexed when
 vector_extract present

Like SQDMLALv1i64_indexed, selecting this intrinsic reduces the number of instructions generated by 1, as it performs both the vector extract and the sqdmlal in one instruction.

This only works when the vector to extract from is v4i32, not v2i32. This is due to some issues GlobalISel has selecting intrinsics using v2i32.
---
 llvm/lib/Target/AArch64/AArch64InstrInfo.td |  6 ++++++
 llvm/test/CodeGen/AArch64/arm64-vmul.ll     | 23 +++++++--------------
 2 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index a93823f80b471..c87d3802da8e7 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -9073,6 +9073,12 @@ def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
                                                            VectorIndexS:$idx)))),
           (SQDMLALv1i64_indexed FPR64:$Rd, FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
 
+def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
+                (int_aarch64_neon_sqdmulls_scalar FPR32:$Rn,
+                                          (vector_extract (v4i32 V128:$Vm),
+                                                           VectorIndexS:$idx)))),
+          (SQDMLSLv1i64_indexed FPR64:$Rd, FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
+
 //----------------------------------------------------------------------------
 // AdvSIMD scalar shift instructions
 //----------------------------------------------------------------------------
diff --git a/llvm/test/CodeGen/AArch64/arm64-vmul.ll b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
index d6f96a2184d6e..5b7eb6ca125a0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vmul.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
@@ -1879,22 +1879,13 @@ define i64 @sqdmlal_lane_1d_v4i32(i64 %A, i32 %B, <4 x i32> %C) nounwind {
 }
 
 define i64 @sqdmlsl_lane_1d_v4i32(i64 %A, i32 %B, <4 x i32> %C) nounwind {
-; CHECK-SD-LABEL: sqdmlsl_lane_1d_v4i32:
-; CHECK-SD:       // %bb.0:
-; CHECK-SD-NEXT:    fmov s1, w1
-; CHECK-SD-NEXT:    fmov d2, x0
-; CHECK-SD-NEXT:    sqdmlsl d2, s1, v0.s[1]
-; CHECK-SD-NEXT:    fmov x0, d2
-; CHECK-SD-NEXT:    ret
-;
-; CHECK-GI-LABEL: sqdmlsl_lane_1d_v4i32:
-; CHECK-GI:       // %bb.0:
-; CHECK-GI-NEXT:    mov s0, v0.s[1]
-; CHECK-GI-NEXT:    fmov s1, w1
-; CHECK-GI-NEXT:    fmov d2, x0
-; CHECK-GI-NEXT:    sqdmlsl d2, s1, s0
-; CHECK-GI-NEXT:    fmov x0, d2
-; CHECK-GI-NEXT:    ret
+; CHECK-LABEL: sqdmlsl_lane_1d_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fmov s1, w1
+; CHECK-NEXT:    fmov d2, x0
+; CHECK-NEXT:    sqdmlsl d2, s1, v0.s[1]
+; CHECK-NEXT:    fmov x0, d2
+; CHECK-NEXT:    ret
   %rhs = extractelement <4 x i32> %C, i32 1
   %prod = call i64 @llvm.aarch64.neon.sqdmulls.scalar(i32 %B, i32 %rhs)
   %res = call i64 @llvm.aarch64.neon.sqsub.i64(i64 %A, i64 %prod)

>From 7a33b1d707f53421a5d2fbdf0f924697854a9924 Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <josh.rodriguez at arm.com>
Date: Tue, 24 Mar 2026 15:06:50 +0000
Subject: [PATCH 5/7] [AArch64][GlobalISel] Move new SQDMULLi32 pattern to join
 the others

---
 llvm/lib/Target/AArch64/AArch64InstrInfo.td | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index c87d3802da8e7..bcdeeb93fb64a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -6650,6 +6650,10 @@ def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
                 (int_aarch64_neon_sqdmulls_scalar FPR32:$Rn, FPR32:$Rm))),
           (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
 
+def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
+                                          (i32 FPR32:$Rm)),
+          (SQDMULLi32 FPR32:$Rn, FPR32:$Rm)>;
+
 //===----------------------------------------------------------------------===//
 // Advanced SIMD two scalar instructions.
 //===----------------------------------------------------------------------===//
@@ -9063,10 +9067,6 @@ def : Pat<(f64 (AArch64sqdmull FPR32:$Rn,
                                                            VectorIndexS:$idx)))))),
           (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
 
-def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
-                                          (i32 FPR32:$Rm)),
-          (SQDMULLi32 FPR32:$Rn, FPR32:$Rm)>;
-
 def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
                 (int_aarch64_neon_sqdmulls_scalar FPR32:$Rn,
                                           (vector_extract (v4i32 V128:$Vm),

>From 82648dfcda1593082511ef01350a1bf4c9c1918c Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <josh.rodriguez at arm.com>
Date: Wed, 1 Apr 2026 13:06:48 +0000
Subject: [PATCH 6/7] [AArch64][GlobalISel] Move patterns so they are in same
 order as instructions

---
 llvm/lib/Target/AArch64/AArch64InstrInfo.td | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index bcdeeb93fb64a..35a90ea413ee4 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -6634,6 +6634,10 @@ defm SQDMULL  : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
 defm SQDMLAL  : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
 defm SQDMLSL  : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;
 
+def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
+                                          (i32 FPR32:$Rm)),
+          (SQDMULLi32 FPR32:$Rn, FPR32:$Rm)>;
+
 def : Pat<(f64 (AArch64sqadd FPR64:$Rd,
                 (AArch64sqdmull FPR32:$Rn, FPR32:$Rm))),
           (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
@@ -6650,10 +6654,6 @@ def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
                 (int_aarch64_neon_sqdmulls_scalar FPR32:$Rn, FPR32:$Rm))),
           (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
 
-def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
-                                          (i32 FPR32:$Rm)),
-          (SQDMULLi32 FPR32:$Rn, FPR32:$Rm)>;
-
 //===----------------------------------------------------------------------===//
 // Advanced SIMD two scalar instructions.
 //===----------------------------------------------------------------------===//

>From 166ab03e83a93666ace5aff55ec99720e919f030 Mon Sep 17 00:00:00 2001
From: Josh Rodriguez <josh.rodriguez at arm.com>
Date: Wed, 1 Apr 2026 13:14:25 +0000
Subject: [PATCH 7/7] [AArch64][GlobalISel] Remove unneeded declare lines

---
 llvm/test/CodeGen/AArch64/arm64-vmul.ll | 2 --
 1 file changed, 2 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/arm64-vmul.ll b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
index 5b7eb6ca125a0..70c02a2a20353 100644
--- a/llvm/test/CodeGen/AArch64/arm64-vmul.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-vmul.ll
@@ -1835,8 +1835,6 @@ define i64 @sqdmlal_lane_1d_v2i32(i64 %A, i32 %B, <2 x i32> %C) nounwind {
   %res = call i64 @llvm.aarch64.neon.sqadd.i64(i64 %A, i64 %prod)
   ret i64 %res
 }
-declare i64 @llvm.aarch64.neon.sqdmulls.scalar(i32, i32)
-declare i64 @llvm.aarch64.neon.sqadd.i64(i64, i64)
 
 define i64 @sqdmlsl_lane_1d_v2i32(i64 %A, i32 %B, <2 x i32> %C) nounwind {
 ; CHECK-SD-LABEL: sqdmlsl_lane_1d_v2i32:



More information about the llvm-commits mailing list