[llvm] [AArch64][SVE] Detect MOV (imm, pred, zeroing/merging) (PR #116032)

Ricardo Jesus via llvm-commits <llvm-commits@lists.llvm.org>
Thu Nov 14 07:59:16 PST 2024


https://github.com/rj-jesus updated https://github.com/llvm/llvm-project/pull/116032

From 247bbdd5a5e1611699a7dbdccec326fc360cb226 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <rjj@nvidia.com>
Date: Wed, 13 Nov 2024 01:28:45 -0800
Subject: [PATCH 1/4] Precommit tests

---
 llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll | 95 +++++++++++++++++++
 1 file changed, 95 insertions(+)
 create mode 100644 llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll

diff --git a/llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll b/llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll
new file mode 100644
index 00000000000000..27b5ea46896f8d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; Zeroing.
+
+define dso_local <vscale x 16 x i8> @mov_z_b(<vscale x 16 x i1> %pg) {
+; CHECK-LABEL: mov_z_b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.b, #0 // =0x0
+; CHECK-NEXT:    mov w8, #1 // =0x1
+; CHECK-NEXT:    mov z0.b, p0/m, w8
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, i8 1)
+  ret <vscale x 16 x i8> %r
+}
+
+define dso_local <vscale x 8 x i16> @mov_z_h(<vscale x 8 x i1> %pg) {
+; CHECK-LABEL: mov_z_h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.h, #0 // =0x0
+; CHECK-NEXT:    mov w8, #1 // =0x1
+; CHECK-NEXT:    mov z0.h, p0/m, w8
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, i16 1)
+  ret <vscale x 8 x i16> %r
+}
+
+define dso_local <vscale x 4 x i32> @mov_z_s(<vscale x 4 x i1> %pg) {
+; CHECK-LABEL: mov_z_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.s, #0 // =0x0
+; CHECK-NEXT:    mov w8, #1 // =0x1
+; CHECK-NEXT:    mov z0.s, p0/m, w8
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, i32 1)
+  ret <vscale x 4 x i32> %r
+}
+
+define dso_local <vscale x 2 x i64> @mov_z_d(<vscale x 2 x i1> %pg) {
+; CHECK-LABEL: mov_z_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.d, #0 // =0x0
+; CHECK-NEXT:    mov w8, #1 // =0x1
+; CHECK-NEXT:    mov z0.d, p0/m, x8
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, i64 1)
+  ret <vscale x 2 x i64> %r
+}
+
+; Merging.
+
+define dso_local <vscale x 16 x i8> @mov_m_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg) {
+; CHECK-LABEL: mov_m_b:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #1 // =0x1
+; CHECK-NEXT:    mov z0.b, p0/m, w8
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i8 1)
+  ret <vscale x 16 x i8> %r
+}
+
+define dso_local <vscale x 8 x i16> @mov_m_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg) {
+; CHECK-LABEL: mov_m_h:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #1 // =0x1
+; CHECK-NEXT:    mov z0.h, p0/m, w8
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i16 1)
+  ret <vscale x 8 x i16> %r
+}
+
+define dso_local <vscale x 4 x i32> @mov_m_s(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg) {
+; CHECK-LABEL: mov_m_s:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #1 // =0x1
+; CHECK-NEXT:    mov z0.s, p0/m, w8
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 1)
+  ret <vscale x 4 x i32> %r
+}
+
+define dso_local <vscale x 2 x i64> @mov_m_d(<vscale x 2 x i64> %zd, <vscale x 2 x i1> %pg) {
+; CHECK-LABEL: mov_m_d:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #1 // =0x1
+; CHECK-NEXT:    mov z0.d, p0/m, x8
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %zd, <vscale x 2 x i1> %pg, i64 1)
+  ret <vscale x 2 x i64> %r
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64)

From 659ea54c3f155efc101523c89569dcc1f3e10c2f Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <rjj@nvidia.com>
Date: Wed, 13 Nov 2024 01:29:58 -0800
Subject: [PATCH 2/4] [AArch64][SVE] Detect MOV (imm, pred, zeroing/merging)

Add patterns to fold MOV (scalar, predicated) to MOV (imm, pred,
merging) or MOV (imm, pred, zeroing) as appropriate.
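
For context, a minimal C sketch (an illustration via the SVE ACLE, not
part of the patch) of source that reaches the predicated-dup intrinsics
exercised by the tests; the intrinsic names and flags are standard
ACLE/clang, but the exact codegen naturally depends on the build:

    /* Build with e.g. clang -O2 --target=aarch64-linux-gnu -march=armv8-a+sve */
    #include <arm_sve.h>

    /* Zeroing form: inactive lanes become zero. With these patterns this
       should lower to a single "mov z0.b, p0/z, #1" instead of a zeroing
       mov plus a scalar immediate copied into the vector. */
    svint8_t dup1_z(svbool_t pg) { return svdup_n_s8_z(pg, 1); }

    /* Merging form: inactive lanes keep their value from zd. This should
       lower to "mov z0.b, p0/m, #1" with no scalar materialisation of #1. */
    svint8_t dup1_m(svint8_t zd, svbool_t pg) { return svdup_n_s8_m(zd, pg, 1); }
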
---
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td | 20 +++++++++++++
 llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll | 28 ++++++-------------
 2 files changed, 28 insertions(+), 20 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index c10653e05841cd..d0b4b71a93f641 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -892,6 +892,26 @@ let Predicates = [HasSVEorSME] in {
   def : Pat<(nxv2i64 (splat_vector (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)))),
             (DUP_ZI_D $a, $b)>;
 
+  // Duplicate Int immediate to active vector elements (zeroing).
+  def : Pat<(nxv16i8 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm8Pat i32:$a, i32:$b)), (SVEDup0Undef))),
+            (CPY_ZPzI_B $pg, $a, $b)>;
+  def : Pat<(nxv8i16 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm16Pat i32:$a, i32:$b)), (SVEDup0Undef))),
+            (CPY_ZPzI_H $pg, $a, $b)>;
+  def : Pat<(nxv4i32 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm32Pat i32:$a, i32:$b)), (SVEDup0Undef))),
+            (CPY_ZPzI_S $pg, $a, $b)>;
+  def : Pat<(nxv2i64 (AArch64dup_mt PPR:$pg, (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)), (SVEDup0Undef))),
+            (CPY_ZPzI_D $pg, $a, $b)>;
+
+  // Duplicate Int immediate to active vector elements (merging).
+  def : Pat<(nxv16i8 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm8Pat i32:$a, i32:$b)), (nxv16i8 ZPR:$z))),
+            (CPY_ZPmI_B $z, $pg, $a, $b)>;
+  def : Pat<(nxv8i16 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm16Pat i32:$a, i32:$b)), (nxv8i16 ZPR:$z))),
+            (CPY_ZPmI_H $z, $pg, $a, $b)>;
+  def : Pat<(nxv4i32 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm32Pat i32:$a, i32:$b)), (nxv4i32 ZPR:$z))),
+            (CPY_ZPmI_S $z, $pg, $a, $b)>;
+  def : Pat<(nxv2i64 (AArch64dup_mt PPR:$pg, (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)), (nxv2i64 ZPR:$z))),
+            (CPY_ZPmI_D $z, $pg, $a, $b)>;
+
   // Duplicate immediate FP into all vector elements.
   def : Pat<(nxv2f16 (splat_vector (f16 fpimm:$val))),
             (DUP_ZR_H (MOVi32imm (bitcast_fpimm_to_i32 f16:$val)))>;
diff --git a/llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll b/llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll
index 27b5ea46896f8d..43be70c9590fb0 100644
--- a/llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll
+++ b/llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll
@@ -6,9 +6,7 @@
 define dso_local <vscale x 16 x i8> @mov_z_b(<vscale x 16 x i1> %pg) {
 ; CHECK-LABEL: mov_z_b:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.b, #0 // =0x0
-; CHECK-NEXT:    mov w8, #1 // =0x1
-; CHECK-NEXT:    mov z0.b, p0/m, w8
+; CHECK-NEXT:    mov z0.b, p0/z, #1 // =0x1
 ; CHECK-NEXT:    ret
   %r = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i1> %pg, i8 1)
   ret <vscale x 16 x i8> %r
@@ -17,9 +15,7 @@ define dso_local <vscale x 16 x i8> @mov_z_b(<vscale x 16 x i1> %pg) {
 define dso_local <vscale x 8 x i16> @mov_z_h(<vscale x 8 x i1> %pg) {
 ; CHECK-LABEL: mov_z_h:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.h, #0 // =0x0
-; CHECK-NEXT:    mov w8, #1 // =0x1
-; CHECK-NEXT:    mov z0.h, p0/m, w8
+; CHECK-NEXT:    mov z0.h, p0/z, #1 // =0x1
 ; CHECK-NEXT:    ret
   %r = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i1> %pg, i16 1)
   ret <vscale x 8 x i16> %r
@@ -28,9 +24,7 @@ define dso_local <vscale x 8 x i16> @mov_z_h(<vscale x 8 x i1> %pg) {
 define dso_local <vscale x 4 x i32> @mov_z_s(<vscale x 4 x i1> %pg) {
 ; CHECK-LABEL: mov_z_s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.s, #0 // =0x0
-; CHECK-NEXT:    mov w8, #1 // =0x1
-; CHECK-NEXT:    mov z0.s, p0/m, w8
+; CHECK-NEXT:    mov z0.s, p0/z, #1 // =0x1
 ; CHECK-NEXT:    ret
   %r = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x i1> %pg, i32 1)
   ret <vscale x 4 x i32> %r
@@ -39,9 +33,7 @@ define dso_local <vscale x 4 x i32> @mov_z_s(<vscale x 4 x i1> %pg) {
 define dso_local <vscale x 2 x i64> @mov_z_d(<vscale x 2 x i1> %pg) {
 ; CHECK-LABEL: mov_z_d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov z0.d, #0 // =0x0
-; CHECK-NEXT:    mov w8, #1 // =0x1
-; CHECK-NEXT:    mov z0.d, p0/m, x8
+; CHECK-NEXT:    mov z0.d, p0/z, #1 // =0x1
 ; CHECK-NEXT:    ret
   %r = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> %pg, i64 1)
   ret <vscale x 2 x i64> %r
@@ -52,8 +44,7 @@ define dso_local <vscale x 2 x i64> @mov_z_d(<vscale x 2 x i1> %pg) {
 define dso_local <vscale x 16 x i8> @mov_m_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg) {
 ; CHECK-LABEL: mov_m_b:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #1 // =0x1
-; CHECK-NEXT:    mov z0.b, p0/m, w8
+; CHECK-NEXT:    mov z0.b, p0/m, #1 // =0x1
 ; CHECK-NEXT:    ret
   %r = tail call <vscale x 16 x i8> @llvm.aarch64.sve.dup.nxv16i8(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg, i8 1)
   ret <vscale x 16 x i8> %r
@@ -62,8 +53,7 @@ define dso_local <vscale x 16 x i8> @mov_m_b(<vscale x 16 x i8> %zd, <vscale x 1
 define dso_local <vscale x 8 x i16> @mov_m_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg) {
 ; CHECK-LABEL: mov_m_h:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #1 // =0x1
-; CHECK-NEXT:    mov z0.h, p0/m, w8
+; CHECK-NEXT:    mov z0.h, p0/m, #1 // =0x1
 ; CHECK-NEXT:    ret
   %r = tail call <vscale x 8 x i16> @llvm.aarch64.sve.dup.nxv8i16(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg, i16 1)
   ret <vscale x 8 x i16> %r
@@ -72,8 +62,7 @@ define dso_local <vscale x 8 x i16> @mov_m_h(<vscale x 8 x i16> %zd, <vscale x 8
 define dso_local <vscale x 4 x i32> @mov_m_s(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg) {
 ; CHECK-LABEL: mov_m_s:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #1 // =0x1
-; CHECK-NEXT:    mov z0.s, p0/m, w8
+; CHECK-NEXT:    mov z0.s, p0/m, #1 // =0x1
 ; CHECK-NEXT:    ret
   %r = tail call <vscale x 4 x i32> @llvm.aarch64.sve.dup.nxv4i32(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg, i32 1)
   ret <vscale x 4 x i32> %r
@@ -82,8 +71,7 @@ define dso_local <vscale x 4 x i32> @mov_m_s(<vscale x 4 x i32> %zd, <vscale x 4
 define dso_local <vscale x 2 x i64> @mov_m_d(<vscale x 2 x i64> %zd, <vscale x 2 x i1> %pg) {
 ; CHECK-LABEL: mov_m_d:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    mov w8, #1 // =0x1
-; CHECK-NEXT:    mov z0.d, p0/m, x8
+; CHECK-NEXT:    mov z0.d, p0/m, #1 // =0x1
 ; CHECK-NEXT:    ret
   %r = tail call <vscale x 2 x i64> @llvm.aarch64.sve.dup.nxv2i64(<vscale x 2 x i64> %zd, <vscale x 2 x i1> %pg, i64 1)
   ret <vscale x 2 x i64> %r

From bb39c643c3b96d768f6b43d9822657c9fd140bd5 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <rjj@nvidia.com>
Date: Wed, 13 Nov 2024 11:12:17 -0800
Subject: [PATCH 3/4] Move patterns into sve_int_dup_imm_pred_*

---
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td | 24 ++-----------------
 llvm/lib/Target/AArch64/SVEInstrFormats.td    | 22 +++++++++++++++--
 2 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index d0b4b71a93f641..a2867d2d912ef5 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -839,8 +839,8 @@ let Predicates = [HasSVEorSME] in {
   defm DUPM_ZI : sve_int_dup_mask_imm<"dupm">;
 
   // Splat immediate (predicated)
-  defm CPY_ZPmI  : sve_int_dup_imm_pred_merge<"cpy">;
-  defm CPY_ZPzI  : sve_int_dup_imm_pred_zero<"cpy">;
+  defm CPY_ZPmI  : sve_int_dup_imm_pred_merge<"cpy", AArch64dup_mt>;
+  defm CPY_ZPzI  : sve_int_dup_imm_pred_zero<"cpy", AArch64dup_mt>;
   defm FCPY_ZPmI : sve_int_dup_fpimm_pred<"fcpy">;
 
   // Splat scalar register (unpredicated, GPR or vector + element index)
@@ -892,26 +892,6 @@ let Predicates = [HasSVEorSME] in {
   def : Pat<(nxv2i64 (splat_vector (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)))),
             (DUP_ZI_D $a, $b)>;
 
-  // Duplicate Int immediate to active vector elements (zeroing).
-  def : Pat<(nxv16i8 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm8Pat i32:$a, i32:$b)), (SVEDup0Undef))),
-            (CPY_ZPzI_B $pg, $a, $b)>;
-  def : Pat<(nxv8i16 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm16Pat i32:$a, i32:$b)), (SVEDup0Undef))),
-            (CPY_ZPzI_H $pg, $a, $b)>;
-  def : Pat<(nxv4i32 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm32Pat i32:$a, i32:$b)), (SVEDup0Undef))),
-            (CPY_ZPzI_S $pg, $a, $b)>;
-  def : Pat<(nxv2i64 (AArch64dup_mt PPR:$pg, (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)), (SVEDup0Undef))),
-            (CPY_ZPzI_D $pg, $a, $b)>;
-
-  // Duplicate Int immediate to active vector elements (merging).
-  def : Pat<(nxv16i8 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm8Pat i32:$a, i32:$b)), (nxv16i8 ZPR:$z))),
-            (CPY_ZPmI_B $z, $pg, $a, $b)>;
-  def : Pat<(nxv8i16 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm16Pat i32:$a, i32:$b)), (nxv8i16 ZPR:$z))),
-            (CPY_ZPmI_H $z, $pg, $a, $b)>;
-  def : Pat<(nxv4i32 (AArch64dup_mt PPR:$pg, (i32 (SVECpyDupImm32Pat i32:$a, i32:$b)), (nxv4i32 ZPR:$z))),
-            (CPY_ZPmI_S $z, $pg, $a, $b)>;
-  def : Pat<(nxv2i64 (AArch64dup_mt PPR:$pg, (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)), (nxv2i64 ZPR:$z))),
-            (CPY_ZPmI_D $z, $pg, $a, $b)>;
-
   // Duplicate immediate FP into all vector elements.
   def : Pat<(nxv2f16 (splat_vector (f16 fpimm:$val))),
             (DUP_ZR_H (MOVi32imm (bitcast_fpimm_to_i32 f16:$val)))>;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 6de6aed3b2a816..60705e2b6d4e7d 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -5357,7 +5357,7 @@ multiclass sve_int_dup_imm_pred_merge_inst<
             (!cast<Instruction>(NAME) $Zd, $Pg, $imm, $shift)>;
 }
 
-multiclass sve_int_dup_imm_pred_merge<string asm> {
+multiclass sve_int_dup_imm_pred_merge<string asm, SDPatternOperator op> {
   defm _B : sve_int_dup_imm_pred_merge_inst<0b00, asm, ZPR8, cpy_imm8_opt_lsl_i8,
                                             nxv16i8, nxv16i1, i32, SVECpyDupImm8Pat>;
   defm _H : sve_int_dup_imm_pred_merge_inst<0b01, asm, ZPR16, cpy_imm8_opt_lsl_i16,
@@ -5386,6 +5386,15 @@ multiclass sve_int_dup_imm_pred_merge<string asm> {
             (!cast<Instruction>(NAME # _D) $Zd, $Pg, 0, 0)>;
   def : Pat<(vselect PPRAny:$Pg, (SVEDup0), (nxv2f64 ZPR:$Zd)),
             (!cast<Instruction>(NAME # _D) $Zd, $Pg, 0, 0)>;
+
+  def : Pat<(nxv16i8 (op nxv16i1:$pg, (i32 (SVECpyDupImm8Pat i32:$a, i32:$b)), nxv16i8:$zd)),
+            (!cast<Instruction>(NAME # _B) $zd, $pg, $a, $b)>;
+  def : Pat<(nxv8i16 (op nxv8i1:$pg, (i32 (SVECpyDupImm16Pat i32:$a, i32:$b)), nxv8i16:$zd)),
+            (!cast<Instruction>(NAME # _H) $zd, $pg, $a, $b)>;
+  def : Pat<(nxv4i32 (op nxv4i1:$pg, (i32 (SVECpyDupImm32Pat i32:$a, i32:$b)), nxv4i32:$zd)),
+            (!cast<Instruction>(NAME # _S) $zd, $pg, $a, $b)>;
+  def : Pat<(nxv2i64 (op nxv2i1:$pg, (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)), nxv2i64:$zd)),
+            (!cast<Instruction>(NAME # _D) $zd, $pg, $a, $b)>;
 }
 
 multiclass sve_int_dup_imm_pred_zero_inst<
@@ -5407,7 +5416,7 @@ multiclass sve_int_dup_imm_pred_zero_inst<
             (!cast<Instruction>(NAME) $Pg, $imm, $shift)>;
 }
 
-multiclass sve_int_dup_imm_pred_zero<string asm> {
+multiclass sve_int_dup_imm_pred_zero<string asm, SDPatternOperator op> {
   defm _B : sve_int_dup_imm_pred_zero_inst<0b00, asm, ZPR8, cpy_imm8_opt_lsl_i8,
                                            nxv16i8, nxv16i1, i32, SVECpyDupImm8Pat>;
   defm _H : sve_int_dup_imm_pred_zero_inst<0b01, asm, ZPR16, cpy_imm8_opt_lsl_i16,
@@ -5416,6 +5425,15 @@ multiclass sve_int_dup_imm_pred_zero<string asm> {
                                            nxv4i32, nxv4i1, i32, SVECpyDupImm32Pat>;
   defm _D : sve_int_dup_imm_pred_zero_inst<0b11, asm, ZPR64, cpy_imm8_opt_lsl_i64,
                                            nxv2i64, nxv2i1, i64, SVECpyDupImm64Pat>;
+
+  def : Pat<(nxv16i8 (op nxv16i1:$pg, (i32 (SVECpyDupImm8Pat i32:$a, i32:$b)), (SVEDup0))),
+            (!cast<Instruction>(NAME # _B) $pg, $a, $b)>;
+  def : Pat<(nxv8i16 (op nxv8i1:$pg, (i32 (SVECpyDupImm16Pat i32:$a, i32:$b)), (SVEDup0))),
+            (!cast<Instruction>(NAME # _H) $pg, $a, $b)>;
+  def : Pat<(nxv4i32 (op nxv4i1:$pg, (i32 (SVECpyDupImm32Pat i32:$a, i32:$b)), (SVEDup0))),
+            (!cast<Instruction>(NAME # _S) $pg, $a, $b)>;
+  def : Pat<(nxv2i64 (op nxv2i1:$pg, (i64 (SVECpyDupImm64Pat i32:$a, i32:$b)), (SVEDup0))),
+            (!cast<Instruction>(NAME # _D) $pg, $a, $b)>;
 }
 
 //===----------------------------------------------------------------------===//

From 4f383cfd572e37c0798e85c2ea33f072aa737d55 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <rjj@nvidia.com>
Date: Thu, 14 Nov 2024 07:49:42 -0800
Subject: [PATCH 4/4] Remove dso_local from tests

---
 llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll b/llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll
index 43be70c9590fb0..7f4ff927f7b65c 100644
--- a/llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll
+++ b/llvm/test/CodeGen/AArch64/sve-mov-imm-pred.ll
@@ -3,7 +3,7 @@
 
 ; Zeroing.
 
-define dso_local <vscale x 16 x i8> @mov_z_b(<vscale x 16 x i1> %pg) {
+define <vscale x 16 x i8> @mov_z_b(<vscale x 16 x i1> %pg) {
 ; CHECK-LABEL: mov_z_b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.b, p0/z, #1 // =0x1
@@ -12,7 +12,7 @@ define dso_local <vscale x 16 x i8> @mov_z_b(<vscale x 16 x i1> %pg) {
   ret <vscale x 16 x i8> %r
 }
 
-define dso_local <vscale x 8 x i16> @mov_z_h(<vscale x 8 x i1> %pg) {
+define <vscale x 8 x i16> @mov_z_h(<vscale x 8 x i1> %pg) {
 ; CHECK-LABEL: mov_z_h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, p0/z, #1 // =0x1
@@ -21,7 +21,7 @@ define dso_local <vscale x 8 x i16> @mov_z_h(<vscale x 8 x i1> %pg) {
   ret <vscale x 8 x i16> %r
 }
 
-define dso_local <vscale x 4 x i32> @mov_z_s(<vscale x 4 x i1> %pg) {
+define <vscale x 4 x i32> @mov_z_s(<vscale x 4 x i1> %pg) {
 ; CHECK-LABEL: mov_z_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.s, p0/z, #1 // =0x1
@@ -30,7 +30,7 @@ define dso_local <vscale x 4 x i32> @mov_z_s(<vscale x 4 x i1> %pg) {
   ret <vscale x 4 x i32> %r
 }
 
-define dso_local <vscale x 2 x i64> @mov_z_d(<vscale x 2 x i1> %pg) {
+define <vscale x 2 x i64> @mov_z_d(<vscale x 2 x i1> %pg) {
 ; CHECK-LABEL: mov_z_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.d, p0/z, #1 // =0x1
@@ -41,7 +41,7 @@ define dso_local <vscale x 2 x i64> @mov_z_d(<vscale x 2 x i1> %pg) {
 
 ; Merging.
 
-define dso_local <vscale x 16 x i8> @mov_m_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg) {
+define <vscale x 16 x i8> @mov_m_b(<vscale x 16 x i8> %zd, <vscale x 16 x i1> %pg) {
 ; CHECK-LABEL: mov_m_b:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.b, p0/m, #1 // =0x1
@@ -50,7 +50,7 @@ define dso_local <vscale x 16 x i8> @mov_m_b(<vscale x 16 x i8> %zd, <vscale x 1
   ret <vscale x 16 x i8> %r
 }
 
-define dso_local <vscale x 8 x i16> @mov_m_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg) {
+define <vscale x 8 x i16> @mov_m_h(<vscale x 8 x i16> %zd, <vscale x 8 x i1> %pg) {
 ; CHECK-LABEL: mov_m_h:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.h, p0/m, #1 // =0x1
@@ -59,7 +59,7 @@ define dso_local <vscale x 8 x i16> @mov_m_h(<vscale x 8 x i16> %zd, <vscale x 8
   ret <vscale x 8 x i16> %r
 }
 
-define dso_local <vscale x 4 x i32> @mov_m_s(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg) {
+define <vscale x 4 x i32> @mov_m_s(<vscale x 4 x i32> %zd, <vscale x 4 x i1> %pg) {
 ; CHECK-LABEL: mov_m_s:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.s, p0/m, #1 // =0x1
@@ -68,7 +68,7 @@ define dso_local <vscale x 4 x i32> @mov_m_s(<vscale x 4 x i32> %zd, <vscale x 4
   ret <vscale x 4 x i32> %r
 }
 
-define dso_local <vscale x 2 x i64> @mov_m_d(<vscale x 2 x i64> %zd, <vscale x 2 x i1> %pg) {
+define <vscale x 2 x i64> @mov_m_d(<vscale x 2 x i64> %zd, <vscale x 2 x i1> %pg) {
 ; CHECK-LABEL: mov_m_d:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    mov z0.d, p0/m, #1 // =0x1


