[llvm] [AArch64] Extend usage of `XAR` instruction for fixed-length operations (PR #139460)

Rajveer Singh Bharadwaj via llvm-commits llvm-commits at lists.llvm.org
Fri May 30 05:27:57 PDT 2025


https://github.com/Rajveer100 updated https://github.com/llvm/llvm-project/pull/139460

>From 404c9199ac1c5d4ae6489cdb87ceb27da26a73eb Mon Sep 17 00:00:00 2001
From: Rajveer <rajveer.developer at icloud.com>
Date: Sun, 11 May 2025 22:27:50 +0530
Subject: [PATCH] [AArch64] Extend usage of `XAR` instruction for fixed-length
 operations

Resolves #139229

In #137162, the vector rotate transformation gained support for `v2i64`.
Types such as `v4i32`, `v8i16` and `v16i8` have no Neon SHA3 `XAR`
equivalent, but we can use the SVE2 `XAR` instructions for them when
sve2-sha3 is available.
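
For illustration, a minimal sketch of the kind of rotate this targets (the
function name is made up; the IR and output mirror the `xar_instead_of_or2`
test added below). An `fshl` whose two data operands are the same value is a
rotate, and a left-rotate by 25 on 32-bit lanes is a right-rotate by 7, which
is the immediate the SVE2 `XAR` form encodes:

  define <4 x i32> @rotate_by_25(<4 x i32> %r) {
    %rot = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %r, <4 x i32> %r, <4 x i32> splat (i32 25))
    ret <4 x i32> %rot
  }

  ; With sve2-sha3 enabled this should now select roughly:
  ;   movi v1.2d, #0000000000000000
  ;   xar  z0.s, z0.s, z1.s, #7
  ;   ret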
---
 .../Target/AArch64/AArch64ISelDAGToDAG.cpp    | 61 +++++++++++++++++--
 llvm/test/CodeGen/AArch64/xar.ll              | 25 ++++++++
 2 files changed, 80 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 34f6db9374cb5..290fb3b43427d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -4632,22 +4632,55 @@ bool AArch64DAGToDAGISel::trySelectXAR(SDNode *N) {
   SDValue Imm = CurDAG->getTargetConstant(
       ShAmt, DL, N0.getOperand(1).getValueType(), false);
 
-  if (ShAmt + HsAmt != 64)
+  unsigned VTSizeInBits =
+      (Subtarget->hasSVE2() ? VT.getScalarSizeInBits() : 64);
+  if (ShAmt + HsAmt != VTSizeInBits)
     return false;
 
+  // The Neon SHA3 XAR operation only exists for v2i64; for v4i32, v8i16
+  // and v16i8 we can use the SVE2 XAR instructions when SVE2-SHA3 is
+  // available.
+  EVT SVT = MVT::v2i64;
+  switch (VT.getSimpleVT().SimpleTy) {
+  case MVT::v4i32:
+    SVT = MVT::nxv4i32;
+    break;
+  case MVT::v8i16:
+    SVT = MVT::nxv8i16;
+    break;
+  case MVT::v16i8:
+    SVT = MVT::nxv16i8;
+    break;
+  default:
+    if (!(VT == MVT::v2i64 || VT == MVT::v1i64))
+      return false;
+    break;
+  }
+
   if (!IsXOROperand) {
     SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i64);
-    SDNode *MOV =
-        CurDAG->getMachineNode(AArch64::MOVIv2d_ns, DL, MVT::v2i64, Zero);
+    SDNode *MOV = CurDAG->getMachineNode(AArch64::MOVIv2d_ns, DL, SVT, Zero);
     SDValue MOVIV = SDValue(MOV, 0);
+
     R1 = N1->getOperand(0);
     R2 = MOVIV;
   }
 
+  if (SVT != VT && VT != MVT::v1i64) {
+    SDValue Undef =
+        SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, SVT), 0);
+    SDValue ZSub = CurDAG->getTargetConstant(AArch64::zsub, DL, MVT::i32);
+
+    R1 = SDValue(CurDAG->getMachineNode(AArch64::INSERT_SUBREG, DL, SVT, Undef,
+                                        R1, ZSub),
+                 0);
+    R2 = SDValue(CurDAG->getMachineNode(AArch64::INSERT_SUBREG, DL, SVT, Undef,
+                                        R2, ZSub),
+                 0);
+  }
+
   // If the input is a v1i64, widen to a v2i64 to use XAR.
-  assert((VT == MVT::v1i64 || VT == MVT::v2i64) && "Unexpected XAR type!");
   if (VT == MVT::v1i64) {
-    EVT SVT = MVT::v2i64;
     SDValue Undef =
         SDValue(CurDAG->getMachineNode(AArch64::IMPLICIT_DEF, DL, SVT), 0);
     SDValue DSub = CurDAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
@@ -4661,12 +4694,28 @@ bool AArch64DAGToDAGISel::trySelectXAR(SDNode *N) {
   }
 
   SDValue Ops[] = {R1, R2, Imm};
-  SDNode *XAR = CurDAG->getMachineNode(AArch64::XAR, DL, MVT::v2i64, Ops);
+  SDNode *XAR = nullptr;
+
+  if (SVT != VT && VT != MVT::v1i64) {
+    if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::Int>(
+            SVT, {AArch64::XAR_ZZZI_B, AArch64::XAR_ZZZI_H, AArch64::XAR_ZZZI_S,
+                  AArch64::XAR_ZZZI_D}))
+      XAR = CurDAG->getMachineNode(Opc, DL, VT, Ops);
+  } else {
+    XAR = CurDAG->getMachineNode(AArch64::XAR, DL, MVT::v2i64, Ops);
+  }
+
+  if (!XAR)
+    return false;
 
   if (VT == MVT::v1i64) {
     SDValue DSub = CurDAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
     XAR = CurDAG->getMachineNode(AArch64::EXTRACT_SUBREG, DL, VT,
                                  SDValue(XAR, 0), DSub);
+  } else if (SVT != VT) {
+    SDValue ZSub = CurDAG->getTargetConstant(AArch64::zsub, DL, MVT::i32);
+    XAR = CurDAG->getMachineNode(AArch64::EXTRACT_SUBREG, DL, VT,
+                                 SDValue(XAR, 0), ZSub);
   }
   ReplaceNode(N, XAR);
   return true;
diff --git a/llvm/test/CodeGen/AArch64/xar.ll b/llvm/test/CodeGen/AArch64/xar.ll
index d682f4f4a1bfb..37450c7854a19 100644
--- a/llvm/test/CodeGen/AArch64/xar.ll
+++ b/llvm/test/CodeGen/AArch64/xar.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=aarch64 -mattr=+sha3 < %s | FileCheck --check-prefix=SHA3 %s
 ; RUN: llc -mtriple=aarch64 -mattr=-sha3 < %s | FileCheck --check-prefix=NOSHA3 %s
+; RUN: llc -mtriple=aarch64 -mattr=+sve2,sve2-sha3,+sha3 < %s | FileCheck --check-prefix=SVE2SHA3 %s
 
 define <2 x i64> @xar(<2 x i64> %x, <2 x i64> %y) {
 ; SHA3-LABEL: xar:
@@ -90,6 +91,14 @@ define <4 x i32> @xar_instead_of_or2(<4 x i32> %r) {
 ; NOSHA3-NEXT:    usra v1.4s, v0.4s, #7
 ; NOSHA3-NEXT:    mov v0.16b, v1.16b
 ; NOSHA3-NEXT:    ret
+;
+; SVE2SHA3-LABEL: xar_instead_of_or2:
+; SVE2SHA3:       // %bb.0: // %entry
+; SVE2SHA3-NEXT:    movi v1.2d, #0000000000000000
+; SVE2SHA3-NEXT:    // kill: def $q0 killed $q0 def $z0
+; SVE2SHA3-NEXT:    xar z0.s, z0.s, z1.s, #7
+; SVE2SHA3-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; SVE2SHA3-NEXT:    ret
 entry:
   %or = call <4 x i32> @llvm.fshl.v2i32(<4 x i32> %r, <4 x i32> %r, <4 x i32> splat (i32 25))
   ret <4 x i32> %or
@@ -109,6 +118,14 @@ define <8 x i16> @xar_instead_of_or3(<8 x i16> %r) {
 ; NOSHA3-NEXT:    usra v1.8h, v0.8h, #7
 ; NOSHA3-NEXT:    mov v0.16b, v1.16b
 ; NOSHA3-NEXT:    ret
+;
+; SVE2SHA3-LABEL: xar_instead_of_or3:
+; SVE2SHA3:       // %bb.0: // %entry
+; SVE2SHA3-NEXT:    movi v1.2d, #0000000000000000
+; SVE2SHA3-NEXT:    // kill: def $q0 killed $q0 def $z0
+; SVE2SHA3-NEXT:    xar z0.h, z0.h, z1.h, #7
+; SVE2SHA3-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; SVE2SHA3-NEXT:    ret
 entry:
   %or = call <8 x i16> @llvm.fshl.v2i16(<8 x i16> %r, <8 x i16> %r, <8 x i16> splat (i16 25))
   ret <8 x i16> %or
@@ -128,6 +145,14 @@ define <16 x i8> @xar_instead_of_or4(<16 x i8> %r) {
 ; NOSHA3-NEXT:    usra v1.16b, v0.16b, #7
 ; NOSHA3-NEXT:    mov v0.16b, v1.16b
 ; NOSHA3-NEXT:    ret
+;
+; SVE2SHA3-LABEL: xar_instead_of_or4:
+; SVE2SHA3:       // %bb.0: // %entry
+; SVE2SHA3-NEXT:    movi v1.2d, #0000000000000000
+; SVE2SHA3-NEXT:    // kill: def $q0 killed $q0 def $z0
+; SVE2SHA3-NEXT:    xar z0.b, z0.b, z1.b, #7
+; SVE2SHA3-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; SVE2SHA3-NEXT:    ret
 entry:
   %or = call <16 x i8> @llvm.fshl.v2i8(<16 x i8> %r, <16 x i8> %r, <16 x i8> splat (i8 25))
   ret <16 x i8> %or


