[llvm] [AArch64] Extend usage of `XAR` instruction for fixed-length operations (PR #139460)
Rajveer Singh Bharadwaj via llvm-commits
llvm-commits at lists.llvm.org
Thu Jun 5 03:20:46 PDT 2025
https://github.com/Rajveer100 updated https://github.com/llvm/llvm-project/pull/139460
From 7a9dfcb9990930a008786f3daa9f801a0bd9abe4 Mon Sep 17 00:00:00 2001
From: Rajveer <rajveer.developer at icloud.com>
Date: Sun, 11 May 2025 22:27:50 +0530
Subject: [PATCH] [AArch64] Extend usage of `XAR` instruction for fixed-length
operations
Resolves #139229
In #137162, support for `v2i64` was implemented for the vector rotate
transformation. Types such as `v4i32`, `v8i16` and `v16i8` have no Neon
SHA3 `XAR` form, but for those we can use the SVE2 `XAR` instruction when
SVE2 is available.
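
For illustration, a minimal IR sketch (not taken from the patch itself) of the
kind of fixed-length rotate-of-XOR this now selects to a single SVE2 `XAR`:

  declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)

  define <4 x i32> @rotr_xor(<4 x i32> %x, <4 x i32> %y) {
    %a = xor <4 x i32> %x, %y
    ; a left-rotate by 25 is a right-rotate by 7 for 32-bit lanes
    %b = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %a, <4 x i32> %a,
                                         <4 x i32> splat (i32 25))
    ret <4 x i32> %b
  }

With `llc -mtriple=aarch64 -mattr=+sve2` this should lower to roughly
`xar z0.s, z0.s, z1.s, #7` once the Neon inputs have been placed in Z
registers, matching the SVE2SHA3 check lines in the updated xar.ll below.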
---
.../Target/AArch64/AArch64ISelDAGToDAG.cpp | 78 ++++++--
llvm/test/CodeGen/AArch64/xar.ll | 176 ++++++++++++++++--
2 files changed, 231 insertions(+), 23 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 34f6db9374cb5..4bf6a8714ffd6 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -4606,7 +4606,36 @@ bool AArch64DAGToDAGISel::trySelectXAR(SDNode *N) {
return false;
}
- if (!Subtarget->hasSHA3())
+ // The Neon SHA3 XAR instruction only handles v2i64. For the other
+ // fixed-length types (v4i32, v8i16, v16i8) we can use the SVE2 XAR
+ // instruction when SVE2 is available.
+ EVT SVT;
+ switch (VT.getSimpleVT().SimpleTy) {
+ case MVT::v4i32:
+ SVT = MVT::nxv4i32;
+ break;
+ case MVT::v8i16:
+ SVT = MVT::nxv8i16;
+ break;
+ case MVT::v16i8:
+ SVT = MVT::nxv16i8;
+ break;
+ case MVT::v1i64:
+ case MVT::v2i32:
+ case MVT::v4i16:
+ case MVT::v8i8:
+ // Widen type to v2i64.
+ SVT = MVT::v2i64;
+ break;
+ default:
+ if (VT != MVT::v2i64)
+ return false;
+ SVT = MVT::v2i64;
+ break;
+ }
+
+ if ((!SVT.isScalableVector() && !Subtarget->hasSHA3()) ||
+ (SVT.isScalableVector() && !Subtarget->hasSVE2()))
return false;
if (N0->getOpcode() != AArch64ISD::VSHL ||
@@ -4632,41 +4661,68 @@ bool AArch64DAGToDAGISel::trySelectXAR(SDNode *N) {
SDValue Imm = CurDAG->getTargetConstant(
ShAmt, DL, N0.getOperand(1).getValueType(), false);
- if (ShAmt + HsAmt != 64)
+ unsigned VTSizeInBits = VT.getScalarSizeInBits();
+ if (ShAmt + HsAmt != VTSizeInBits)
return false;
if (!IsXOROperand) {
SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i64);
- SDNode *MOV =
- CurDAG->getMachineNode(AArch64::MOVIv2d_ns, DL, MVT::v2i64, Zero);
+ SDNode *MOV = CurDAG->getMachineNode(AArch64::MOVIv2d_ns, DL, SVT, Zero);
SDValue MOVIV = SDValue(MOV, 0);
+
R1 = N1->getOperand(0);
R2 = MOVIV;
}
- // If the input is a v1i64, widen to a v2i64 to use XAR.
- assert((VT == MVT::v1i64 || VT == MVT::v2i64) && "Unexpected XAR type!");
- if (VT == MVT::v1i64) {
- EVT SVT = MVT::v2i64;
+ if (SVT.isScalableVector()) {
+ SDValue Undef =
+ SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, SVT), 0);
+ SDValue ZSub = CurDAG->getTargetConstant(AArch64::zsub, DL, MVT::i32);
+
+ R1 = SDValue(CurDAG->getMachineNode(AArch64::INSERT_SUBREG, DL, SVT, Undef,
+ R1, ZSub),
+ 0);
+ R2 = SDValue(CurDAG->getMachineNode(AArch64::INSERT_SUBREG, DL, SVT, Undef,
+ R2, ZSub),
+ 0);
+ }
+
+ if (!SVT.isScalableVector() && SVT != VT) {
SDValue Undef =
SDValue(CurDAG->getMachineNode(AArch64::IMPLICIT_DEF, DL, SVT), 0);
SDValue DSub = CurDAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
+
R1 = SDValue(CurDAG->getMachineNode(AArch64::INSERT_SUBREG, DL, SVT, Undef,
R1, DSub),
0);
- if (R2.getValueType() == MVT::v1i64)
+ if (R2.getValueType() != SVT)
R2 = SDValue(CurDAG->getMachineNode(AArch64::INSERT_SUBREG, DL, SVT,
Undef, R2, DSub),
0);
}
SDValue Ops[] = {R1, R2, Imm};
- SDNode *XAR = CurDAG->getMachineNode(AArch64::XAR, DL, MVT::v2i64, Ops);
+ SDNode *XAR = nullptr;
- if (VT == MVT::v1i64) {
+ if (SVT.isScalableVector()) {
+ if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::Int>(
+ SVT, {AArch64::XAR_ZZZI_B, AArch64::XAR_ZZZI_H, AArch64::XAR_ZZZI_S,
+ AArch64::XAR_ZZZI_D}))
+ XAR = CurDAG->getMachineNode(Opc, DL, SVT, Ops);
+ } else {
+ XAR = CurDAG->getMachineNode(AArch64::XAR, DL, SVT, Ops);
+ }
+
+ assert(XAR && "Unexpected NULL value for XAR instruction in DAG");
+
+ if (!SVT.isScalableVector() && SVT != VT) {
SDValue DSub = CurDAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
XAR = CurDAG->getMachineNode(AArch64::EXTRACT_SUBREG, DL, VT,
SDValue(XAR, 0), DSub);
+ } else if (SVT.isScalableVector()) {
+ SDValue ZSub = CurDAG->getTargetConstant(AArch64::zsub, DL, MVT::i32);
+ XAR = CurDAG->getMachineNode(AArch64::EXTRACT_SUBREG, DL, VT,
+ SDValue(XAR, 0), ZSub);
}
ReplaceNode(N, XAR);
return true;
diff --git a/llvm/test/CodeGen/AArch64/xar.ll b/llvm/test/CodeGen/AArch64/xar.ll
index d682f4f4a1bfb..25baaa9dc0846 100644
--- a/llvm/test/CodeGen/AArch64/xar.ll
+++ b/llvm/test/CodeGen/AArch64/xar.ll
@@ -1,6 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=aarch64 -mattr=+sha3 < %s | FileCheck --check-prefix=SHA3 %s
; RUN: llc -mtriple=aarch64 -mattr=-sha3 < %s | FileCheck --check-prefix=NOSHA3 %s
+; RUN: llc -mtriple=aarch64 -mattr=+sve2 < %s | FileCheck --check-prefix=SVE2SHA3 %s
+
+; 128-bit vectors
define <2 x i64> @xar(<2 x i64> %x, <2 x i64> %y) {
; SHA3-LABEL: xar:
@@ -39,14 +42,14 @@ define <1 x i64> @xar_v1i64(<1 x i64> %a, <1 x i64> %b) {
ret <1 x i64> %fshl
}
-define <2 x i64> @xar_instead_of_or1(<2 x i64> %r) {
-; SHA3-LABEL: xar_instead_of_or1:
+define <2 x i64> @xar_instead_of_or_v2i64(<2 x i64> %r) {
+; SHA3-LABEL: xar_instead_of_or_v2i64:
; SHA3: // %bb.0: // %entry
; SHA3-NEXT: movi v1.2d, #0000000000000000
; SHA3-NEXT: xar v0.2d, v0.2d, v1.2d, #39
; SHA3-NEXT: ret
;
-; NOSHA3-LABEL: xar_instead_of_or1:
+; NOSHA3-LABEL: xar_instead_of_or_v2i64:
; NOSHA3: // %bb.0: // %entry
; NOSHA3-NEXT: shl v1.2d, v0.2d, #25
; NOSHA3-NEXT: usra v1.2d, v0.2d, #39
@@ -76,63 +79,212 @@ define <1 x i64> @xar_instead_of_or_v1i64(<1 x i64> %v.val) {
ret <1 x i64> %fshl
}
-define <4 x i32> @xar_instead_of_or2(<4 x i32> %r) {
-; SHA3-LABEL: xar_instead_of_or2:
+define <4 x i32> @xar_instead_of_or_v4i32(<4 x i32> %r) {
+; SHA3-LABEL: xar_instead_of_or_v4i32:
; SHA3: // %bb.0: // %entry
; SHA3-NEXT: shl v1.4s, v0.4s, #25
; SHA3-NEXT: usra v1.4s, v0.4s, #7
; SHA3-NEXT: mov v0.16b, v1.16b
; SHA3-NEXT: ret
;
-; NOSHA3-LABEL: xar_instead_of_or2:
+; NOSHA3-LABEL: xar_instead_of_or_v4i32:
; NOSHA3: // %bb.0: // %entry
; NOSHA3-NEXT: shl v1.4s, v0.4s, #25
; NOSHA3-NEXT: usra v1.4s, v0.4s, #7
; NOSHA3-NEXT: mov v0.16b, v1.16b
; NOSHA3-NEXT: ret
+;
+; SVE2SHA3-LABEL: xar_instead_of_or_v4i32:
+; SVE2SHA3: // %bb.0: // %entry
+; SVE2SHA3-NEXT: movi v1.2d, #0000000000000000
+; SVE2SHA3-NEXT: // kill: def $q0 killed $q0 def $z0
+; SVE2SHA3-NEXT: xar z0.s, z0.s, z1.s, #7
+; SVE2SHA3-NEXT: // kill: def $q0 killed $q0 killed $z0
+; SVE2SHA3-NEXT: ret
entry:
%or = call <4 x i32> @llvm.fshl.v2i32(<4 x i32> %r, <4 x i32> %r, <4 x i32> splat (i32 25))
ret <4 x i32> %or
}
-define <8 x i16> @xar_instead_of_or3(<8 x i16> %r) {
-; SHA3-LABEL: xar_instead_of_or3:
+define <8 x i16> @xar_instead_of_or_v8i16(<8 x i16> %r) {
+; SHA3-LABEL: xar_instead_of_or_v8i16:
; SHA3: // %bb.0: // %entry
; SHA3-NEXT: shl v1.8h, v0.8h, #9
; SHA3-NEXT: usra v1.8h, v0.8h, #7
; SHA3-NEXT: mov v0.16b, v1.16b
; SHA3-NEXT: ret
;
-; NOSHA3-LABEL: xar_instead_of_or3:
+; NOSHA3-LABEL: xar_instead_of_or_v8i16:
; NOSHA3: // %bb.0: // %entry
; NOSHA3-NEXT: shl v1.8h, v0.8h, #9
; NOSHA3-NEXT: usra v1.8h, v0.8h, #7
; NOSHA3-NEXT: mov v0.16b, v1.16b
; NOSHA3-NEXT: ret
+;
+; SVE2SHA3-LABEL: xar_instead_of_or_v8i16:
+; SVE2SHA3: // %bb.0: // %entry
+; SVE2SHA3-NEXT: movi v1.2d, #0000000000000000
+; SVE2SHA3-NEXT: // kill: def $q0 killed $q0 def $z0
+; SVE2SHA3-NEXT: xar z0.h, z0.h, z1.h, #7
+; SVE2SHA3-NEXT: // kill: def $q0 killed $q0 killed $z0
+; SVE2SHA3-NEXT: ret
entry:
%or = call <8 x i16> @llvm.fshl.v2i16(<8 x i16> %r, <8 x i16> %r, <8 x i16> splat (i16 25))
ret <8 x i16> %or
}
-define <16 x i8> @xar_instead_of_or4(<16 x i8> %r) {
-; SHA3-LABEL: xar_instead_of_or4:
+define <16 x i8> @xar_instead_of_or_v16i8(<16 x i8> %r) {
+; SHA3-LABEL: xar_instead_of_or_v16i8:
; SHA3: // %bb.0: // %entry
; SHA3-NEXT: add v1.16b, v0.16b, v0.16b
; SHA3-NEXT: usra v1.16b, v0.16b, #7
; SHA3-NEXT: mov v0.16b, v1.16b
; SHA3-NEXT: ret
;
-; NOSHA3-LABEL: xar_instead_of_or4:
+; NOSHA3-LABEL: xar_instead_of_or_v16i8:
; NOSHA3: // %bb.0: // %entry
; NOSHA3-NEXT: add v1.16b, v0.16b, v0.16b
; NOSHA3-NEXT: usra v1.16b, v0.16b, #7
; NOSHA3-NEXT: mov v0.16b, v1.16b
; NOSHA3-NEXT: ret
+;
+; SVE2SHA3-LABEL: xar_instead_of_or_v16i8:
+; SVE2SHA3: // %bb.0: // %entry
+; SVE2SHA3-NEXT: movi v1.2d, #0000000000000000
+; SVE2SHA3-NEXT: // kill: def $q0 killed $q0 def $z0
+; SVE2SHA3-NEXT: xar z0.b, z0.b, z1.b, #7
+; SVE2SHA3-NEXT: // kill: def $q0 killed $q0 killed $z0
+; SVE2SHA3-NEXT: ret
entry:
%or = call <16 x i8> @llvm.fshl.v2i8(<16 x i8> %r, <16 x i8> %r, <16 x i8> splat (i8 25))
ret <16 x i8> %or
}
+; 64-bit vectors
+
+define <2 x i32> @xar_v2i32(<2 x i32> %x, <2 x i32> %y) {
+; SHA3-LABEL: xar_v2i32:
+; SHA3: // %bb.0: // %entry
+; SHA3-NEXT: // kill: def $d0 killed $d0 def $q0
+; SHA3-NEXT: // kill: def $d1 killed $d1 def $q1
+; SHA3-NEXT: xar v0.2d, v0.2d, v1.2d, #7
+; SHA3-NEXT: // kill: def $d0 killed $d0 killed $q0
+; SHA3-NEXT: ret
+;
+; NOSHA3-LABEL: xar_v2i32:
+; NOSHA3: // %bb.0: // %entry
+; NOSHA3-NEXT: eor v1.8b, v0.8b, v1.8b
+; NOSHA3-NEXT: shl v0.2s, v1.2s, #25
+; NOSHA3-NEXT: usra v0.2s, v1.2s, #7
+; NOSHA3-NEXT: ret
+entry:
+ %a = xor <2 x i32> %x, %y
+ %b = call <2 x i32> @llvm.fshl(<2 x i32> %a, <2 x i32> %a, <2 x i32> <i32 25, i32 25>)
+ ret <2 x i32> %b
+}
+
+define <2 x i32> @xar_instead_of_or_v2i32(<2 x i32> %r) {
+; SHA3-LABEL: xar_instead_of_or_v2i32:
+; SHA3: // %bb.0: // %entry
+; SHA3-NEXT: movi v1.2d, #0000000000000000
+; SHA3-NEXT: // kill: def $d0 killed $d0 def $q0
+; SHA3-NEXT: xar v0.2d, v0.2d, v1.2d, #7
+; SHA3-NEXT: // kill: def $d0 killed $d0 killed $q0
+; SHA3-NEXT: ret
+;
+; NOSHA3-LABEL: xar_instead_of_or_v2i32:
+; NOSHA3: // %bb.0: // %entry
+; NOSHA3-NEXT: shl v1.2s, v0.2s, #25
+; NOSHA3-NEXT: usra v1.2s, v0.2s, #7
+; NOSHA3-NEXT: fmov d0, d1
+; NOSHA3-NEXT: ret
+entry:
+ %or = call <2 x i32> @llvm.fshl(<2 x i32> %r, <2 x i32> %r, <2 x i32> splat (i32 25))
+ ret <2 x i32> %or
+}
+
+define <4 x i16> @xar_v4i16(<4 x i16> %x, <4 x i16> %y) {
+; SHA3-LABEL: xar_v4i16:
+; SHA3: // %bb.0: // %entry
+; SHA3-NEXT: // kill: def $d0 killed $d0 def $q0
+; SHA3-NEXT: // kill: def $d1 killed $d1 def $q1
+; SHA3-NEXT: xar v0.2d, v0.2d, v1.2d, #7
+; SHA3-NEXT: // kill: def $d0 killed $d0 killed $q0
+; SHA3-NEXT: ret
+;
+; NOSHA3-LABEL: xar_v4i16:
+; NOSHA3: // %bb.0: // %entry
+; NOSHA3-NEXT: eor v1.8b, v0.8b, v1.8b
+; NOSHA3-NEXT: shl v0.4h, v1.4h, #9
+; NOSHA3-NEXT: usra v0.4h, v1.4h, #7
+; NOSHA3-NEXT: ret
+entry:
+ %a = xor <4 x i16> %x, %y
+ %b = call <4 x i16> @llvm.fshl(<4 x i16> %a, <4 x i16> %a, <4 x i16> splat (i16 25))
+ ret <4 x i16> %b
+}
+
+define <4 x i16> @xar_instead_of_or_v4i16(<4 x i16> %r) {
+; SHA3-LABEL: xar_instead_of_or_v4i16:
+; SHA3: // %bb.0: // %entry
+; SHA3-NEXT: movi v1.2d, #0000000000000000
+; SHA3-NEXT: // kill: def $d0 killed $d0 def $q0
+; SHA3-NEXT: xar v0.2d, v0.2d, v1.2d, #7
+; SHA3-NEXT: // kill: def $d0 killed $d0 killed $q0
+; SHA3-NEXT: ret
+;
+; NOSHA3-LABEL: xar_instead_of_or_v4i16:
+; NOSHA3: // %bb.0: // %entry
+; NOSHA3-NEXT: shl v1.4h, v0.4h, #9
+; NOSHA3-NEXT: usra v1.4h, v0.4h, #7
+; NOSHA3-NEXT: fmov d0, d1
+; NOSHA3-NEXT: ret
+entry:
+ %or = call <4 x i16> @llvm.fshl(<4 x i16> %r, <4 x i16> %r, <4 x i16> splat (i16 25))
+ ret <4 x i16> %or
+}
+
+define <8 x i8> @xar_v8i8(<8 x i8> %x, <8 x i8> %y) {
+; SHA3-LABEL: xar_v8i8:
+; SHA3: // %bb.0: // %entry
+; SHA3-NEXT: // kill: def $d0 killed $d0 def $q0
+; SHA3-NEXT: // kill: def $d1 killed $d1 def $q1
+; SHA3-NEXT: xar v0.2d, v0.2d, v1.2d, #7
+; SHA3-NEXT: // kill: def $d0 killed $d0 killed $q0
+; SHA3-NEXT: ret
+;
+; NOSHA3-LABEL: xar_v8i8:
+; NOSHA3: // %bb.0: // %entry
+; NOSHA3-NEXT: eor v1.8b, v0.8b, v1.8b
+; NOSHA3-NEXT: add v0.8b, v1.8b, v1.8b
+; NOSHA3-NEXT: usra v0.8b, v1.8b, #7
+; NOSHA3-NEXT: ret
+entry:
+ %a = xor <8 x i8> %x, %y
+ %b = call <8 x i8> @llvm.fshl(<8 x i8> %a, <8 x i8> %a, <8 x i8> splat (i8 25))
+ ret <8 x i8> %b
+}
+
+define <8 x i8> @xar_instead_of_or_v8i8(<8 x i8> %r) {
+; SHA3-LABEL: xar_instead_of_or_v8i8:
+; SHA3: // %bb.0: // %entry
+; SHA3-NEXT: movi v1.2d, #0000000000000000
+; SHA3-NEXT: // kill: def $d0 killed $d0 def $q0
+; SHA3-NEXT: xar v0.2d, v0.2d, v1.2d, #7
+; SHA3-NEXT: // kill: def $d0 killed $d0 killed $q0
+; SHA3-NEXT: ret
+;
+; NOSHA3-LABEL: xar_instead_of_or_v8i8:
+; NOSHA3: // %bb.0: // %entry
+; NOSHA3-NEXT: add v1.8b, v0.8b, v0.8b
+; NOSHA3-NEXT: usra v1.8b, v0.8b, #7
+; NOSHA3-NEXT: fmov d0, d1
+; NOSHA3-NEXT: ret
+entry:
+ %or = call <8 x i8> @llvm.fshl(<8 x i8> %r, <8 x i8> %r, <8 x i8> splat (i8 25))
+ ret <8 x i8> %or
+}
+
declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
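To reproduce the new lowering by hand (a sketch, assuming a built llc on PATH):

  llc -mtriple=aarch64 -mattr=+sve2 llvm/test/CodeGen/AArch64/xar.ll -o -

The check lines above were regenerated with utils/update_llc_test_checks.py,
per the UTC note at the top of the test.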