[llvm] 95bbaca - [AArch64] Extend usage of `XAR` instruction for fixed-length operations (#139460)

via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 11 22:24:04 PDT 2025


Author: Rajveer Singh Bharadwaj
Date: 2025-06-12T10:54:01+05:30
New Revision: 95bbaca6c1dcabb03bd67aabe3aaa4730a11200d

URL: https://github.com/llvm/llvm-project/commit/95bbaca6c1dcabb03bd67aabe3aaa4730a11200d
DIFF: https://github.com/llvm/llvm-project/commit/95bbaca6c1dcabb03bd67aabe3aaa4730a11200d.diff

LOG: [AArch64] Extend usage of `XAR` instruction for fixed-length operations (#139460)
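
For illustration, a minimal sketch of the pattern this change affects, adapted
from the updated llvm/test/CodeGen/AArch64/xar.ll below (function name, RUN
flags, and the IR body are taken from that test); the expected output is
paraphrased from the new SVE2 check lines rather than from a fresh llc run:

    ; RUN: llc -mtriple=aarch64 -mattr=+sve2 < %s
    define <2 x i32> @xar_v2i32(<2 x i32> %x, <2 x i32> %y) {
    entry:
      %a = xor <2 x i32> %x, %y
      %b = call <2 x i32> @llvm.fshl(<2 x i32> %a, <2 x i32> %a, <2 x i32> <i32 25, i32 25>)
      ret <2 x i32> %b
    }
    ; Without SVE2 (or with only +sha3, whose XAR covers just v2i64) this
    ; lowers to an eor/shl/usra sequence; with +sve2 the rotate-of-xor now
    ; selects a single SVE2 XAR on the containing Z register:
    ;   xar z0.s, z0.s, z1.s, #7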

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
    llvm/test/CodeGen/AArch64/xar.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 11cb91fbe02d4..009d69b2b9433 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -4606,7 +4606,33 @@ bool AArch64DAGToDAGISel::trySelectXAR(SDNode *N) {
     return false;
   }
 
-  if (!Subtarget->hasSHA3())
+  // The Neon SHA3 XAR instruction only handles v2i64; for the other
+  // fixed-length integer types we can instead use the SVE2 XAR
+  // instruction when SVE2 is available.
+  EVT SVT;
+  switch (VT.getSimpleVT().SimpleTy) {
+  case MVT::v4i32:
+  case MVT::v2i32:
+    SVT = MVT::nxv4i32;
+    break;
+  case MVT::v8i16:
+  case MVT::v4i16:
+    SVT = MVT::nxv8i16;
+    break;
+  case MVT::v16i8:
+  case MVT::v8i8:
+    SVT = MVT::nxv16i8;
+    break;
+  case MVT::v2i64:
+  case MVT::v1i64:
+    SVT = Subtarget->hasSHA3() ? MVT::v2i64 : MVT::nxv2i64;
+    break;
+  default:
+    return false;
+  }
+
+  if ((!SVT.isScalableVector() && !Subtarget->hasSHA3()) ||
+      (SVT.isScalableVector() && !Subtarget->hasSVE2()))
     return false;
 
   if (N0->getOpcode() != AArch64ISD::VSHL ||
@@ -4632,7 +4658,8 @@ bool AArch64DAGToDAGISel::trySelectXAR(SDNode *N) {
   SDValue Imm = CurDAG->getTargetConstant(
       ShAmt, DL, N0.getOperand(1).getValueType(), false);
 
-  if (ShAmt + HsAmt != 64)
+  unsigned VTSizeInBits = VT.getScalarSizeInBits();
+  if (ShAmt + HsAmt != VTSizeInBits)
     return false;
 
   if (!IsXOROperand) {
@@ -4640,33 +4667,76 @@ bool AArch64DAGToDAGISel::trySelectXAR(SDNode *N) {
     SDNode *MOV =
         CurDAG->getMachineNode(AArch64::MOVIv2d_ns, DL, MVT::v2i64, Zero);
     SDValue MOVIV = SDValue(MOV, 0);
+
     R1 = N1->getOperand(0);
     R2 = MOVIV;
   }
 
-  // If the input is a v1i64, widen to a v2i64 to use XAR.
-  assert((VT == MVT::v1i64 || VT == MVT::v2i64) && "Unexpected XAR type!");
-  if (VT == MVT::v1i64) {
-    EVT SVT = MVT::v2i64;
+  if (SVT != VT) {
     SDValue Undef =
-        SDValue(CurDAG->getMachineNode(AArch64::IMPLICIT_DEF, DL, SVT), 0);
-    SDValue DSub = CurDAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
+        SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, SVT), 0);
+
+    if (SVT.isScalableVector() && VT.is64BitVector()) {
+      EVT QVT = VT.getDoubleNumVectorElementsVT(*CurDAG->getContext());
+
+      SDValue UndefQ = SDValue(
+          CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, QVT), 0);
+      SDValue DSub = CurDAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
+
+      R1 = SDValue(CurDAG->getMachineNode(AArch64::INSERT_SUBREG, DL, QVT,
+                                          UndefQ, R1, DSub),
+                   0);
+      if (R2.getValueType() == VT)
+        R2 = SDValue(CurDAG->getMachineNode(AArch64::INSERT_SUBREG, DL, QVT,
+                                            UndefQ, R2, DSub),
+                     0);
+    }
+
+    SDValue SubReg = CurDAG->getTargetConstant(
+        (SVT.isScalableVector() ? AArch64::zsub : AArch64::dsub), DL, MVT::i32);
+
     R1 = SDValue(CurDAG->getMachineNode(AArch64::INSERT_SUBREG, DL, SVT, Undef,
-                                        R1, DSub),
+                                        R1, SubReg),
                  0);
-    if (R2.getValueType() == MVT::v1i64)
+
+    if (SVT.isScalableVector() || R2.getValueType() != SVT)
       R2 = SDValue(CurDAG->getMachineNode(AArch64::INSERT_SUBREG, DL, SVT,
-                                          Undef, R2, DSub),
+                                          Undef, R2, SubReg),
                    0);
   }
 
   SDValue Ops[] = {R1, R2, Imm};
-  SDNode *XAR = CurDAG->getMachineNode(AArch64::XAR, DL, MVT::v2i64, Ops);
+  SDNode *XAR = nullptr;
+
+  if (SVT.isScalableVector()) {
+    if (auto Opc = SelectOpcodeFromVT<SelectTypeKind::Int>(
+            SVT, {AArch64::XAR_ZZZI_B, AArch64::XAR_ZZZI_H, AArch64::XAR_ZZZI_S,
+                  AArch64::XAR_ZZZI_D}))
+      XAR = CurDAG->getMachineNode(Opc, DL, SVT, Ops);
+  } else {
+    XAR = CurDAG->getMachineNode(AArch64::XAR, DL, SVT, Ops);
+  }
 
-  if (VT == MVT::v1i64) {
-    SDValue DSub = CurDAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
-    XAR = CurDAG->getMachineNode(AArch64::EXTRACT_SUBREG, DL, VT,
-                                 SDValue(XAR, 0), DSub);
+  assert(XAR && "Unexpected NULL value for XAR instruction in DAG");
+
+  if (SVT != VT) {
+    if (VT.is64BitVector() && SVT.isScalableVector()) {
+      EVT QVT = VT.getDoubleNumVectorElementsVT(*CurDAG->getContext());
+
+      SDValue ZSub = CurDAG->getTargetConstant(AArch64::zsub, DL, MVT::i32);
+      SDNode *Q = CurDAG->getMachineNode(AArch64::EXTRACT_SUBREG, DL, QVT,
+                                         SDValue(XAR, 0), ZSub);
+
+      SDValue DSub = CurDAG->getTargetConstant(AArch64::dsub, DL, MVT::i32);
+      XAR = CurDAG->getMachineNode(AArch64::EXTRACT_SUBREG, DL, VT,
+                                   SDValue(Q, 0), DSub);
+    } else {
+      SDValue SubReg = CurDAG->getTargetConstant(
+          (SVT.isScalableVector() ? AArch64::zsub : AArch64::dsub), DL,
+          MVT::i32);
+      XAR = CurDAG->getMachineNode(AArch64::EXTRACT_SUBREG, DL, VT,
+                                   SDValue(XAR, 0), SubReg);
+    }
   }
   ReplaceNode(N, XAR);
   return true;

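The 64-bit fixed-length cases take the INSERT_SUBREG/EXTRACT_SUBREG path
above: the D-register operands are widened into their Q-register container
and then into the Z register before the SVE2 XAR is emitted, and the D
register is extracted back out afterwards. In the generated code this shows
up only as register-coalescing "kill" comments, e.g. for the <4 x i16> case
in the updated test below:

    ; SVE2-LABEL: xar_v4i16:
    ; SVE2:       // %bb.0: // %entry
    ; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
    ; SVE2-NEXT:    // kill: def $d1 killed $d1 def $z1
    ; SVE2-NEXT:    xar z0.h, z0.h, z1.h, #7
    ; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
    ; SVE2-NEXT:    ret
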
diff --git a/llvm/test/CodeGen/AArch64/xar.ll b/llvm/test/CodeGen/AArch64/xar.ll
index d682f4f4a1bfb..652617b58eaf3 100644
--- a/llvm/test/CodeGen/AArch64/xar.ll
+++ b/llvm/test/CodeGen/AArch64/xar.ll
@@ -1,6 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
 ; RUN: llc -mtriple=aarch64 -mattr=+sha3 < %s | FileCheck --check-prefix=SHA3 %s
 ; RUN: llc -mtriple=aarch64 -mattr=-sha3 < %s | FileCheck --check-prefix=NOSHA3 %s
+; RUN: llc -mtriple=aarch64 -mattr=+sve2 < %s | FileCheck --check-prefix=SVE2 %s
+
+; 128-bit vectors
 
 define <2 x i64> @xar(<2 x i64> %x, <2 x i64> %y) {
 ; SHA3-LABEL: xar:
@@ -14,6 +17,14 @@ define <2 x i64> @xar(<2 x i64> %x, <2 x i64> %y) {
 ; NOSHA3-NEXT:    shl v0.2d, v1.2d, #10
 ; NOSHA3-NEXT:    usra v0.2d, v1.2d, #54
 ; NOSHA3-NEXT:    ret
+;
+; SVE2-LABEL: xar:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    // kill: def $q0 killed $q0 def $z0
+; SVE2-NEXT:    // kill: def $q1 killed $q1 def $z1
+; SVE2-NEXT:    xar z0.d, z0.d, z1.d, #54
+; SVE2-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; SVE2-NEXT:    ret
     %a = xor <2 x i64> %x, %y
     %b = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %a, <2 x i64> %a, <2 x i64> <i64 10, i64 10>)
     ret <2 x i64> %b
@@ -34,24 +45,40 @@ define <1 x i64> @xar_v1i64(<1 x i64> %a, <1 x i64> %b) {
 ; NOSHA3-NEXT:    shl d0, d1, #1
 ; NOSHA3-NEXT:    usra d0, d1, #63
 ; NOSHA3-NEXT:    ret
+;
+; SVE2-LABEL: xar_v1i64:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
+; SVE2-NEXT:    // kill: def $d1 killed $d1 def $z1
+; SVE2-NEXT:    xar z0.d, z0.d, z1.d, #63
+; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; SVE2-NEXT:    ret
   %v.val = xor <1 x i64> %a, %b
   %fshl = tail call <1 x i64> @llvm.fshl.v1i64(<1 x i64> %v.val, <1 x i64> %v.val, <1 x i64> splat (i64 1))
   ret <1 x i64> %fshl
 }
 
-define <2 x i64> @xar_instead_of_or1(<2 x i64> %r) {
-; SHA3-LABEL: xar_instead_of_or1:
+define <2 x i64> @xar_instead_of_or_v2i64(<2 x i64> %r) {
+; SHA3-LABEL: xar_instead_of_or_v2i64:
 ; SHA3:       // %bb.0: // %entry
 ; SHA3-NEXT:    movi v1.2d, #0000000000000000
 ; SHA3-NEXT:    xar v0.2d, v0.2d, v1.2d, #39
 ; SHA3-NEXT:    ret
 ;
-; NOSHA3-LABEL: xar_instead_of_or1:
+; NOSHA3-LABEL: xar_instead_of_or_v2i64:
 ; NOSHA3:       // %bb.0: // %entry
 ; NOSHA3-NEXT:    shl v1.2d, v0.2d, #25
 ; NOSHA3-NEXT:    usra v1.2d, v0.2d, #39
 ; NOSHA3-NEXT:    mov v0.16b, v1.16b
 ; NOSHA3-NEXT:    ret
+;
+; SVE2-LABEL: xar_instead_of_or_v2i64:
+; SVE2:       // %bb.0: // %entry
+; SVE2-NEXT:    movi v1.2d, #0000000000000000
+; SVE2-NEXT:    // kill: def $q0 killed $q0 def $z0
+; SVE2-NEXT:    xar z0.d, z0.d, z1.d, #39
+; SVE2-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; SVE2-NEXT:    ret
 entry:
   %or = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %r, <2 x i64> %r, <2 x i64> splat (i64 25))
   ret <2 x i64> %or
@@ -72,67 +99,266 @@ define <1 x i64> @xar_instead_of_or_v1i64(<1 x i64> %v.val) {
 ; NOSHA3-NEXT:    usra d1, d0, #63
 ; NOSHA3-NEXT:    fmov d0, d1
 ; NOSHA3-NEXT:    ret
+;
+; SVE2-LABEL: xar_instead_of_or_v1i64:
+; SVE2:       // %bb.0:
+; SVE2-NEXT:    movi v1.2d, #0000000000000000
+; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
+; SVE2-NEXT:    xar z0.d, z0.d, z1.d, #63
+; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; SVE2-NEXT:    ret
   %fshl = tail call <1 x i64> @llvm.fshl.v1i64(<1 x i64> %v.val, <1 x i64> %v.val, <1 x i64> splat (i64 1))
   ret <1 x i64> %fshl
 }
 
-define <4 x i32> @xar_instead_of_or2(<4 x i32> %r) {
-; SHA3-LABEL: xar_instead_of_or2:
+define <4 x i32> @xar_instead_of_or_v4i32(<4 x i32> %r) {
+; SHA3-LABEL: xar_instead_of_or_v4i32:
 ; SHA3:       // %bb.0: // %entry
 ; SHA3-NEXT:    shl v1.4s, v0.4s, #25
 ; SHA3-NEXT:    usra v1.4s, v0.4s, #7
 ; SHA3-NEXT:    mov v0.16b, v1.16b
 ; SHA3-NEXT:    ret
 ;
-; NOSHA3-LABEL: xar_instead_of_or2:
+; NOSHA3-LABEL: xar_instead_of_or_v4i32:
 ; NOSHA3:       // %bb.0: // %entry
 ; NOSHA3-NEXT:    shl v1.4s, v0.4s, #25
 ; NOSHA3-NEXT:    usra v1.4s, v0.4s, #7
 ; NOSHA3-NEXT:    mov v0.16b, v1.16b
 ; NOSHA3-NEXT:    ret
+;
+; SVE2-LABEL: xar_instead_of_or_v4i32:
+; SVE2:       // %bb.0: // %entry
+; SVE2-NEXT:    movi v1.2d, #0000000000000000
+; SVE2-NEXT:    // kill: def $q0 killed $q0 def $z0
+; SVE2-NEXT:    xar z0.s, z0.s, z1.s, #7
+; SVE2-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; SVE2-NEXT:    ret
 entry:
   %or = call <4 x i32> @llvm.fshl.v2i32(<4 x i32> %r, <4 x i32> %r, <4 x i32> splat (i32 25))
   ret <4 x i32> %or
 }
 
-define <8 x i16> @xar_instead_of_or3(<8 x i16> %r) {
-; SHA3-LABEL: xar_instead_of_or3:
+define <8 x i16> @xar_instead_of_or_v8i16(<8 x i16> %r) {
+; SHA3-LABEL: xar_instead_of_or_v8i16:
 ; SHA3:       // %bb.0: // %entry
 ; SHA3-NEXT:    shl v1.8h, v0.8h, #9
 ; SHA3-NEXT:    usra v1.8h, v0.8h, #7
 ; SHA3-NEXT:    mov v0.16b, v1.16b
 ; SHA3-NEXT:    ret
 ;
-; NOSHA3-LABEL: xar_instead_of_or3:
+; NOSHA3-LABEL: xar_instead_of_or_v8i16:
 ; NOSHA3:       // %bb.0: // %entry
 ; NOSHA3-NEXT:    shl v1.8h, v0.8h, #9
 ; NOSHA3-NEXT:    usra v1.8h, v0.8h, #7
 ; NOSHA3-NEXT:    mov v0.16b, v1.16b
 ; NOSHA3-NEXT:    ret
+;
+; SVE2-LABEL: xar_instead_of_or_v8i16:
+; SVE2:       // %bb.0: // %entry
+; SVE2-NEXT:    movi v1.2d, #0000000000000000
+; SVE2-NEXT:    // kill: def $q0 killed $q0 def $z0
+; SVE2-NEXT:    xar z0.h, z0.h, z1.h, #7
+; SVE2-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; SVE2-NEXT:    ret
 entry:
   %or = call <8 x i16> @llvm.fshl.v2i16(<8 x i16> %r, <8 x i16> %r, <8 x i16> splat (i16 25))
   ret <8 x i16> %or
 }
 
-define <16 x i8> @xar_instead_of_or4(<16 x i8> %r) {
-; SHA3-LABEL: xar_instead_of_or4:
+define <16 x i8> @xar_instead_of_or_v16i8(<16 x i8> %r) {
+; SHA3-LABEL: xar_instead_of_or_v16i8:
 ; SHA3:       // %bb.0: // %entry
 ; SHA3-NEXT:    add v1.16b, v0.16b, v0.16b
 ; SHA3-NEXT:    usra v1.16b, v0.16b, #7
 ; SHA3-NEXT:    mov v0.16b, v1.16b
 ; SHA3-NEXT:    ret
 ;
-; NOSHA3-LABEL: xar_instead_of_or4:
+; NOSHA3-LABEL: xar_instead_of_or_v16i8:
 ; NOSHA3:       // %bb.0: // %entry
 ; NOSHA3-NEXT:    add v1.16b, v0.16b, v0.16b
 ; NOSHA3-NEXT:    usra v1.16b, v0.16b, #7
 ; NOSHA3-NEXT:    mov v0.16b, v1.16b
 ; NOSHA3-NEXT:    ret
+;
+; SVE2-LABEL: xar_instead_of_or_v16i8:
+; SVE2:       // %bb.0: // %entry
+; SVE2-NEXT:    movi v1.2d, #0000000000000000
+; SVE2-NEXT:    // kill: def $q0 killed $q0 def $z0
+; SVE2-NEXT:    xar z0.b, z0.b, z1.b, #7
+; SVE2-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; SVE2-NEXT:    ret
 entry:
   %or = call <16 x i8> @llvm.fshl.v2i8(<16 x i8> %r, <16 x i8> %r, <16 x i8> splat (i8 25))
   ret <16 x i8> %or
 }
 
+; 64-bit vectors
+
+define <2 x i32> @xar_v2i32(<2 x i32> %x, <2 x i32> %y) {
+; SHA3-LABEL: xar_v2i32:
+; SHA3:       // %bb.0: // %entry
+; SHA3-NEXT:    eor v1.8b, v0.8b, v1.8b
+; SHA3-NEXT:    shl v0.2s, v1.2s, #25
+; SHA3-NEXT:    usra v0.2s, v1.2s, #7
+; SHA3-NEXT:    ret
+;
+; NOSHA3-LABEL: xar_v2i32:
+; NOSHA3:       // %bb.0: // %entry
+; NOSHA3-NEXT:    eor v1.8b, v0.8b, v1.8b
+; NOSHA3-NEXT:    shl v0.2s, v1.2s, #25
+; NOSHA3-NEXT:    usra v0.2s, v1.2s, #7
+; NOSHA3-NEXT:    ret
+;
+; SVE2-LABEL: xar_v2i32:
+; SVE2:       // %bb.0: // %entry
+; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
+; SVE2-NEXT:    // kill: def $d1 killed $d1 def $z1
+; SVE2-NEXT:    xar z0.s, z0.s, z1.s, #7
+; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; SVE2-NEXT:    ret
+entry:
+  %a = xor <2 x i32> %x, %y
+  %b = call <2 x i32> @llvm.fshl(<2 x i32> %a, <2 x i32> %a, <2 x i32> <i32 25, i32 25>)
+  ret <2 x i32> %b
+}
+
+define <2 x i32> @xar_instead_of_or_v2i32(<2 x i32> %r) {
+; SHA3-LABEL: xar_instead_of_or_v2i32:
+; SHA3:       // %bb.0: // %entry
+; SHA3-NEXT:    shl v1.2s, v0.2s, #25
+; SHA3-NEXT:    usra v1.2s, v0.2s, #7
+; SHA3-NEXT:    fmov d0, d1
+; SHA3-NEXT:    ret
+;
+; NOSHA3-LABEL: xar_instead_of_or_v2i32:
+; NOSHA3:       // %bb.0: // %entry
+; NOSHA3-NEXT:    shl v1.2s, v0.2s, #25
+; NOSHA3-NEXT:    usra v1.2s, v0.2s, #7
+; NOSHA3-NEXT:    fmov d0, d1
+; NOSHA3-NEXT:    ret
+;
+; SVE2-LABEL: xar_instead_of_or_v2i32:
+; SVE2:       // %bb.0: // %entry
+; SVE2-NEXT:    movi v1.2d, #0000000000000000
+; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
+; SVE2-NEXT:    xar z0.s, z0.s, z1.s, #7
+; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; SVE2-NEXT:    ret
+entry:
+  %or = call <2 x i32> @llvm.fshl(<2 x i32> %r, <2 x i32> %r, <2 x i32> splat (i32 25))
+  ret <2 x i32> %or
+}
+
+define <4 x i16> @xar_v4i16(<4 x i16> %x, <4 x i16> %y) {
+; SHA3-LABEL: xar_v4i16:
+; SHA3:       // %bb.0: // %entry
+; SHA3-NEXT:    eor v1.8b, v0.8b, v1.8b
+; SHA3-NEXT:    shl v0.4h, v1.4h, #9
+; SHA3-NEXT:    usra v0.4h, v1.4h, #7
+; SHA3-NEXT:    ret
+;
+; NOSHA3-LABEL: xar_v4i16:
+; NOSHA3:       // %bb.0: // %entry
+; NOSHA3-NEXT:    eor v1.8b, v0.8b, v1.8b
+; NOSHA3-NEXT:    shl v0.4h, v1.4h, #9
+; NOSHA3-NEXT:    usra v0.4h, v1.4h, #7
+; NOSHA3-NEXT:    ret
+;
+; SVE2-LABEL: xar_v4i16:
+; SVE2:       // %bb.0: // %entry
+; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
+; SVE2-NEXT:    // kill: def $d1 killed $d1 def $z1
+; SVE2-NEXT:    xar z0.h, z0.h, z1.h, #7
+; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; SVE2-NEXT:    ret
+entry:
+  %a = xor <4 x i16> %x, %y
+  %b = call <4 x i16> @llvm.fshl(<4 x i16> %a, <4 x i16> %a, <4 x i16> splat (i16 25))
+  ret <4 x i16> %b
+}
+
+define <4 x i16> @xar_instead_of_or_v4i16(<4 x i16> %r) {
+; SHA3-LABEL: xar_instead_of_or_v4i16:
+; SHA3:       // %bb.0: // %entry
+; SHA3-NEXT:    shl v1.4h, v0.4h, #9
+; SHA3-NEXT:    usra v1.4h, v0.4h, #7
+; SHA3-NEXT:    fmov d0, d1
+; SHA3-NEXT:    ret
+;
+; NOSHA3-LABEL: xar_instead_of_or_v4i16:
+; NOSHA3:       // %bb.0: // %entry
+; NOSHA3-NEXT:    shl v1.4h, v0.4h, #9
+; NOSHA3-NEXT:    usra v1.4h, v0.4h, #7
+; NOSHA3-NEXT:    fmov d0, d1
+; NOSHA3-NEXT:    ret
+;
+; SVE2-LABEL: xar_instead_of_or_v4i16:
+; SVE2:       // %bb.0: // %entry
+; SVE2-NEXT:    movi v1.2d, #0000000000000000
+; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
+; SVE2-NEXT:    xar z0.h, z0.h, z1.h, #7
+; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; SVE2-NEXT:    ret
+entry:
+  %or = call <4 x i16> @llvm.fshl(<4 x i16> %r, <4 x i16> %r, <4 x i16> splat (i16 25))
+  ret <4 x i16> %or
+}
+
+define <8 x i8> @xar_v8i8(<8 x i8> %x, <8 x i8> %y) {
+; SHA3-LABEL: xar_v8i8:
+; SHA3:       // %bb.0: // %entry
+; SHA3-NEXT:    eor v1.8b, v0.8b, v1.8b
+; SHA3-NEXT:    add v0.8b, v1.8b, v1.8b
+; SHA3-NEXT:    usra v0.8b, v1.8b, #7
+; SHA3-NEXT:    ret
+;
+; NOSHA3-LABEL: xar_v8i8:
+; NOSHA3:       // %bb.0: // %entry
+; NOSHA3-NEXT:    eor v1.8b, v0.8b, v1.8b
+; NOSHA3-NEXT:    add v0.8b, v1.8b, v1.8b
+; NOSHA3-NEXT:    usra v0.8b, v1.8b, #7
+; NOSHA3-NEXT:    ret
+;
+; SVE2-LABEL: xar_v8i8:
+; SVE2:       // %bb.0: // %entry
+; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
+; SVE2-NEXT:    // kill: def $d1 killed $d1 def $z1
+; SVE2-NEXT:    xar z0.b, z0.b, z1.b, #7
+; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; SVE2-NEXT:    ret
+entry:
+  %a = xor <8 x i8> %x, %y
+  %b = call <8 x i8> @llvm.fshl(<8 x i8> %a, <8 x i8> %a, <8 x i8> splat (i8 25))
+  ret <8 x i8> %b
+}
+
+define <8 x i8> @xar_instead_of_or_v8i8(<8 x i8> %r) {
+; SHA3-LABEL: xar_instead_of_or_v8i8:
+; SHA3:       // %bb.0: // %entry
+; SHA3-NEXT:    add v1.8b, v0.8b, v0.8b
+; SHA3-NEXT:    usra v1.8b, v0.8b, #7
+; SHA3-NEXT:    fmov d0, d1
+; SHA3-NEXT:    ret
+;
+; NOSHA3-LABEL: xar_instead_of_or_v8i8:
+; NOSHA3:       // %bb.0: // %entry
+; NOSHA3-NEXT:    add v1.8b, v0.8b, v0.8b
+; NOSHA3-NEXT:    usra v1.8b, v0.8b, #7
+; NOSHA3-NEXT:    fmov d0, d1
+; NOSHA3-NEXT:    ret
+;
+; SVE2-LABEL: xar_instead_of_or_v8i8:
+; SVE2:       // %bb.0: // %entry
+; SVE2-NEXT:    movi v1.2d, #0000000000000000
+; SVE2-NEXT:    // kill: def $d0 killed $d0 def $z0
+; SVE2-NEXT:    xar z0.b, z0.b, z1.b, #7
+; SVE2-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; SVE2-NEXT:    ret
+entry:
+  %or = call <8 x i8> @llvm.fshl(<8 x i8> %r, <8 x i8> %r, <8 x i8> splat (i8 25))
+  ret <8 x i8> %or
+}
+
 declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>)
 declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
 declare <8 x i16> @llvm.fshl.v8i16(<8 x i16>, <8 x i16>, <8 x i16>)
