[llvm] 8eec729 - [SVE] Lower vector BITREVERSE and BSWAP operations.

Paul Walker via llvm-commits <llvm-commits at lists.llvm.org>
Tue Dec 22 08:51:15 PST 2020


Author: Paul Walker
Date: 2020-12-22T16:49:50Z
New Revision: 8eec7294fea87273215592a2dc5bee6afd47d456

URL: https://github.com/llvm/llvm-project/commit/8eec7294fea87273215592a2dc5bee6afd47d456
DIFF: https://github.com/llvm/llvm-project/commit/8eec7294fea87273215592a2dc5bee6afd47d456.diff

LOG: [SVE] Lower vector BITREVERSE and BSWAP operations.

These operations are lowered to RBIT and REVB instructions
respectively. When SVE is available for fixed-length vectors we
also lower BITREVERSE operating on NEON-sized vectors, since this
results in fewer instructions.

Differential Revision: https://reviews.llvm.org/D93606
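
For illustration, a minimal example distilled from the tests added
below: with SVE available, a scalable-vector bitreverse now selects
to a single predicated RBIT (the function name here is illustrative;
see sve-rev.ll below for the committed tests).

  define <vscale x 16 x i8> @bitreverse_example(<vscale x 16 x i8> %a) #0 {
    ; Expected codegen:
    ;   ptrue p0.b
    ;   rbit  z0.b, p0/m, z0.b
    %res = call <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8> %a)
    ret <vscale x 16 x i8> %res
  }
  declare <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8>)
  attributes #0 = { "target-features"="+sve" }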

Added: 
    llvm/test/CodeGen/AArch64/sve-fixed-length-rev.ll
    llvm/test/CodeGen/AArch64/sve-rev.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.h
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/lib/Target/AArch64/SVEInstrFormats.td
    llvm/test/CodeGen/AArch64/sve-intrinsics-reversal.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index e74bc739ddaf..48fbea840bad 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -182,6 +182,8 @@ static bool isMergePassthruOpcode(unsigned Opc) {
   switch (Opc) {
   default:
     return false;
+  case AArch64ISD::BITREVERSE_MERGE_PASSTHRU:
+  case AArch64ISD::BSWAP_MERGE_PASSTHRU:
   case AArch64ISD::DUP_MERGE_PASSTHRU:
   case AArch64ISD::FNEG_MERGE_PASSTHRU:
   case AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU:
@@ -1066,6 +1068,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
     // splat of 0 or undef) once vector selects supported in SVE codegen. See
     // D68877 for more details.
     for (auto VT : {MVT::nxv16i8, MVT::nxv8i16, MVT::nxv4i32, MVT::nxv2i64}) {
+      setOperationAction(ISD::BITREVERSE, VT, Custom);
+      setOperationAction(ISD::BSWAP, VT, Custom);
       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
       setOperationAction(ISD::UINT_TO_FP, VT, Custom);
       setOperationAction(ISD::SINT_TO_FP, VT, Custom);
@@ -1183,6 +1187,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
         setOperationAction(ISD::FP_ROUND, VT, Expand);
 
       // These operations are not supported on NEON but SVE can do them.
+      setOperationAction(ISD::BITREVERSE, MVT::v1i64, Custom);
       setOperationAction(ISD::MUL, MVT::v1i64, Custom);
       setOperationAction(ISD::MUL, MVT::v2i64, Custom);
       setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
@@ -1217,6 +1222,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       // Int operations with no NEON support.
       for (auto VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16,
                       MVT::v2i32, MVT::v4i32, MVT::v2i64}) {
+        setOperationAction(ISD::BITREVERSE, VT, Custom);
         setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
         setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
         setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
@@ -1330,6 +1336,8 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
   setOperationAction(ISD::ADD, VT, Custom);
   setOperationAction(ISD::AND, VT, Custom);
   setOperationAction(ISD::ANY_EXTEND, VT, Custom);
+  setOperationAction(ISD::BITREVERSE, VT, Custom);
+  setOperationAction(ISD::BSWAP, VT, Custom);
   setOperationAction(ISD::FADD, VT, Custom);
   setOperationAction(ISD::FCEIL, VT, Custom);
   setOperationAction(ISD::FDIV, VT, Custom);
@@ -1934,6 +1942,8 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
     MAKE_CASE(AArch64ISD::LDP)
     MAKE_CASE(AArch64ISD::STP)
     MAKE_CASE(AArch64ISD::STNP)
+    MAKE_CASE(AArch64ISD::BITREVERSE_MERGE_PASSTHRU)
+    MAKE_CASE(AArch64ISD::BSWAP_MERGE_PASSTHRU)
     MAKE_CASE(AArch64ISD::DUP_MERGE_PASSTHRU)
     MAKE_CASE(AArch64ISD::INDEX_VECTOR)
     MAKE_CASE(AArch64ISD::UABD)
@@ -3646,7 +3656,13 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
     return DAG.getNode(AArch64ISD::INSR, dl, Op.getValueType(),
                        Op.getOperand(1), Scalar);
   }
-
+  case Intrinsic::aarch64_sve_rbit:
+    return DAG.getNode(AArch64ISD::BITREVERSE_MERGE_PASSTHRU, dl,
+                       Op.getValueType(), Op.getOperand(2), Op.getOperand(3),
+                       Op.getOperand(1));
+  case Intrinsic::aarch64_sve_revb:
+    return DAG.getNode(AArch64ISD::BSWAP_MERGE_PASSTHRU, dl, Op.getValueType(),
+                       Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
   case Intrinsic::aarch64_sve_sxtb:
     return DAG.getNode(
         AArch64ISD::SIGN_EXTEND_INREG_MERGE_PASSTHRU, dl, Op.getValueType(),
@@ -4357,6 +4373,11 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
     return LowerFixedLengthVectorSelectToSVE(Op, DAG);
   case ISD::ABS:
     return LowerABS(Op, DAG);
+  case ISD::BITREVERSE:
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::BITREVERSE_MERGE_PASSTHRU,
+                               /*OverrideNEON=*/true);
+  case ISD::BSWAP:
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::BSWAP_MERGE_PASSTHRU);
   }
 }
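
A note on the lowering above: the SVE intrinsics take their
operands as (passthru, pg, op), while the new *_MERGE_PASSTHRU
nodes take (pg, op, passthru); hence the operand shuffle in
LowerINTRINSIC_WO_CHAIN. Both the intrinsics and the generic
BITREVERSE/BSWAP operations now funnel into the same nodes and
instruction patterns. A minimal sketch of the merging intrinsic
form, using a declaration already present in
sve-intrinsics-reversal.ll (the function name is illustrative):

  define <vscale x 8 x i16> @revb_merge(<vscale x 8 x i16> %passthru,
                                        <vscale x 8 x i1> %pg,
                                        <vscale x 8 x i16> %a) {
    ; Inactive lanes of the result take their value from %passthru.
    %out = call <vscale x 8 x i16> @llvm.aarch64.sve.revb.nxv8i16(
                    <vscale x 8 x i16> %passthru,
                    <vscale x 8 x i1> %pg,
                    <vscale x 8 x i16> %a)
    ret <vscale x 8 x i16> %out
  }
  declare <vscale x 8 x i16> @llvm.aarch64.sve.revb.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)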
 

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 96da82e48761..36518a5349b4 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -305,6 +305,8 @@ enum NodeType : unsigned {
   PTEST,
   PTRUE,
 
+  BITREVERSE_MERGE_PASSTHRU,
+  BSWAP_MERGE_PASSTHRU,
   DUP_MERGE_PASSTHRU,
   INDEX_VECTOR,
 

diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index f28c55ae22e6..e9a823c6c413 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -214,6 +214,8 @@ def AArch64frintn_mt : SDNode<"AArch64ISD::FROUNDEVEN_MERGE_PASSTHRU", SDT_AArch
 def AArch64frintz_mt : SDNode<"AArch64ISD::FTRUNC_MERGE_PASSTHRU", SDT_AArch64Arith>;
 def AArch64fsqrt_mt  : SDNode<"AArch64ISD::FSQRT_MERGE_PASSTHRU", SDT_AArch64Arith>;
 def AArch64frecpx_mt : SDNode<"AArch64ISD::FRECPX_MERGE_PASSTHRU", SDT_AArch64Arith>;
+def AArch64rbit_mt   : SDNode<"AArch64ISD::BITREVERSE_MERGE_PASSTHRU", SDT_AArch64Arith>;
+def AArch64revb_mt   : SDNode<"AArch64ISD::BSWAP_MERGE_PASSTHRU", SDT_AArch64Arith>;
 
 def SDT_AArch64FCVT : SDTypeProfile<1, 3, [
   SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisVec<3>,
@@ -571,8 +573,8 @@ let Predicates = [HasSVE] in {
   defm INSR_ZV : sve_int_perm_insrv<"insr", AArch64insr>;
   defm EXT_ZZI : sve_int_perm_extract_i<"ext", AArch64ext>;
 
-  defm RBIT_ZPmZ : sve_int_perm_rev_rbit<"rbit", int_aarch64_sve_rbit>;
-  defm REVB_ZPmZ : sve_int_perm_rev_revb<"revb", int_aarch64_sve_revb, bswap>;
+  defm RBIT_ZPmZ : sve_int_perm_rev_rbit<"rbit", AArch64rbit_mt>;
+  defm REVB_ZPmZ : sve_int_perm_rev_revb<"revb", AArch64revb_mt>;
   defm REVH_ZPmZ : sve_int_perm_rev_revh<"revh", int_aarch64_sve_revh>;
   defm REVW_ZPmZ : sve_int_perm_rev_revw<"revw", int_aarch64_sve_revw>;
 

diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index c4b4d95cd46d..b5077cf263e7 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -6012,26 +6012,20 @@ multiclass sve_int_perm_rev_rbit<string asm, SDPatternOperator op> {
   def _S : sve_int_perm_rev<0b10, 0b11, asm, ZPR32>;
   def _D : sve_int_perm_rev<0b11, 0b11, asm, ZPR64>;
 
-  def : SVE_3_Op_Pat<nxv16i8, op, nxv16i8, nxv16i1, nxv16i8, !cast<Instruction>(NAME # _B)>;
-  def : SVE_3_Op_Pat<nxv8i16, op, nxv8i16, nxv8i1,  nxv8i16, !cast<Instruction>(NAME # _H)>;
-  def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv4i1,  nxv4i32, !cast<Instruction>(NAME # _S)>;
-  def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i1,  nxv2i64, !cast<Instruction>(NAME # _D)>;
+  def : SVE_1_Op_Passthru_Pat<nxv16i8, op, nxv16i1, nxv16i8, !cast<Instruction>(NAME # _B)>;
+  def : SVE_1_Op_Passthru_Pat<nxv8i16, op, nxv8i1,  nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Passthru_Pat<nxv4i32, op, nxv4i1,  nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Passthru_Pat<nxv2i64, op, nxv2i1,  nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
-multiclass sve_int_perm_rev_revb<string asm,
-                                 SDPatternOperator int_op,
-                                 SDPatternOperator ir_op> {
+multiclass sve_int_perm_rev_revb<string asm, SDPatternOperator op> {
   def _H : sve_int_perm_rev<0b01, 0b00, asm, ZPR16>;
   def _S : sve_int_perm_rev<0b10, 0b00, asm, ZPR32>;
   def _D : sve_int_perm_rev<0b11, 0b00, asm, ZPR64>;
 
-  def : SVE_3_Op_Pat<nxv8i16, int_op, nxv8i16, nxv8i1,  nxv8i16, !cast<Instruction>(NAME # _H)>;
-  def : SVE_3_Op_Pat<nxv4i32, int_op, nxv4i32, nxv4i1,  nxv4i32, !cast<Instruction>(NAME # _S)>;
-  def : SVE_3_Op_Pat<nxv2i64, int_op, nxv2i64, nxv2i1,  nxv2i64, !cast<Instruction>(NAME # _D)>;
-
-  def : SVE_1_Op_AllActive_Pat<nxv8i16, ir_op, nxv8i16, !cast<Instruction>(NAME # _H), PTRUE_H>;
-  def : SVE_1_Op_AllActive_Pat<nxv4i32, ir_op, nxv4i32, !cast<Instruction>(NAME # _S), PTRUE_S>;
-  def : SVE_1_Op_AllActive_Pat<nxv2i64, ir_op, nxv2i64, !cast<Instruction>(NAME # _D), PTRUE_D>;
+  def : SVE_1_Op_Passthru_Pat<nxv8i16, op, nxv8i1,  nxv8i16, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Passthru_Pat<nxv4i32, op, nxv4i1,  nxv4i32, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Passthru_Pat<nxv2i64, op, nxv2i1,  nxv2i64, !cast<Instruction>(NAME # _D)>;
 }
 
 multiclass sve_int_perm_rev_revh<string asm, SDPatternOperator op> {

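Both multiclasses now share SVE_1_Op_Passthru_Pat, so one pattern
per element type serves both the intrinsic and the generic
operation, replacing the earlier split between SVE_3_Op_Pat
(intrinsic) and SVE_1_Op_AllActive_Pat (all-active bswap). For
reference, SVE_1_Op_Passthru_Pat is approximately the following
(paraphrased from the same file, not part of this diff):

  class SVE_1_Op_Passthru_Pat<ValueType vtd, SDPatternOperator op,
                              ValueType pg, ValueType vts,
                              Instruction inst>
  : Pat<(vtd (op pg:$Op1, vts:$Op2, vtd:$Op3)),
        (inst $Op3, $Op1, $Op2)>;
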
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-rev.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-rev.ll
new file mode 100644
index 000000000000..0e85c9e28def
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-rev.ll
@@ -0,0 +1,643 @@
+; RUN: llc -aarch64-sve-vector-bits-min=128  -asm-verbose=0 < %s | FileCheck %s -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
+; RUN: llc -aarch64-sve-vector-bits-min=384  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=512  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 -asm-verbose=0 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; Don't use SVE when its registers are no bigger than NEON.
+; NO_SVE-NOT: ptrue
+
+;
+; RBIT
+;
+
+define <8 x i8> @bitreverse_v8i8(<8 x i8> %op) #0 {
+; CHECK-LABEL: bitreverse_v8i8:
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl8
+; CHECK-NEXT: rbit z0.b, [[PG]]/m, z0.b
+; CHECK-NEXT: ret
+  %res = call <8 x i8> @llvm.bitreverse.v8i8(<8 x i8> %op)
+  ret <8 x i8> %res
+}
+
+define <16 x i8> @bitreverse_v16i8(<16 x i8> %op) #0 {
+; CHECK-LABEL: bitreverse_v16i8:
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl16
+; CHECK-NEXT: rbit z0.b, [[PG]]/m, z0.b
+; CHECK-NEXT: ret
+  %res = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %op)
+  ret <16 x i8> %res
+}
+
+define void @bitreverse_v32i8(<32 x i8>* %a) #0 {
+; CHECK-LABEL: bitreverse_v32i8:
+; CHECK: ptrue [[PG:p[0-9]+]].b, vl32
+; CHECK-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; CHECK-NEXT: rbit [[RES:z[0-9]+]].b, [[PG]]/m, [[OP]].b
+; CHECK-NEXT: st1b { [[RES]].b }, [[PG]], [x0]
+; CHECK-NEXT: ret
+  %op = load <32 x i8>, <32 x i8>* %a
+  %res = call <32 x i8> @llvm.bitreverse.v32i8(<32 x i8> %op)
+  store <32 x i8> %res, <32 x i8>* %a
+  ret void
+}
+
+define void @bitreverse_v64i8(<64 x i8>* %a) #0 {
+; CHECK-LABEL: bitreverse_v64i8:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].b, vl64
+; VBITS_GE_512-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: rbit [[RES:z[0-9]+]].b, [[PG]]/m, [[OP]].b
+; VBITS_GE_512-NEXT: st1b { [[RES]].b }, [[PG]], [x0]
+; VBITS_GE_512-NEXT: ret
+;
+; Ensure sensible type legalisation.
+; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].b, vl32
+; VBITS_EQ_256-DAG: mov w[[A:[0-9]+]], #32
+; VBITS_EQ_256-DAG: ld1b { [[OP_LO:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_EQ_256-DAG: ld1b { [[OP_HI:z[0-9]+]].b }, [[PG]]/z, [x0, x[[A]]]
+; VBITS_EQ_256-DAG: rbit [[RES_LO:z[0-9]+]].b, [[PG]]/m, [[OP_LO]].b
+; VBITS_EQ_256-DAG: rbit [[RES_HI:z[0-9]+]].b, [[PG]]/m, [[OP_HI]].b
+; VBITS_EQ_256-DAG: st1b { [[RES_LO]].b }, [[PG]], [x0]
+; VBITS_EQ_256-DAG: st1b { [[RES_HI]].b }, [[PG]], [x0, x[[A]]]
+; VBITS_EQ_256-NEXT: ret
+  %op = load <64 x i8>, <64 x i8>* %a
+  %res = call <64 x i8> @llvm.bitreverse.v64i8(<64 x i8> %op)
+  store <64 x i8> %res, <64 x i8>* %a
+  ret void
+}
+
+define void @bitreverse_v128i8(<128 x i8>* %a) #0 {
+; CHECK-LABEL: bitreverse_v128i8:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].b, vl128
+; VBITS_GE_1024-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: rbit [[RES:z[0-9]+]].b, [[PG]]/m, [[OP]].b
+; VBITS_GE_1024-NEXT: st1b { [[RES]].b }, [[PG]], [x0]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <128 x i8>, <128 x i8>* %a
+  %res = call <128 x i8> @llvm.bitreverse.v128i8(<128 x i8> %op)
+  store <128 x i8> %res, <128 x i8>* %a
+  ret void
+}
+
+define void @bitreverse_v256i8(<256 x i8>* %a) #0 {
+; CHECK-LABEL: bitreverse_v256i8:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].b, vl256
+; VBITS_GE_2048-NEXT: ld1b { [[OP:z[0-9]+]].b }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: rbit [[RES:z[0-9]+]].b, [[PG]]/m, [[OP]].b
+; VBITS_GE_2048-NEXT: st1b { [[RES]].b }, [[PG]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <256 x i8>, <256 x i8>* %a
+  %res = call <256 x i8> @llvm.bitreverse.v256i8(<256 x i8> %op)
+  store <256 x i8> %res, <256 x i8>* %a
+  ret void
+}
+
+define <4 x i16> @bitreverse_v4i16(<4 x i16> %op) #0 {
+; CHECK-LABEL: bitreverse_v4i16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl4
+; CHECK-NEXT: rbit z0.h, [[PG]]/m, z0.h
+; CHECK-NEXT: ret
+  %res = call <4 x i16> @llvm.bitreverse.v4i16(<4 x i16> %op)
+  ret <4 x i16> %res
+}
+
+define <8 x i16> @bitreverse_v8i16(<8 x i16> %op) #0 {
+; CHECK-LABEL: bitreverse_v8i16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl8
+; CHECK-NEXT: rbit z0.h, [[PG]]/m, z0.h
+; CHECK-NEXT: ret
+  %res = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %op)
+  ret <8 x i16> %res
+}
+
+define void @bitreverse_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: bitreverse_v16i16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: rbit [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; CHECK-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK-NEXT: ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %res = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %op)
+  store <16 x i16> %res, <16 x i16>* %a
+  ret void
+}
+
+define void @bitreverse_v32i16(<32 x i16>* %a) #0 {
+; CHECK-LABEL: bitreverse_v32i16:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32
+; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: rbit [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; VBITS_GE_512-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
+; VBITS_GE_512-NEXT: ret
+
+; Ensure sensible type legalisation.
+; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].h, vl16
+; VBITS_EQ_256-DAG: add x[[A_HI:[0-9]+]], x0, #32
+; VBITS_EQ_256-DAG: ld1h { [[OP_LO:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_EQ_256-DAG: ld1h { [[OP_HI:z[0-9]+]].h }, [[PG]]/z, [x[[A_HI]]]
+; VBITS_EQ_256-DAG: rbit [[RES_LO:z[0-9]+]].h, [[PG]]/m, [[OP_LO]].h
+; VBITS_EQ_256-DAG: rbit [[RES_HI:z[0-9]+]].h, [[PG]]/m, [[OP_HI]].h
+; VBITS_EQ_256-DAG: st1h { [[RES_LO]].h }, [[PG]], [x0]
+; VBITS_EQ_256-DAG: st1h { [[RES_HI]].h }, [[PG]], [x[[A_HI]]]
+; VBITS_EQ_256-NEXT: ret
+  %op = load <32 x i16>, <32 x i16>* %a
+  %res = call <32 x i16> @llvm.bitreverse.v32i16(<32 x i16> %op)
+  store <32 x i16> %res, <32 x i16>* %a
+  ret void
+}
+
+define void @bitreverse_v64i16(<64 x i16>* %a) #0 {
+; CHECK-LABEL: bitreverse_v64i16:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, vl64
+; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: rbit [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; VBITS_GE_1024-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <64 x i16>, <64 x i16>* %a
+  %res = call <64 x i16> @llvm.bitreverse.v64i16(<64 x i16> %op)
+  store <64 x i16> %res, <64 x i16>* %a
+  ret void
+}
+
+define void @bitreverse_v128i16(<128 x i16>* %a) #0 {
+; CHECK-LABEL: bitreverse_v128i16:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128
+; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: rbit [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; VBITS_GE_2048-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <128 x i16>, <128 x i16>* %a
+  %res = call <128 x i16> @llvm.bitreverse.v128i16(<128 x i16> %op)
+  store <128 x i16> %res, <128 x i16>* %a
+  ret void
+}
+
+define <2 x i32> @bitreverse_v2i32(<2 x i32> %op) #0 {
+; CHECK-LABEL: bitreverse_v2i32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl2
+; CHECK-NEXT: rbit z0.s, [[PG]]/m, z0.s
+; CHECK-NEXT: ret
+  %res = call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %op)
+  ret <2 x i32> %res
+}
+
+define <4 x i32> @bitreverse_v4i32(<4 x i32> %op) #0 {
+; CHECK-LABEL: bitreverse_v4i32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl4
+; CHECK-NEXT: rbit z0.s, [[PG]]/m, z0.s
+; CHECK-NEXT: ret
+  %res = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %op)
+  ret <4 x i32> %res
+}
+
+define void @bitreverse_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: bitreverse_v8i32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
+; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: rbit [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK-NEXT: ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %res = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %op)
+  store <8 x i32> %res, <8 x i32>* %a
+  ret void
+}
+
+define void @bitreverse_v16i32(<16 x i32>* %a) #0 {
+; CHECK-LABEL: bitreverse_v16i32:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16
+; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: rbit [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
+; VBITS_GE_512-NEXT: ret
+
+; Ensure sensible type legalisation.
+; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].s, vl8
+; VBITS_EQ_256-DAG: add x[[A_HI:[0-9]+]], x0, #32
+; VBITS_EQ_256-DAG: ld1w { [[OP_LO:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_EQ_256-DAG: ld1w { [[OP_HI:z[0-9]+]].s }, [[PG]]/z, [x[[A_HI]]]
+; VBITS_EQ_256-DAG: rbit [[RES_LO:z[0-9]+]].s, [[PG]]/m, [[OP_LO]].s
+; VBITS_EQ_256-DAG: rbit [[RES_HI:z[0-9]+]].s, [[PG]]/m, [[OP_HI]].s
+; VBITS_EQ_256-DAG: st1w { [[RES_LO]].s }, [[PG]], [x0]
+; VBITS_EQ_256-DAG: st1w { [[RES_HI]].s }, [[PG]], [x[[A_HI]]]
+; VBITS_EQ_256-NEXT: ret
+  %op = load <16 x i32>, <16 x i32>* %a
+  %res = call <16 x i32> @llvm.bitreverse.v16i32(<16 x i32> %op)
+  store <16 x i32> %res, <16 x i32>* %a
+  ret void
+}
+
+define void @bitreverse_v32i32(<32 x i32>* %a) #0 {
+; CHECK-LABEL: bitreverse_v32i32:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32
+; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: rbit [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <32 x i32>, <32 x i32>* %a
+  %res = call <32 x i32> @llvm.bitreverse.v32i32(<32 x i32> %op)
+  store <32 x i32> %res, <32 x i32>* %a
+  ret void
+}
+
+define void @bitreverse_v64i32(<64 x i32>* %a) #0 {
+; CHECK-LABEL: bitreverse_v64i32:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64
+; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: rbit [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <64 x i32>, <64 x i32>* %a
+  %res = call <64 x i32> @llvm.bitreverse.v64i32(<64 x i32> %op)
+  store <64 x i32> %res, <64 x i32>* %a
+  ret void
+}
+
+define <1 x i64> @bitreverse_v1i64(<1 x i64> %op) #0 {
+; CHECK-LABEL: bitreverse_v1i64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl1
+; CHECK-NEXT: rbit z0.d, [[PG]]/m, z0.d
+; CHECK-NEXT: ret
+  %res = call <1 x i64> @llvm.bitreverse.v1i64(<1 x i64> %op)
+  ret <1 x i64> %res
+}
+
+define <2 x i64> @bitreverse_v2i64(<2 x i64> %op) #0 {
+; CHECK-LABEL: bitreverse_v2i64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl2
+; CHECK-NEXT: rbit z0.d, [[PG]]/m, z0.d
+; CHECK-NEXT: ret
+  %res = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %op)
+  ret <2 x i64> %res
+}
+
+define void @bitreverse_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: bitreverse_v4i64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: rbit [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK-NEXT: ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %res = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %op)
+  store <4 x i64> %res, <4 x i64>* %a
+  ret void
+}
+
+define void @bitreverse_v8i64(<8 x i64>* %a) #0 {
+; CHECK-LABEL: bitreverse_v8i64:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: rbit [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
+; VBITS_GE_512-NEXT: ret
+
+; Ensure sensible type legalisation.
+; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
+; VBITS_EQ_256-DAG: add x[[A_HI:[0-9]+]], x0, #32
+; VBITS_EQ_256-DAG: ld1d { [[OP_LO:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_EQ_256-DAG: ld1d { [[OP_HI:z[0-9]+]].d }, [[PG]]/z, [x[[A_HI]]]
+; VBITS_EQ_256-DAG: rbit [[RES_LO:z[0-9]+]].d, [[PG]]/m, [[OP_LO]].d
+; VBITS_EQ_256-DAG: rbit [[RES_HI:z[0-9]+]].d, [[PG]]/m, [[OP_HI]].d
+; VBITS_EQ_256-DAG: st1d { [[RES_LO]].d }, [[PG]], [x0]
+; VBITS_EQ_256-DAG: st1d { [[RES_HI]].d }, [[PG]], [x[[A_HI]]]
+; VBITS_EQ_256-NEXT: ret
+  %op = load <8 x i64>, <8 x i64>* %a
+  %res = call <8 x i64> @llvm.bitreverse.v8i64(<8 x i64> %op)
+  store <8 x i64> %res, <8 x i64>* %a
+  ret void
+}
+
+define void @bitreverse_v16i64(<16 x i64>* %a) #0 {
+; CHECK-LABEL: bitreverse_v16i64:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: rbit [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <16 x i64>, <16 x i64>* %a
+  %res = call <16 x i64> @llvm.bitreverse.v16i64(<16 x i64> %op)
+  store <16 x i64> %res, <16 x i64>* %a
+  ret void
+}
+
+define void @bitreverse_v32i64(<32 x i64>* %a) #0 {
+; CHECK-LABEL: bitreverse_v32i64:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: rbit [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <32 x i64>, <32 x i64>* %a
+  %res = call <32 x i64> @llvm.bitreverse.v32i64(<32 x i64> %op)
+  store <32 x i64> %res, <32 x i64>* %a
+  ret void
+}
+
+;
+; REVB
+;
+
+; Don't use SVE for 64-bit vectors.
+define <4 x i16> @bswap_v4i16(<4 x i16> %op) #0 {
+; CHECK-LABEL: bswap_v4i16:
+; CHECK: rev16 v0.8b, v0.8b
+; CHECK-NEXT: ret
+  %res = call <4 x i16> @llvm.bswap.v4i16(<4 x i16> %op)
+  ret <4 x i16> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <8 x i16> @bswap_v8i16(<8 x i16> %op) #0 {
+; CHECK-LABEL: bswap_v8i16:
+; CHECK: rev16 v0.16b, v0.16b
+; CHECK-NEXT: ret
+  %res = call <8 x i16> @llvm.bswap.v8i16(<8 x i16> %op)
+  ret <8 x i16> %res
+}
+
+define void @bswap_v16i16(<16 x i16>* %a) #0 {
+; CHECK-LABEL: bswap_v16i16:
+; CHECK: ptrue [[PG:p[0-9]+]].h, vl16
+; CHECK-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; CHECK-NEXT: revb [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; CHECK-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
+; CHECK-NEXT: ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %res = call <16 x i16> @llvm.bswap.v16i16(<16 x i16> %op)
+  store <16 x i16> %res, <16 x i16>* %a
+  ret void
+}
+
+define void @bswap_v32i16(<32 x i16>* %a) #0 {
+; CHECK-LABEL: bswap_v32i16:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].h, vl32
+; VBITS_GE_512-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: revb [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; VBITS_GE_512-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
+; VBITS_GE_512-NEXT: ret
+
+; Ensure sensible type legalisation.
+; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].h, vl16
+; VBITS_EQ_256-DAG: add x[[A_HI:[0-9]+]], x0, #32
+; VBITS_EQ_256-DAG: ld1h { [[OP_LO:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_EQ_256-DAG: ld1h { [[OP_HI:z[0-9]+]].h }, [[PG]]/z, [x[[A_HI]]]
+; VBITS_EQ_256-DAG: revb [[RES_LO:z[0-9]+]].h, [[PG]]/m, [[OP_LO]].h
+; VBITS_EQ_256-DAG: revb [[RES_HI:z[0-9]+]].h, [[PG]]/m, [[OP_HI]].h
+; VBITS_EQ_256-DAG: st1h { [[RES_LO]].h }, [[PG]], [x0]
+; VBITS_EQ_256-DAG: st1h { [[RES_HI]].h }, [[PG]], [x[[A_HI]]]
+; VBITS_EQ_256-NEXT: ret
+  %op = load <32 x i16>, <32 x i16>* %a
+  %res = call <32 x i16> @llvm.bswap.v32i16(<32 x i16> %op)
+  store <32 x i16> %res, <32 x i16>* %a
+  ret void
+}
+
+define void @bswap_v64i16(<64 x i16>* %a) #0 {
+; CHECK-LABEL: bswap_v64i16:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].h, vl64
+; VBITS_GE_1024-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: revb [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; VBITS_GE_1024-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <64 x i16>, <64 x i16>* %a
+  %res = call <64 x i16> @llvm.bswap.v64i16(<64 x i16> %op)
+  store <64 x i16> %res, <64 x i16>* %a
+  ret void
+}
+
+define void @bswap_v128i16(<128 x i16>* %a) #0 {
+; CHECK-LABEL: bswap_v128i16:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].h, vl128
+; VBITS_GE_2048-NEXT: ld1h { [[OP:z[0-9]+]].h }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: revb [[RES:z[0-9]+]].h, [[PG]]/m, [[OP]].h
+; VBITS_GE_2048-NEXT: st1h { [[RES]].h }, [[PG]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <128 x i16>, <128 x i16>* %a
+  %res = call <128 x i16> @llvm.bswap.v128i16(<128 x i16> %op)
+  store <128 x i16> %res, <128 x i16>* %a
+  ret void
+}
+
+; Don't use SVE for 64-bit vectors.
+define <2 x i32> @bswap_v2i32(<2 x i32> %op) #0 {
+; CHECK-LABEL: bswap_v2i32:
+; CHECK: rev32 v0.8b, v0.8b
+; CHECK-NEXT: ret
+  %res = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %op)
+  ret <2 x i32> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <4 x i32> @bswap_v4i32(<4 x i32> %op) #0 {
+; CHECK-LABEL: bswap_v4i32:
+; CHECK: rev32 v0.16b, v0.16b
+; CHECK-NEXT: ret
+  %res = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %op)
+  ret <4 x i32> %res
+}
+
+define void @bswap_v8i32(<8 x i32>* %a) #0 {
+; CHECK-LABEL: bswap_v8i32:
+; CHECK: ptrue [[PG:p[0-9]+]].s, vl8
+; CHECK-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; CHECK-NEXT: revb [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; CHECK-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
+; CHECK-NEXT: ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %res = call <8 x i32> @llvm.bswap.v8i32(<8 x i32> %op)
+  store <8 x i32> %res, <8 x i32>* %a
+  ret void
+}
+
+define void @bswap_v16i32(<16 x i32>* %a) #0 {
+; CHECK-LABEL: bswap_v16i32:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].s, vl16
+; VBITS_GE_512-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: revb [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
+; VBITS_GE_512-NEXT: ret
+
+; Ensure sensible type legalisation.
+; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].s, vl8
+; VBITS_EQ_256-DAG: add x[[A_HI:[0-9]+]], x0, #32
+; VBITS_EQ_256-DAG: ld1w { [[OP_LO:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_EQ_256-DAG: ld1w { [[OP_HI:z[0-9]+]].s }, [[PG]]/z, [x[[A_HI]]]
+; VBITS_EQ_256-DAG: revb [[RES_LO:z[0-9]+]].s, [[PG]]/m, [[OP_LO]].s
+; VBITS_EQ_256-DAG: revb [[RES_HI:z[0-9]+]].s, [[PG]]/m, [[OP_HI]].s
+; VBITS_EQ_256-DAG: st1w { [[RES_LO]].s }, [[PG]], [x0]
+; VBITS_EQ_256-DAG: st1w { [[RES_HI]].s }, [[PG]], [x[[A_HI]]]
+; VBITS_EQ_256-NEXT: ret
+  %op = load <16 x i32>, <16 x i32>* %a
+  %res = call <16 x i32> @llvm.bswap.v16i32(<16 x i32> %op)
+  store <16 x i32> %res, <16 x i32>* %a
+  ret void
+}
+
+define void @bswap_v32i32(<32 x i32>* %a) #0 {
+; CHECK-LABEL: bswap_v32i32:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].s, vl32
+; VBITS_GE_1024-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: revb [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; VBITS_GE_1024-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <32 x i32>, <32 x i32>* %a
+  %res = call <32 x i32> @llvm.bswap.v32i32(<32 x i32> %op)
+  store <32 x i32> %res, <32 x i32>* %a
+  ret void
+}
+
+define void @bswap_v64i32(<64 x i32>* %a) #0 {
+; CHECK-LABEL: bswap_v64i32:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].s, vl64
+; VBITS_GE_2048-NEXT: ld1w { [[OP:z[0-9]+]].s }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: revb [[RES:z[0-9]+]].s, [[PG]]/m, [[OP]].s
+; VBITS_GE_2048-NEXT: st1w { [[RES]].s }, [[PG]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <64 x i32>, <64 x i32>* %a
+  %res = call <64 x i32> @llvm.bswap.v64i32(<64 x i32> %op)
+  store <64 x i32> %res, <64 x i32>* %a
+  ret void
+}
+
+; Don't use SVE for 64-bit vectors.
+define <1 x i64> @bswap_v1i64(<1 x i64> %op) #0 {
+; CHECK-LABEL: bswap_v1i64:
+; CHECK: rev64 v0.8b, v0.8b
+; CHECK-NEXT: ret
+  %res = call <1 x i64> @llvm.bswap.v1i64(<1 x i64> %op)
+  ret <1 x i64> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <2 x i64> @bswap_v2i64(<2 x i64> %op) #0 {
+; CHECK-LABEL: bswap_v2i64:
+; CHECK: rev64 v0.16b, v0.16b
+; CHECK-NEXT: ret
+  %res = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %op)
+  ret <2 x i64> %res
+}
+
+define void @bswap_v4i64(<4 x i64>* %a) #0 {
+; CHECK-LABEL: bswap_v4i64:
+; CHECK: ptrue [[PG:p[0-9]+]].d, vl4
+; CHECK-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; CHECK-NEXT: revb [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; CHECK-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
+; CHECK-NEXT: ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %res = call <4 x i64> @llvm.bswap.v4i64(<4 x i64> %op)
+  store <4 x i64> %res, <4 x i64>* %a
+  ret void
+}
+
+define void @bswap_v8i64(<8 x i64>* %a) #0 {
+; CHECK-LABEL: bswap_v8i64:
+; VBITS_GE_512: ptrue [[PG:p[0-9]+]].d, vl8
+; VBITS_GE_512-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_512-NEXT: revb [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
+; VBITS_GE_512-NEXT: ret
+
+; Ensure sensible type legalisation.
+; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].d, vl4
+; VBITS_EQ_256-DAG: add x[[A_HI:[0-9]+]], x0, #32
+; VBITS_EQ_256-DAG: ld1d { [[OP_LO:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_EQ_256-DAG: ld1d { [[OP_HI:z[0-9]+]].d }, [[PG]]/z, [x[[A_HI]]]
+; VBITS_EQ_256-DAG: revb [[RES_LO:z[0-9]+]].d, [[PG]]/m, [[OP_LO]].d
+; VBITS_EQ_256-DAG: revb [[RES_HI:z[0-9]+]].d, [[PG]]/m, [[OP_HI]].d
+; VBITS_EQ_256-DAG: st1d { [[RES_LO]].d }, [[PG]], [x0]
+; VBITS_EQ_256-DAG: st1d { [[RES_HI]].d }, [[PG]], [x[[A_HI]]]
+; VBITS_EQ_256-NEXT: ret
+  %op = load <8 x i64>, <8 x i64>* %a
+  %res = call <8 x i64> @llvm.bswap.v8i64(<8 x i64> %op)
+  store <8 x i64> %res, <8 x i64>* %a
+  ret void
+}
+
+define void @bswap_v16i64(<16 x i64>* %a) #0 {
+; CHECK-LABEL: bswap_v16i64:
+; VBITS_GE_1024: ptrue [[PG:p[0-9]+]].d, vl16
+; VBITS_GE_1024-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_1024-NEXT: revb [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; VBITS_GE_1024-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
+; VBITS_GE_1024-NEXT: ret
+  %op = load <16 x i64>, <16 x i64>* %a
+  %res = call <16 x i64> @llvm.bswap.v16i64(<16 x i64> %op)
+  store <16 x i64> %res, <16 x i64>* %a
+  ret void
+}
+
+define void @bswap_v32i64(<32 x i64>* %a) #0 {
+; CHECK-LABEL: bswap_v32i64:
+; VBITS_GE_2048: ptrue [[PG:p[0-9]+]].d, vl32
+; VBITS_GE_2048-NEXT: ld1d { [[OP:z[0-9]+]].d }, [[PG]]/z, [x0]
+; VBITS_GE_2048-NEXT: revb [[RES:z[0-9]+]].d, [[PG]]/m, [[OP]].d
+; VBITS_GE_2048-NEXT: st1d { [[RES]].d }, [[PG]], [x0]
+; VBITS_GE_2048-NEXT: ret
+  %op = load <32 x i64>, <32 x i64>* %a
+  %res = call <32 x i64> @llvm.bswap.v32i64(<32 x i64> %op)
+  store <32 x i64> %res, <32 x i64>* %a
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }
+
+declare <8 x i8> @llvm.bitreverse.v8i8(<8 x i8>)
+declare <16 x i8> @llvm.bitreverse.v16i8(<16 x i8>)
+declare <32 x i8> @llvm.bitreverse.v32i8(<32 x i8>)
+declare <64 x i8> @llvm.bitreverse.v64i8(<64 x i8>)
+declare <128 x i8> @llvm.bitreverse.v128i8(<128 x i8>)
+declare <256 x i8> @llvm.bitreverse.v256i8(<256 x i8>)
+declare <4 x i16> @llvm.bitreverse.v4i16(<4 x i16>)
+declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>)
+declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>)
+declare <32 x i16> @llvm.bitreverse.v32i16(<32 x i16>)
+declare <64 x i16> @llvm.bitreverse.v64i16(<64 x i16>)
+declare <128 x i16> @llvm.bitreverse.v128i16(<128 x i16>)
+declare <2 x i32> @llvm.bitreverse.v2i32(<2 x i32>)
+declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>)
+declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>)
+declare <16 x i32> @llvm.bitreverse.v16i32(<16 x i32>)
+declare <32 x i32> @llvm.bitreverse.v32i32(<32 x i32>)
+declare <64 x i32> @llvm.bitreverse.v64i32(<64 x i32>)
+declare <1 x i64> @llvm.bitreverse.v1i64(<1 x i64>)
+declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>)
+declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>)
+declare <8 x i64> @llvm.bitreverse.v8i64(<8 x i64>)
+declare <16 x i64> @llvm.bitreverse.v16i64(<16 x i64>)
+declare <32 x i64> @llvm.bitreverse.v32i64(<32 x i64>)
+
+declare <4 x i16> @llvm.bswap.v4i16(<4 x i16>)
+declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)
+declare <16 x i16> @llvm.bswap.v16i16(<16 x i16>)
+declare <32 x i16> @llvm.bswap.v32i16(<32 x i16>)
+declare <64 x i16> @llvm.bswap.v64i16(<64 x i16>)
+declare <128 x i16> @llvm.bswap.v128i16(<128 x i16>)
+declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>)
+declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
+declare <8 x i32> @llvm.bswap.v8i32(<8 x i32>)
+declare <16 x i32> @llvm.bswap.v16i32(<16 x i32>)
+declare <32 x i32> @llvm.bswap.v32i32(<32 x i32>)
+declare <64 x i32> @llvm.bswap.v64i32(<64 x i32>)
+declare <1 x i64> @llvm.bswap.v1i64(<1 x i64>)
+declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
+declare <4 x i64> @llvm.bswap.v4i64(<4 x i64>)
+declare <8 x i64> @llvm.bswap.v8i64(<8 x i64>)
+declare <16 x i64> @llvm.bswap.v16i64(<16 x i64>)
+declare <32 x i64> @llvm.bswap.v32i64(<32 x i64>)
+

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-reversal.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-reversal.ll
index ae3fa5c419ba..33034f7bac01 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-reversal.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-reversal.ll
@@ -82,37 +82,6 @@ define <vscale x 2 x i64> @revb_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg
   ret <vscale x 2 x i64> %out
 }
 
-;
-; REVB (bswap)
-;
-
-define <vscale x 8 x i16> @revb_i16_bswap(<vscale x 8 x i16> %a) {
-; CHECK-LABEL: revb_i16_bswap:
-; CHECK: ptrue [[PG:p[0-9]+]].h
-; CHECK-NEXT: revb z0.h, [[PG]]/m, z0.h
-; CHECK-NEXT: ret
-  %res = call <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16> %a)
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 4 x i32> @revb_i32_bswap(<vscale x 4 x i32> %a) {
-; CHECK-LABEL: revb_i32_bswap:
-; CHECK: ptrue [[PG:p[0-9]+]].s
-; CHECK-NEXT: revb z0.s, [[PG]]/m, z0.s
-; CHECK-NEXT: ret
-  %res = call <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32> %a)
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 2 x i64> @revb_i64_bswap(<vscale x 2 x i64> %a) {
-; CHECK-LABEL: revb_i64_bswap:
-; CHECK: ptrue [[PG:p[0-9]+]].d
-; CHECK-NEXT: revb z0.d, [[PG]]/m, z0.d
-; CHECK-NEXT: ret
-  %res = call <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64> %a)
-  ret <vscale x 2 x i64> %res
-}
-
 ;
 ; REVH
 ;
@@ -160,10 +129,6 @@ declare <vscale x 8 x i16> @llvm.aarch64.sve.revb.nxv8i16(<vscale x 8 x i16>, <v
 declare <vscale x 4 x i32> @llvm.aarch64.sve.revb.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.revb.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
 
-declare <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16>)
-declare <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32>)
-declare <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64>)
-
 declare <vscale x 4 x i32> @llvm.aarch64.sve.revh.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.revh.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)
 

diff --git a/llvm/test/CodeGen/AArch64/sve-rev.ll b/llvm/test/CodeGen/AArch64/sve-rev.ll
new file mode 100644
index 000000000000..c1f9eda48988
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-rev.ll
@@ -0,0 +1,97 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s 2>%t | FileCheck %s
+; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t
+
+; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
+; WARN-NOT: warning
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; RBIT
+;
+
+define <vscale x 16 x i8> @bitreverse_i8(<vscale x 16 x i8> %a) #0 {
+; CHECK-LABEL: bitreverse_i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b
+; CHECK-NEXT:    rbit z0.b, p0/m, z0.b
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8> %a)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @bitreverse_i16(<vscale x 8 x i16> %a) #0 {
+; CHECK-LABEL: bitreverse_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    rbit z0.h, p0/m, z0.h
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.bitreverse.nxv8i16(<vscale x 8 x i16> %a)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @bitreverse_i32(<vscale x 4 x i32> %a) #0 {
+; CHECK-LABEL: bitreverse_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    rbit z0.s, p0/m, z0.s
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i32> @llvm.bitreverse.nxv4i32(<vscale x 4 x i32> %a)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @bitreverse_i64(<vscale x 2 x i64> %a) #0 {
+; CHECK-LABEL: bitreverse_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    rbit z0.d, p0/m, z0.d
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i64> @llvm.bitreverse.nxv2i64(<vscale x 2 x i64> %a)
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; REVB
+;
+
+define <vscale x 8 x i16> @byteswap_i16(<vscale x 8 x i16> %a) #0 {
+; CHECK-LABEL: byteswap_i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h
+; CHECK-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK-NEXT:    ret
+  %res = call <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16> %a)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @byteswap_i32(<vscale x 4 x i32> %a) #0 {
+; CHECK-LABEL: byteswap_i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s
+; CHECK-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK-NEXT:    ret
+  %res = call <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32> %a)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @byteswap_i64(<vscale x 2 x i64> %a) #0 {
+; CHECK-LABEL: byteswap_i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK-NEXT:    ret
+  %res = call <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64> %a)
+  ret <vscale x 2 x i64> %res
+}
+
+attributes #0 = { "target-features"="+sve" }
+
+declare <vscale x 16 x i8> @llvm.bitreverse.nxv16i8(<vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.bitreverse.nxv8i16(<vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.bitreverse.nxv4i32(<vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.bitreverse.nxv2i64(<vscale x 2 x i64>)
+
+declare <vscale x 8 x i16> @llvm.bswap.nxv8i16(<vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.bswap.nxv4i32(<vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.bswap.nxv2i64(<vscale x 2 x i64>)