[llvm] [Work In Progress][LLVM][AArch64] Enable verifyTargetSDNode for scalable vectors and fix the fallout. (PR #104820)

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 19 10:48:13 PDT 2024


https://github.com/paulwalker-arm created https://github.com/llvm/llvm-project/pull/104820

Fix incorrect use of AArch64ISD::UZP1 in:
  AArch64TargetLowering::LowerDIV
  AArch64TargetLowering::LowerINSERT_SUBVECTOR
    
The latter highlighted DAG combines that relied on the broken behaviour, which this patch also fixes.
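
For reference, the invariants being enabled in verifyTargetSDNode amount to the following (a minimal C++ sketch distilled from the asserts in this patch, not the verbatim verifier; note that in this WIP the scalable-vector cases still break out early until the remaining bogus creations are fixed):

  EVT VT = N->getValueType(0);
  EVT OpVT = N->getOperand(0).getValueType();

  // Unpack nodes (e.g. AArch64ISD::UUNPKHI/UUNPKLO): the result keeps the
  // size of its input but has half the lanes.
  assert(OpVT.getSizeInBits() == VT.getSizeInBits() &&
         "Expected vectors of equal size!");
  assert(OpVT.getVectorElementCount() == VT.getVectorElementCount() * 2 &&
         "Expected result vector with half the lanes of its input!");

  // Permute nodes (e.g. AArch64ISD::TRN1/TRN2, ZIP1/ZIP2, UZP1/UZP2): the
  // result and both operands must be vectors of exactly the same type.
  assert(VT == N->getOperand(0).getValueType() &&
         VT == N->getOperand(1).getValueType() &&
         "Expected matching vectors!");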

>From 8f505884b2518bf15c3d6f4421eee0a90f4bd8ff Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Fri, 16 Aug 2024 17:17:09 +0100
Subject: [PATCH 1/3] [LLVM][AArch64] Fix invalid use of AArch64ISD::UZP2 in
 performConcatVectorsCombine.

---
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 97fb2c5f552731..c8699137a6ec75 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -19811,7 +19811,6 @@ static SDValue performConcatVectorsCombine(SDNode *N,
     // This optimization reduces instruction count.
     if (N00Opc == AArch64ISD::VLSHR && N10Opc == AArch64ISD::VLSHR &&
         N00->getOperand(1) == N10->getOperand(1)) {
-
       SDValue N000 = N00->getOperand(0);
       SDValue N100 = N10->getOperand(0);
       uint64_t N001ConstVal = N00->getConstantOperandVal(1),
@@ -19819,7 +19818,8 @@ static SDValue performConcatVectorsCombine(SDNode *N,
                NScalarSize = N->getValueType(0).getScalarSizeInBits();
 
       if (N001ConstVal == N101ConstVal && N001ConstVal > NScalarSize) {
-
+        N000 = DAG.getNode(AArch64ISD::NVCAST, dl, VT, N000);
+        N100 = DAG.getNode(AArch64ISD::NVCAST, dl, VT, N100);
         SDValue Uzp = DAG.getNode(AArch64ISD::UZP2, dl, VT, N000, N100);
         SDValue NewShiftConstant =
             DAG.getConstant(N001ConstVal - NScalarSize, dl, MVT::i32);
@@ -29264,8 +29264,10 @@ void AArch64TargetLowering::verifyTargetSDNode(const SDNode *N) const {
     assert(OpVT.getSizeInBits() == VT.getSizeInBits() &&
            "Expected vectors of equal size!");
     // TODO: Enable assert once bogus creations have been fixed.
-    // assert(OpVT.getVectorElementCount() == VT.getVectorElementCount()*2 &&
-    //       "Expected result vector with half the lanes of its input!");
+    if (VT.isScalableVector())
+      break;
+    assert(OpVT.getVectorElementCount() == VT.getVectorElementCount()*2 &&
+           "Expected result vector with half the lanes of its input!");
     break;
   }
   case AArch64ISD::TRN1:
@@ -29282,7 +29284,9 @@ void AArch64TargetLowering::verifyTargetSDNode(const SDNode *N) const {
     assert(VT.isVector() && Op0VT.isVector() && Op1VT.isVector() &&
            "Expected vectors!");
     // TODO: Enable assert once bogus creations have been fixed.
-    // assert(VT == Op0VT && VT == Op1VT && "Expected matching vectors!");
+    if (VT.isScalableVector())
+      break;
+    assert(VT == Op0VT && VT == Op1VT && "Expected matching vectors!");
     break;
   }
   }

>From 2ace7ba85eab54dfb79afb88420f35d12eb88122 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Thu, 11 Apr 2024 18:35:39 +0100
Subject: [PATCH 2/3] [LLVM][AArch64] Improve big endian code generation for
 SVE BITCASTs.

I've tried to maintain the use of ISD::BITCAST wherever possible, on
the assumption that this keeps access to more DAG combines. Perhaps,
though, it's more likely to just encourage the proliferation of
invalid combines than if I ensured only
AArch64ISD::NVCAST/REINTERPRET_CAST survived lowering?
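
In node terms, for packed types with different element sizes on big
endian, getSVESafeBitCast now simulates the effect of casting through
memory without touching the stack:

  // Byte-swap within the source elements to reach memory byte order, then
  // reinterpret the register and byte-swap back within the new elements.
  EVT PackedVTAsInt = PackedVT.changeTypeToInteger();
  EVT PackedInVTAsInt = PackedInVT.changeTypeToInteger();
  Op = DAG.getNode(ISD::BITCAST, DL, PackedInVTAsInt, Op); // fp->int, free
  Op = DAG.getNode(ISD::BSWAP, DL, PackedInVTAsInt, Op);   // lowers to REVB
  Op = DAG.getNode(AArch64ISD::NVCAST, DL, PackedVTAsInt, Op);
  Op = DAG.getNode(ISD::BSWAP, DL, PackedVTAsInt, Op);     // lowers to REVB
  Op = DAG.getNode(ISD::BITCAST, DL, PackedVT, Op);        // int->fp, free

which is why the tests below trade the store/reload stack traffic for a
pair of revb instructions.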
---
 .../lib/CodeGen/SelectionDAG/SelectionDAG.cpp |    2 +
 .../Target/AArch64/AArch64ISelLowering.cpp    |   59 +-
 .../lib/Target/AArch64/AArch64SVEInstrInfo.td |  163 +--
 llvm/test/CodeGen/AArch64/sve-bitcast.ll      | 1018 ++++-------------
 4 files changed, 347 insertions(+), 895 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index ab12c3b0e728a8..8d06206755d4f8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6140,6 +6140,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     break;
   case ISD::BSWAP:
     assert(VT.isInteger() && VT == N1.getValueType() && "Invalid BSWAP!");
+    if (VT.getScalarSizeInBits() == 8)
+      return N1;
     assert((VT.getScalarSizeInBits() % 16 == 0) &&
            "BSWAP types must be a multiple of 16 bits!");
     if (OpOpcode == ISD::UNDEF)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c8699137a6ec75..bebe48ac8a55a5 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1496,7 +1496,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::AVGCEILU, VT, Custom);
 
       if (!Subtarget->isLittleEndian())
-        setOperationAction(ISD::BITCAST, VT, Expand);
+        setOperationAction(ISD::BITCAST, VT, Custom);
 
       if (Subtarget->hasSVE2() ||
           (Subtarget->hasSME() && Subtarget->isStreaming()))
@@ -1510,9 +1510,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
     }
 
-    // Legalize unpacked bitcasts to REINTERPRET_CAST.
-    for (auto VT : {MVT::nxv2i16, MVT::nxv4i16, MVT::nxv2i32, MVT::nxv2bf16,
-                    MVT::nxv4bf16, MVT::nxv2f16, MVT::nxv4f16, MVT::nxv2f32})
+    // Type legalize unpacked bitcasts.
+    for (auto VT : {MVT::nxv2i16, MVT::nxv4i16, MVT::nxv2i32})
       setOperationAction(ISD::BITCAST, VT, Custom);
 
     for (auto VT :
@@ -1587,6 +1586,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
 
     for (auto VT : {MVT::nxv2f16, MVT::nxv4f16, MVT::nxv8f16, MVT::nxv2f32,
                     MVT::nxv4f32, MVT::nxv2f64}) {
+      setOperationAction(ISD::BITCAST, VT, Custom);
       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
       setOperationAction(ISD::MLOAD, VT, Custom);
@@ -1658,20 +1658,15 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setCondCodeAction(ISD::SETUGT, VT, Expand);
       setCondCodeAction(ISD::SETUEQ, VT, Expand);
       setCondCodeAction(ISD::SETONE, VT, Expand);
-
-      if (!Subtarget->isLittleEndian())
-        setOperationAction(ISD::BITCAST, VT, Expand);
     }
 
     for (auto VT : {MVT::nxv2bf16, MVT::nxv4bf16, MVT::nxv8bf16}) {
+      setOperationAction(ISD::BITCAST, VT, Custom);
       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
       setOperationAction(ISD::MLOAD, VT, Custom);
       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
       setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
       setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);
-
-      if (!Subtarget->isLittleEndian())
-        setOperationAction(ISD::BITCAST, VT, Expand);
     }
 
     setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
@@ -4960,22 +4955,35 @@ SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
     return LowerFixedLengthBitcastToSVE(Op, DAG);
 
   if (OpVT.isScalableVector()) {
-    // Bitcasting between unpacked vector types of different element counts is
-    // not a NOP because the live elements are laid out differently.
-    //                01234567
-    // e.g. nxv2i32 = XX??XX??
-    //      nxv4f16 = X?X?X?X?
-    if (OpVT.getVectorElementCount() != ArgVT.getVectorElementCount())
-      return SDValue();
+    assert(isTypeLegal(OpVT) && "Unexpected result type!");
 
-    if (isTypeLegal(OpVT) && !isTypeLegal(ArgVT)) {
+    // Handle type legalisation first.
+    if (!isTypeLegal(ArgVT)) {
       assert(OpVT.isFloatingPoint() && !ArgVT.isFloatingPoint() &&
              "Expected int->fp bitcast!");
+
+      // Bitcasting between unpacked vector types of different element counts is
+      // not a NOP because the live elements are laid out differently.
+      //                01234567
+      // e.g. nxv2i32 = XX??XX??
+      //      nxv4f16 = X?X?X?X?
+      if (OpVT.getVectorElementCount() != ArgVT.getVectorElementCount())
+        return SDValue();
+
       SDValue ExtResult =
           DAG.getNode(ISD::ANY_EXTEND, SDLoc(Op), getSVEContainerType(ArgVT),
                       Op.getOperand(0));
       return getSVESafeBitCast(OpVT, ExtResult, DAG);
     }
+
+    // Bitcasts between legal types with the same element count are legal.
+    if (OpVT.getVectorElementCount() == ArgVT.getVectorElementCount())
+      return Op;
+
+    // getSVESafeBitCast does not support casting between unpacked types.
+    if (!isPackedVectorType(OpVT, DAG))
+      return SDValue();
+
     return getSVESafeBitCast(OpVT, Op.getOperand(0), DAG);
   }
 
@@ -28877,7 +28885,20 @@ SDValue AArch64TargetLowering::getSVESafeBitCast(EVT VT, SDValue Op,
   if (InVT != PackedInVT)
     Op = DAG.getNode(AArch64ISD::REINTERPRET_CAST, DL, PackedInVT, Op);
 
-  Op = DAG.getNode(ISD::BITCAST, DL, PackedVT, Op);
+  if (Subtarget->isLittleEndian() ||
+      PackedVT.getScalarSizeInBits() == PackedInVT.getScalarSizeInBits())
+    Op = DAG.getNode(ISD::BITCAST, DL, PackedVT, Op);
+  else {
+    EVT PackedVTAsInt = PackedVT.changeTypeToInteger();
+    EVT PackedInVTAsInt = PackedInVT.changeTypeToInteger();
+
+    // Simulate the effect of casting through memory.
+    Op = DAG.getNode(ISD::BITCAST, DL, PackedInVTAsInt, Op);
+    Op = DAG.getNode(ISD::BSWAP, DL, PackedInVTAsInt, Op);
+    Op = DAG.getNode(AArch64ISD::NVCAST, DL, PackedVTAsInt, Op);
+    Op = DAG.getNode(ISD::BSWAP, DL, PackedVTAsInt, Op);
+    Op = DAG.getNode(ISD::BITCAST, DL, PackedVT, Op);
+  }
 
   // Unpack result if required.
   if (VT != PackedVT)
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index d9a70b5ef02fcb..35035aae05ecb6 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -2650,113 +2650,62 @@ let Predicates = [HasSVEorSME] in {
                                sub_32)>;
   }
 
-  // FIXME: BigEndian requires an additional REV instruction to satisfy the
-  // constraint that none of the bits change when stored to memory as one
-  // type, and reloaded as another type.
-  let Predicates = [IsLE] in {
-    def : Pat<(nxv16i8 (bitconvert nxv8i16:$src)), (nxv16i8 ZPR:$src)>;
-    def : Pat<(nxv16i8 (bitconvert nxv4i32:$src)), (nxv16i8 ZPR:$src)>;
-    def : Pat<(nxv16i8 (bitconvert nxv2i64:$src)), (nxv16i8 ZPR:$src)>;
-    def : Pat<(nxv16i8 (bitconvert nxv8f16:$src)), (nxv16i8 ZPR:$src)>;
-    def : Pat<(nxv16i8 (bitconvert nxv4f32:$src)), (nxv16i8 ZPR:$src)>;
-    def : Pat<(nxv16i8 (bitconvert nxv2f64:$src)), (nxv16i8 ZPR:$src)>;
-
-    def : Pat<(nxv8i16 (bitconvert nxv16i8:$src)), (nxv8i16 ZPR:$src)>;
-    def : Pat<(nxv8i16 (bitconvert nxv4i32:$src)), (nxv8i16 ZPR:$src)>;
-    def : Pat<(nxv8i16 (bitconvert nxv2i64:$src)), (nxv8i16 ZPR:$src)>;
-    def : Pat<(nxv8i16 (bitconvert nxv8f16:$src)), (nxv8i16 ZPR:$src)>;
-    def : Pat<(nxv8i16 (bitconvert nxv4f32:$src)), (nxv8i16 ZPR:$src)>;
-    def : Pat<(nxv8i16 (bitconvert nxv2f64:$src)), (nxv8i16 ZPR:$src)>;
-
-    def : Pat<(nxv4i32 (bitconvert nxv16i8:$src)), (nxv4i32 ZPR:$src)>;
-    def : Pat<(nxv4i32 (bitconvert nxv8i16:$src)), (nxv4i32 ZPR:$src)>;
-    def : Pat<(nxv4i32 (bitconvert nxv2i64:$src)), (nxv4i32 ZPR:$src)>;
-    def : Pat<(nxv4i32 (bitconvert nxv8f16:$src)), (nxv4i32 ZPR:$src)>;
-    def : Pat<(nxv4i32 (bitconvert nxv4f32:$src)), (nxv4i32 ZPR:$src)>;
-    def : Pat<(nxv4i32 (bitconvert nxv2f64:$src)), (nxv4i32 ZPR:$src)>;
-
-    def : Pat<(nxv2i64 (bitconvert nxv16i8:$src)), (nxv2i64 ZPR:$src)>;
-    def : Pat<(nxv2i64 (bitconvert nxv8i16:$src)), (nxv2i64 ZPR:$src)>;
-    def : Pat<(nxv2i64 (bitconvert nxv4i32:$src)), (nxv2i64 ZPR:$src)>;
-    def : Pat<(nxv2i64 (bitconvert nxv8f16:$src)), (nxv2i64 ZPR:$src)>;
-    def : Pat<(nxv2i64 (bitconvert nxv4f32:$src)), (nxv2i64 ZPR:$src)>;
-    def : Pat<(nxv2i64 (bitconvert nxv2f64:$src)), (nxv2i64 ZPR:$src)>;
-
-    def : Pat<(nxv8f16 (bitconvert nxv16i8:$src)), (nxv8f16 ZPR:$src)>;
-    def : Pat<(nxv8f16 (bitconvert nxv8i16:$src)), (nxv8f16 ZPR:$src)>;
-    def : Pat<(nxv8f16 (bitconvert nxv4i32:$src)), (nxv8f16 ZPR:$src)>;
-    def : Pat<(nxv8f16 (bitconvert nxv2i64:$src)), (nxv8f16 ZPR:$src)>;
-    def : Pat<(nxv8f16 (bitconvert nxv4f32:$src)), (nxv8f16 ZPR:$src)>;
-    def : Pat<(nxv8f16 (bitconvert nxv2f64:$src)), (nxv8f16 ZPR:$src)>;
-
-    def : Pat<(nxv4f32 (bitconvert nxv16i8:$src)), (nxv4f32 ZPR:$src)>;
-    def : Pat<(nxv4f32 (bitconvert nxv8i16:$src)), (nxv4f32 ZPR:$src)>;
-    def : Pat<(nxv4f32 (bitconvert nxv4i32:$src)), (nxv4f32 ZPR:$src)>;
-    def : Pat<(nxv4f32 (bitconvert nxv2i64:$src)), (nxv4f32 ZPR:$src)>;
-    def : Pat<(nxv4f32 (bitconvert nxv8f16:$src)), (nxv4f32 ZPR:$src)>;
-    def : Pat<(nxv4f32 (bitconvert nxv2f64:$src)), (nxv4f32 ZPR:$src)>;
-
-    def : Pat<(nxv2f64 (bitconvert nxv16i8:$src)), (nxv2f64 ZPR:$src)>;
-    def : Pat<(nxv2f64 (bitconvert nxv8i16:$src)), (nxv2f64 ZPR:$src)>;
-    def : Pat<(nxv2f64 (bitconvert nxv4i32:$src)), (nxv2f64 ZPR:$src)>;
-    def : Pat<(nxv2f64 (bitconvert nxv2i64:$src)), (nxv2f64 ZPR:$src)>;
-    def : Pat<(nxv2f64 (bitconvert nxv8f16:$src)), (nxv2f64 ZPR:$src)>;
-    def : Pat<(nxv2f64 (bitconvert nxv4f32:$src)), (nxv2f64 ZPR:$src)>;
-
-    def : Pat<(nxv8bf16 (bitconvert nxv16i8:$src)), (nxv8bf16 ZPR:$src)>;
-    def : Pat<(nxv8bf16 (bitconvert nxv8i16:$src)), (nxv8bf16 ZPR:$src)>;
-    def : Pat<(nxv8bf16 (bitconvert nxv4i32:$src)), (nxv8bf16 ZPR:$src)>;
-    def : Pat<(nxv8bf16 (bitconvert nxv2i64:$src)), (nxv8bf16 ZPR:$src)>;
-    def : Pat<(nxv8bf16 (bitconvert nxv8f16:$src)), (nxv8bf16 ZPR:$src)>;
-    def : Pat<(nxv8bf16 (bitconvert nxv4f32:$src)), (nxv8bf16 ZPR:$src)>;
-    def : Pat<(nxv8bf16 (bitconvert nxv2f64:$src)), (nxv8bf16 ZPR:$src)>;
-
-    def : Pat<(nxv16i8 (bitconvert nxv8bf16:$src)), (nxv16i8 ZPR:$src)>;
-    def : Pat<(nxv8i16 (bitconvert nxv8bf16:$src)), (nxv8i16 ZPR:$src)>;
-    def : Pat<(nxv4i32 (bitconvert nxv8bf16:$src)), (nxv4i32 ZPR:$src)>;
-    def : Pat<(nxv2i64 (bitconvert nxv8bf16:$src)), (nxv2i64 ZPR:$src)>;
-    def : Pat<(nxv8f16 (bitconvert nxv8bf16:$src)), (nxv8f16 ZPR:$src)>;
-    def : Pat<(nxv4f32 (bitconvert nxv8bf16:$src)), (nxv4f32 ZPR:$src)>;
-    def : Pat<(nxv2f64 (bitconvert nxv8bf16:$src)), (nxv2f64 ZPR:$src)>;
-
-    def : Pat<(nxv16i1 (bitconvert aarch64svcount:$src)), (nxv16i1 PPR:$src)>;
-    def : Pat<(aarch64svcount (bitconvert nxv16i1:$src)), (aarch64svcount PNR:$src)>;
-  }
-
-  // These allow casting from/to unpacked predicate types.
-  def : Pat<(nxv16i1 (reinterpret_cast nxv16i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv16i1 (reinterpret_cast nxv8i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv16i1 (reinterpret_cast nxv4i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv16i1 (reinterpret_cast nxv2i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv16i1 (reinterpret_cast nxv1i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv8i1 (reinterpret_cast nxv16i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv8i1 (reinterpret_cast  nxv4i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv8i1 (reinterpret_cast  nxv2i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv8i1 (reinterpret_cast  nxv1i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv4i1 (reinterpret_cast nxv16i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv4i1 (reinterpret_cast  nxv8i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv4i1 (reinterpret_cast  nxv2i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv4i1 (reinterpret_cast  nxv1i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv2i1 (reinterpret_cast nxv16i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv2i1 (reinterpret_cast  nxv8i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv2i1 (reinterpret_cast  nxv4i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv2i1 (reinterpret_cast  nxv1i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv1i1 (reinterpret_cast nxv16i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv1i1 (reinterpret_cast  nxv8i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv1i1 (reinterpret_cast  nxv4i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-  def : Pat<(nxv1i1 (reinterpret_cast  nxv2i1:$src)), (COPY_TO_REGCLASS PPR:$src, PPR)>;
-
-  // These allow casting from/to unpacked floating-point types.
-  def : Pat<(nxv2f16 (reinterpret_cast nxv8f16:$src)), (COPY_TO_REGCLASS ZPR:$src, ZPR)>;
-  def : Pat<(nxv8f16 (reinterpret_cast nxv2f16:$src)), (COPY_TO_REGCLASS ZPR:$src, ZPR)>;
-  def : Pat<(nxv4f16 (reinterpret_cast nxv8f16:$src)), (COPY_TO_REGCLASS ZPR:$src, ZPR)>;
-  def : Pat<(nxv8f16 (reinterpret_cast nxv4f16:$src)), (COPY_TO_REGCLASS ZPR:$src, ZPR)>;
-  def : Pat<(nxv2f32 (reinterpret_cast nxv4f32:$src)), (COPY_TO_REGCLASS ZPR:$src, ZPR)>;
-  def : Pat<(nxv4f32 (reinterpret_cast nxv2f32:$src)), (COPY_TO_REGCLASS ZPR:$src, ZPR)>;
-  def : Pat<(nxv2bf16 (reinterpret_cast nxv8bf16:$src)), (COPY_TO_REGCLASS ZPR:$src, ZPR)>;
-  def : Pat<(nxv8bf16 (reinterpret_cast nxv2bf16:$src)), (COPY_TO_REGCLASS ZPR:$src, ZPR)>;
-  def : Pat<(nxv4bf16 (reinterpret_cast nxv8bf16:$src)), (COPY_TO_REGCLASS ZPR:$src, ZPR)>;
-  def : Pat<(nxv8bf16 (reinterpret_cast nxv4bf16:$src)), (COPY_TO_REGCLASS ZPR:$src, ZPR)>;
+  // For big endian, only BITCASTs between same sized vector types with
+  // same sized vector elements can be isel'd directly.
+  let Predicates = [IsLE] in
+    foreach VT = [ nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv8f16, nxv4f32, nxv2f64, nxv8bf16 ] in
+      foreach VT2 = [ nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv8f16, nxv4f32, nxv2f64, nxv8bf16 ] in
+        if !ne(VT,VT2) then
+          def : Pat<(VT (bitconvert (VT2 ZPR:$src))), (VT ZPR:$src)>;
+
+  def : Pat<(nxv8i16 (bitconvert (nxv8f16 ZPR:$src))), (nxv8i16 ZPR:$src)>;
+  def : Pat<(nxv8f16 (bitconvert (nxv8i16 ZPR:$src))), (nxv8f16 ZPR:$src)>;
+
+  def : Pat<(nxv4i32 (bitconvert (nxv4f32 ZPR:$src))), (nxv4i32 ZPR:$src)>;
+  def : Pat<(nxv4f32 (bitconvert (nxv4i32 ZPR:$src))), (nxv4f32 ZPR:$src)>;
+
+  def : Pat<(nxv2i64 (bitconvert (nxv2f64 ZPR:$src))), (nxv2i64 ZPR:$src)>;
+  def : Pat<(nxv2f64 (bitconvert (nxv2i64 ZPR:$src))), (nxv2f64 ZPR:$src)>;
+
+  def : Pat<(nxv8i16 (bitconvert (nxv8bf16 ZPR:$src))), (nxv8i16 ZPR:$src)>;
+  def : Pat<(nxv8bf16 (bitconvert (nxv8i16 ZPR:$src))), (nxv8bf16 ZPR:$src)>;
+
+  def : Pat<(nxv8bf16 (bitconvert (nxv8f16 ZPR:$src))), (nxv8bf16 ZPR:$src)>;
+  def : Pat<(nxv8f16 (bitconvert (nxv8bf16 ZPR:$src))), (nxv8f16 ZPR:$src)>;
+
+  def : Pat<(nxv4bf16 (bitconvert (nxv4f16 ZPR:$src))), (nxv4bf16 ZPR:$src)>;
+  def : Pat<(nxv4f16 (bitconvert (nxv4bf16 ZPR:$src))), (nxv4f16 ZPR:$src)>;
+
+  def : Pat<(nxv2bf16 (bitconvert (nxv2f16 ZPR:$src))), (nxv2bf16 ZPR:$src)>;
+  def : Pat<(nxv2f16 (bitconvert (nxv2bf16 ZPR:$src))), (nxv2f16 ZPR:$src)>;
+
+  def : Pat<(nxv16i1 (bitconvert (aarch64svcount PNR:$src))), (nxv16i1 PPR:$src)>;
+  def : Pat<(aarch64svcount (bitconvert (nxv16i1 PPR:$src))), (aarch64svcount PNR:$src)>;
+
+  // These allow nop casting between predicate vector types.
+  foreach VT = [ nxv16i1, nxv8i1, nxv4i1, nxv2i1, nxv1i1 ] in
+    foreach VT2 = [ nxv16i1, nxv8i1, nxv4i1, nxv2i1, nxv1i1 ] in
+      def : Pat<(VT (reinterpret_cast (VT2 PPR:$src))), (COPY_TO_REGCLASS PPR:$src, PPR)>;
+
+  // These allow nop casting between half vector types.
+  foreach VT = [ nxv2f16, nxv4f16, nxv8f16 ] in
+    foreach VT2 = [ nxv2f16, nxv4f16, nxv8f16 ] in
+      def : Pat<(VT (reinterpret_cast (VT2 ZPR:$src))), (COPY_TO_REGCLASS ZPR:$src, ZPR)>;
+
+  // These allow nop casting between float vector types.
+  foreach VT = [ nxv2f32, nxv4f32 ] in
+    foreach VT2 = [ nxv2f32, nxv4f32 ] in
+      def : Pat<(VT (reinterpret_cast (VT2 ZPR:$src))), (COPY_TO_REGCLASS ZPR:$src, ZPR)>;
+
+  // These allow nop casting between bfloat vector types.
+  foreach VT = [ nxv2bf16, nxv4bf16, nxv8bf16 ] in
+    foreach VT2 = [ nxv2bf16, nxv4bf16, nxv8bf16 ] in
+      def : Pat<(VT (reinterpret_cast (VT2 ZPR:$src))), (COPY_TO_REGCLASS ZPR:$src, ZPR)>;
+
+  // These allow nop casting between all packed vector types.
+  foreach VT = [ nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv8f16, nxv4f32, nxv2f64, nxv8bf16 ] in
+    foreach VT2 = [ nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv8f16, nxv4f32, nxv2f64, nxv8bf16 ] in
+      def : Pat<(VT (AArch64NvCast (VT2 ZPR:$src))), (VT ZPR:$src)>;
 
   def : Pat<(nxv16i1 (and PPR:$Ps1, PPR:$Ps2)),
             (AND_PPzPP (PTRUE_B 31), PPR:$Ps1, PPR:$Ps2)>;
diff --git a/llvm/test/CodeGen/AArch64/sve-bitcast.ll b/llvm/test/CodeGen/AArch64/sve-bitcast.ll
index 95f43ba5126323..5d12d41ac3332f 100644
--- a/llvm/test/CodeGen/AArch64/sve-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/sve-bitcast.ll
@@ -13,14 +13,8 @@ define <vscale x 16 x i8> @bitcast_nxv8i16_to_nxv16i8(<vscale x 8 x i16> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8i16_to_nxv16i8:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.b
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1b { z0.b }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x i16> %v to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %bc
@@ -33,14 +27,8 @@ define <vscale x 16 x i8> @bitcast_nxv4i32_to_nxv16i8(<vscale x 4 x i32> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4i32_to_nxv16i8:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.b
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1b { z0.b }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x i32> %v to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %bc
@@ -53,14 +41,8 @@ define <vscale x 16 x i8> @bitcast_nxv2i64_to_nxv16i8(<vscale x 2 x i64> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2i64_to_nxv16i8:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.b
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1b { z0.b }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x i64> %v to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %bc
@@ -73,14 +55,8 @@ define <vscale x 16 x i8> @bitcast_nxv8f16_to_nxv16i8(<vscale x 8 x half> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8f16_to_nxv16i8:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.b
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1b { z0.b }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x half> %v to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %bc
@@ -93,14 +69,8 @@ define <vscale x 16 x i8> @bitcast_nxv4f32_to_nxv16i8(<vscale x 4 x float> %v) #
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4f32_to_nxv16i8:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.b
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1b { z0.b }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x float> %v to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %bc
@@ -113,14 +83,8 @@ define <vscale x 16 x i8> @bitcast_nxv2f64_to_nxv16i8(<vscale x 2 x double> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2f64_to_nxv16i8:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.b
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1b { z0.b }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x double> %v to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %bc
@@ -133,14 +97,8 @@ define <vscale x 16 x i8> @bitcast_nxv8bf16_to_nxv16i8(<vscale x 8 x bfloat> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8bf16_to_nxv16i8:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.b
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1b { z0.b }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x bfloat> %v to <vscale x 16 x i8>
   ret <vscale x 16 x i8> %bc
@@ -157,14 +115,8 @@ define <vscale x 8 x i16> @bitcast_nxv16i8_to_nxv8i16(<vscale x 16 x i8> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv16i8_to_nxv8i16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.b
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 16 x i8> %v to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %bc
@@ -177,14 +129,10 @@ define <vscale x 8 x i16> @bitcast_nxv4i32_to_nxv8i16(<vscale x 4 x i32> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4i32_to_nxv8i16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x i32> %v to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %bc
@@ -197,14 +145,10 @@ define <vscale x 8 x i16> @bitcast_nxv2i64_to_nxv8i16(<vscale x 2 x i64> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2i64_to_nxv8i16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x i64> %v to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %bc
@@ -217,13 +161,6 @@ define <vscale x 8 x i16> @bitcast_nxv8f16_to_nxv8i16(<vscale x 8 x half> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8f16_to_nxv8i16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p0/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x half> %v to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %bc
@@ -236,14 +173,10 @@ define <vscale x 8 x i16> @bitcast_nxv4f32_to_nxv8i16(<vscale x 4 x float> %v) #
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4f32_to_nxv8i16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x float> %v to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %bc
@@ -256,14 +189,10 @@ define <vscale x 8 x i16> @bitcast_nxv2f64_to_nxv8i16(<vscale x 2 x double> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2f64_to_nxv8i16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x double> %v to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %bc
@@ -276,13 +205,6 @@ define <vscale x 8 x i16> @bitcast_nxv8bf16_to_nxv8i16(<vscale x 8 x bfloat> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8bf16_to_nxv8i16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p0/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x bfloat> %v to <vscale x 8 x i16>
   ret <vscale x 8 x i16> %bc
@@ -299,14 +221,8 @@ define <vscale x 4 x i32> @bitcast_nxv16i8_to_nxv4i32(<vscale x 16 x i8> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv16i8_to_nxv4i32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.b
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 16 x i8> %v to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %bc
@@ -319,14 +235,10 @@ define <vscale x 4 x i32> @bitcast_nxv8i16_to_nxv4i32(<vscale x 8 x i16> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8i16_to_nxv4i32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x i16> %v to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %bc
@@ -339,14 +251,10 @@ define <vscale x 4 x i32> @bitcast_nxv2i64_to_nxv4i32(<vscale x 2 x i64> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2i64_to_nxv4i32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x i64> %v to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %bc
@@ -359,14 +267,10 @@ define <vscale x 4 x i32> @bitcast_nxv8f16_to_nxv4i32(<vscale x 8 x half> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8f16_to_nxv4i32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x half> %v to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %bc
@@ -379,13 +283,6 @@ define <vscale x 4 x i32> @bitcast_nxv4f32_to_nxv4i32(<vscale x 4 x float> %v) #
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4f32_to_nxv4i32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p0/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x float> %v to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %bc
@@ -398,14 +295,10 @@ define <vscale x 4 x i32> @bitcast_nxv2f64_to_nxv4i32(<vscale x 2 x double> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2f64_to_nxv4i32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x double> %v to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %bc
@@ -418,14 +311,10 @@ define <vscale x 4 x i32> @bitcast_nxv8bf16_to_nxv4i32(<vscale x 8 x bfloat> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8bf16_to_nxv4i32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x bfloat> %v to <vscale x 4 x i32>
   ret <vscale x 4 x i32> %bc
@@ -442,14 +331,8 @@ define <vscale x 2 x i64> @bitcast_nxv16i8_to_nxv2i64(<vscale x 16 x i8> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv16i8_to_nxv2i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.b
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 16 x i8> %v to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %bc
@@ -462,14 +345,10 @@ define <vscale x 2 x i64> @bitcast_nxv8i16_to_nxv2i64(<vscale x 8 x i16> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8i16_to_nxv2i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x i16> %v to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %bc
@@ -482,14 +361,10 @@ define <vscale x 2 x i64> @bitcast_nxv4i32_to_nxv2i64(<vscale x 4 x i32> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4i32_to_nxv2i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x i32> %v to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %bc
@@ -502,14 +377,10 @@ define <vscale x 2 x i64> @bitcast_nxv8f16_to_nxv2i64(<vscale x 8 x half> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8f16_to_nxv2i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x half> %v to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %bc
@@ -522,14 +393,10 @@ define <vscale x 2 x i64> @bitcast_nxv4f32_to_nxv2i64(<vscale x 4 x float> %v) #
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4f32_to_nxv2i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x float> %v to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %bc
@@ -542,13 +409,6 @@ define <vscale x 2 x i64> @bitcast_nxv2f64_to_nxv2i64(<vscale x 2 x double> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2f64_to_nxv2i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p0/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x double> %v to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %bc
@@ -561,14 +421,10 @@ define <vscale x 2 x i64> @bitcast_nxv8bf16_to_nxv2i64(<vscale x 8 x bfloat> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8bf16_to_nxv2i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x bfloat> %v to <vscale x 2 x i64>
   ret <vscale x 2 x i64> %bc
@@ -585,14 +441,8 @@ define <vscale x 8 x half> @bitcast_nxv16i8_to_nxv8f16(<vscale x 16 x i8> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv16i8_to_nxv8f16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.b
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 16 x i8> %v to <vscale x 8 x half>
   ret <vscale x 8 x half> %bc
@@ -605,13 +455,6 @@ define <vscale x 8 x half> @bitcast_nxv8i16_to_nxv8f16(<vscale x 8 x i16> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8i16_to_nxv8f16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p0/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x i16> %v to <vscale x 8 x half>
   ret <vscale x 8 x half> %bc
@@ -624,14 +467,10 @@ define <vscale x 8 x half> @bitcast_nxv4i32_to_nxv8f16(<vscale x 4 x i32> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4i32_to_nxv8f16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x i32> %v to <vscale x 8 x half>
   ret <vscale x 8 x half> %bc
@@ -644,14 +483,10 @@ define <vscale x 8 x half> @bitcast_nxv2i64_to_nxv8f16(<vscale x 2 x i64> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2i64_to_nxv8f16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x i64> %v to <vscale x 8 x half>
   ret <vscale x 8 x half> %bc
@@ -664,14 +499,10 @@ define <vscale x 8 x half> @bitcast_nxv4f32_to_nxv8f16(<vscale x 4 x float> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4f32_to_nxv8f16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x float> %v to <vscale x 8 x half>
   ret <vscale x 8 x half> %bc
@@ -684,14 +515,10 @@ define <vscale x 8 x half> @bitcast_nxv2f64_to_nxv8f16(<vscale x 2 x double> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2f64_to_nxv8f16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x double> %v to <vscale x 8 x half>
   ret <vscale x 8 x half> %bc
@@ -704,13 +531,6 @@ define <vscale x 8 x half> @bitcast_nxv8bf16_to_nxv8f16(<vscale x 8 x bfloat> %v
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8bf16_to_nxv8f16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p0/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x bfloat> %v to <vscale x 8 x half>
   ret <vscale x 8 x half> %bc
@@ -727,14 +547,8 @@ define <vscale x 4 x float> @bitcast_nxv16i8_to_nxv4f32(<vscale x 16 x i8> %v) #
 ;
 ; CHECK_BE-LABEL: bitcast_nxv16i8_to_nxv4f32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.b
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 16 x i8> %v to <vscale x 4 x float>
   ret <vscale x 4 x float> %bc
@@ -747,14 +561,10 @@ define <vscale x 4 x float> @bitcast_nxv8i16_to_nxv4f32(<vscale x 8 x i16> %v) #
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8i16_to_nxv4f32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x i16> %v to <vscale x 4 x float>
   ret <vscale x 4 x float> %bc
@@ -767,13 +577,6 @@ define <vscale x 4 x float> @bitcast_nxv4i32_to_nxv4f32(<vscale x 4 x i32> %v) #
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4i32_to_nxv4f32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p0/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x i32> %v to <vscale x 4 x float>
   ret <vscale x 4 x float> %bc
@@ -786,14 +589,10 @@ define <vscale x 4 x float> @bitcast_nxv2i64_to_nxv4f32(<vscale x 2 x i64> %v) #
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2i64_to_nxv4f32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x i64> %v to <vscale x 4 x float>
   ret <vscale x 4 x float> %bc
@@ -806,14 +605,10 @@ define <vscale x 4 x float> @bitcast_nxv8f16_to_nxv4f32(<vscale x 8 x half> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8f16_to_nxv4f32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x half> %v to <vscale x 4 x float>
   ret <vscale x 4 x float> %bc
@@ -826,14 +621,10 @@ define <vscale x 4 x float> @bitcast_nxv2f64_to_nxv4f32(<vscale x 2 x double> %v
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2f64_to_nxv4f32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x double> %v to <vscale x 4 x float>
   ret <vscale x 4 x float> %bc
@@ -846,14 +637,10 @@ define <vscale x 4 x float> @bitcast_nxv8bf16_to_nxv4f32(<vscale x 8 x bfloat> %
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8bf16_to_nxv4f32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x bfloat> %v to <vscale x 4 x float>
   ret <vscale x 4 x float> %bc
@@ -870,14 +657,8 @@ define <vscale x 2 x double> @bitcast_nxv16i8_to_nxv2f64(<vscale x 16 x i8> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv16i8_to_nxv2f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.b
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 16 x i8> %v to <vscale x 2 x double>
   ret <vscale x 2 x double> %bc
@@ -890,14 +671,10 @@ define <vscale x 2 x double> @bitcast_nxv8i16_to_nxv2f64(<vscale x 8 x i16> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8i16_to_nxv2f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x i16> %v to <vscale x 2 x double>
   ret <vscale x 2 x double> %bc
@@ -910,14 +687,10 @@ define <vscale x 2 x double> @bitcast_nxv4i32_to_nxv2f64(<vscale x 4 x i32> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4i32_to_nxv2f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x i32> %v to <vscale x 2 x double>
   ret <vscale x 2 x double> %bc
@@ -930,13 +703,6 @@ define <vscale x 2 x double> @bitcast_nxv2i64_to_nxv2f64(<vscale x 2 x i64> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2i64_to_nxv2f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p0/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x i64> %v to <vscale x 2 x double>
   ret <vscale x 2 x double> %bc
@@ -949,14 +715,10 @@ define <vscale x 2 x double> @bitcast_nxv8f16_to_nxv2f64(<vscale x 8 x half> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8f16_to_nxv2f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x half> %v to <vscale x 2 x double>
   ret <vscale x 2 x double> %bc
@@ -969,14 +731,10 @@ define <vscale x 2 x double> @bitcast_nxv4f32_to_nxv2f64(<vscale x 4 x float> %v
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4f32_to_nxv2f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x float> %v to <vscale x 2 x double>
   ret <vscale x 2 x double> %bc
@@ -989,14 +747,10 @@ define <vscale x 2 x double> @bitcast_nxv8bf16_to_nxv2f64(<vscale x 8 x bfloat>
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8bf16_to_nxv2f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x bfloat> %v to <vscale x 2 x double>
   ret <vscale x 2 x double> %bc
@@ -1013,14 +767,8 @@ define <vscale x 8 x bfloat> @bitcast_nxv16i8_to_nxv8bf16(<vscale x 16 x i8> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv16i8_to_nxv8bf16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.b
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 16 x i8> %v to <vscale x 8 x bfloat>
   ret <vscale x 8 x bfloat> %bc
@@ -1033,13 +781,6 @@ define <vscale x 8 x bfloat> @bitcast_nxv8i16_to_nxv8bf16(<vscale x 8 x i16> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8i16_to_nxv8bf16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p0/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x i16> %v to <vscale x 8 x bfloat>
   ret <vscale x 8 x bfloat> %bc
@@ -1052,14 +793,10 @@ define <vscale x 8 x bfloat> @bitcast_nxv4i32_to_nxv8bf16(<vscale x 4 x i32> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4i32_to_nxv8bf16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x i32> %v to <vscale x 8 x bfloat>
   ret <vscale x 8 x bfloat> %bc
@@ -1072,14 +809,10 @@ define <vscale x 8 x bfloat> @bitcast_nxv2i64_to_nxv8bf16(<vscale x 2 x i64> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2i64_to_nxv8bf16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x i64> %v to <vscale x 8 x bfloat>
   ret <vscale x 8 x bfloat> %bc
@@ -1092,13 +825,6 @@ define <vscale x 8 x bfloat> @bitcast_nxv8f16_to_nxv8bf16(<vscale x 8 x half> %v
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8f16_to_nxv8bf16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p0/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x half> %v to <vscale x 8 x bfloat>
   ret <vscale x 8 x bfloat> %bc
@@ -1111,14 +837,10 @@ define <vscale x 8 x bfloat> @bitcast_nxv4f32_to_nxv8bf16(<vscale x 4 x float> %
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4f32_to_nxv8bf16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x float> %v to <vscale x 8 x bfloat>
   ret <vscale x 8 x bfloat> %bc
@@ -1131,14 +853,10 @@ define <vscale x 8 x bfloat> @bitcast_nxv2f64_to_nxv8bf16(<vscale x 2 x double>
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2f64_to_nxv8bf16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x double> %v to <vscale x 8 x bfloat>
   ret <vscale x 8 x bfloat> %bc
@@ -1212,15 +930,9 @@ define <vscale x 8 x i8> @bitcast_nxv1i64_to_nxv8i8(<vscale x 1 x i64> %v) #0 {
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1i64_to_nxv8i8:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.b
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1b { z0.b }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    uunpklo z0.h, z0.b
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x i64> %v to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %bc
@@ -1290,15 +1002,9 @@ define <vscale x 8 x i8> @bitcast_nxv1f64_to_nxv8i8(<vscale x 1 x double> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1f64_to_nxv8i8:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.b
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1b { z0.b }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    uunpklo z0.h, z0.b
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x double> %v to <vscale x 8 x i8>
   ret <vscale x 8 x i8> %bc
@@ -1400,15 +1106,11 @@ define <vscale x 4 x i16> @bitcast_nxv1i64_to_nxv4i16(<vscale x 1 x i64> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1i64_to_nxv4i16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    uunpklo z0.s, z0.h
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x i64> %v to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %bc
@@ -1420,15 +1122,11 @@ define <vscale x 4 x i16> @bitcast_nxv4f16_to_nxv4i16(<vscale x 4 x half> %v) #0
 ; CHECK-NEXT:    ret
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4f16_to_nxv4i16:
-; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE:       // %bb.0:
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x half> %v to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %bc
@@ -1470,15 +1168,11 @@ define <vscale x 4 x i16> @bitcast_nxv1f64_to_nxv4i16(<vscale x 1 x double> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1f64_to_nxv4i16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    uunpklo z0.s, z0.h
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x double> %v to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %bc
@@ -1491,14 +1185,10 @@ define <vscale x 4 x i16> @bitcast_nxv4bf16_to_nxv4i16(<vscale x 4 x bfloat> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4bf16_to_nxv4i16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x bfloat> %v to <vscale x 4 x i16>
   ret <vscale x 4 x i16> %bc
@@ -1572,15 +1262,11 @@ define <vscale x 2 x i32> @bitcast_nxv1i64_to_nxv2i32(<vscale x 1 x i64> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1i64_to_nxv2i32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    uunpklo z0.d, z0.s
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x i64> %v to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %bc
@@ -1621,14 +1307,10 @@ define <vscale x 2 x i32> @bitcast_nxv2f32_to_nxv2i32(<vscale x 2 x float> %v) #
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2f32_to_nxv2i32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x float> %v to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %bc
@@ -1642,15 +1324,11 @@ define <vscale x 2 x i32> @bitcast_nxv1f64_to_nxv2i32(<vscale x 1 x double> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1f64_to_nxv2i32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    uunpklo z0.d, z0.s
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x double> %v to <vscale x 2 x i32>
   ret <vscale x 2 x i32> %bc
@@ -1696,15 +1374,9 @@ define <vscale x 1 x i64> @bitcast_nxv8i8_to_nxv1i64(<vscale x 8 x i8> %v) #0 {
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8i8_to_nxv1i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    uzp1 z0.b, z0.b, z0.b
-; CHECK_BE-NEXT:    ptrue p0.b
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x i8> %v to <vscale x 1 x i64>
   ret <vscale x 1 x i64> %bc
@@ -1718,15 +1390,11 @@ define <vscale x 1 x i64> @bitcast_nxv4i16_to_nxv1i64(<vscale x 4 x i16> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4i16_to_nxv1i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x i16> %v to <vscale x 1 x i64>
   ret <vscale x 1 x i64> %bc
@@ -1740,15 +1408,11 @@ define <vscale x 1 x i64> @bitcast_nxv2i32_to_nxv1i64(<vscale x 2 x i32> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2i32_to_nxv1i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x i32> %v to <vscale x 1 x i64>
   ret <vscale x 1 x i64> %bc
@@ -1762,20 +1426,14 @@ define <vscale x 1 x i64> @bitcast_nxv4f16_to_nxv1i64(<vscale x 4 x half> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4f16_to_nxv1i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-3
 ; CHECK_BE-NEXT:    ptrue p0.h
 ; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    revb z0.s, p1/m, z0.s
 ; CHECK_BE-NEXT:    uzp1 z0.h, z0.h, z0.h
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p0/z, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp, #2, mul vl]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp, #2, mul vl]
-; CHECK_BE-NEXT:    addvl sp, sp, #3
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x half> %v to <vscale x 1 x i64>
   ret <vscale x 1 x i64> %bc
@@ -1789,19 +1447,13 @@ define <vscale x 1 x i64> @bitcast_nxv2f32_to_nxv1i64(<vscale x 2 x float> %v) #
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2f32_to_nxv1i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-3
 ; CHECK_BE-NEXT:    ptrue p0.s
 ; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    revb z0.d, p1/m, z0.d
 ; CHECK_BE-NEXT:    uzp1 z0.s, z0.s, z0.s
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p0/z, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp, #2, mul vl]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp, #2, mul vl]
-; CHECK_BE-NEXT:    addvl sp, sp, #3
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    revb z0.d, p1/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x float> %v to <vscale x 1 x i64>
   ret <vscale x 1 x i64> %bc
@@ -1814,13 +1466,6 @@ define <vscale x 1 x i64> @bitcast_nxv1f64_to_nxv1i64(<vscale x 1 x double> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1f64_to_nxv1i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p0/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x double> %v to <vscale x 1 x i64>
   ret <vscale x 1 x i64> %bc
@@ -1834,20 +1479,14 @@ define <vscale x 1 x i64> @bitcast_nxv4bf16_to_nxv1i64(<vscale x 4 x bfloat> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4bf16_to_nxv1i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-3
 ; CHECK_BE-NEXT:    ptrue p0.h
 ; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    revb z0.s, p1/m, z0.s
 ; CHECK_BE-NEXT:    uzp1 z0.h, z0.h, z0.h
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p0/z, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp, #2, mul vl]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp, #2, mul vl]
-; CHECK_BE-NEXT:    addvl sp, sp, #3
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x bfloat> %v to <vscale x 1 x i64>
   ret <vscale x 1 x i64> %bc
@@ -1892,14 +1531,10 @@ define <vscale x 4 x half> @bitcast_nxv4i16_to_nxv4f16(<vscale x 4 x i16> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4i16_to_nxv4f16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x i16> %v to <vscale x 4 x half>
   ret <vscale x 4 x half> %bc
@@ -1941,15 +1576,11 @@ define <vscale x 4 x half> @bitcast_nxv1i64_to_nxv4f16(<vscale x 1 x i64> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1i64_to_nxv4f16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    uunpklo z0.s, z0.h
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x i64> %v to <vscale x 4 x half>
   ret <vscale x 4 x half> %bc
@@ -1991,15 +1622,11 @@ define <vscale x 4 x half> @bitcast_nxv1f64_to_nxv4f16(<vscale x 1 x double> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1f64_to_nxv4f16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    uunpklo z0.s, z0.h
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x double> %v to <vscale x 4 x half>
   ret <vscale x 4 x half> %bc
@@ -2012,13 +1639,6 @@ define <vscale x 4 x half> @bitcast_nxv4bf16_to_nxv4f16(<vscale x 4 x bfloat> %v
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4bf16_to_nxv4f16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x bfloat> %v to <vscale x 4 x half>
   ret <vscale x 4 x half> %bc
@@ -2091,14 +1711,10 @@ define <vscale x 2 x float> @bitcast_nxv2i32_to_nxv2f32(<vscale x 2 x i32> %v) #
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2i32_to_nxv2f32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x i32> %v to <vscale x 2 x float>
   ret <vscale x 2 x float> %bc
@@ -2112,15 +1728,11 @@ define <vscale x 2 x float> @bitcast_nxv1i64_to_nxv2f32(<vscale x 1 x i64> %v) #
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1i64_to_nxv2f32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    uunpklo z0.d, z0.s
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x i64> %v to <vscale x 2 x float>
   ret <vscale x 2 x float> %bc
@@ -2162,15 +1774,11 @@ define <vscale x 2 x float> @bitcast_nxv1f64_to_nxv2f32(<vscale x 1 x double> %v
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1f64_to_nxv2f32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    uunpklo z0.d, z0.s
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x double> %v to <vscale x 2 x float>
   ret <vscale x 2 x float> %bc
@@ -2216,15 +1824,9 @@ define <vscale x 1 x double> @bitcast_nxv8i8_to_nxv1f64(<vscale x 8 x i8> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv8i8_to_nxv1f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    uzp1 z0.b, z0.b, z0.b
-; CHECK_BE-NEXT:    ptrue p0.b
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 8 x i8> %v to <vscale x 1 x double>
   ret <vscale x 1 x double> %bc
@@ -2238,15 +1840,11 @@ define <vscale x 1 x double> @bitcast_nxv4i16_to_nxv1f64(<vscale x 4 x i16> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4i16_to_nxv1f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x i16> %v to <vscale x 1 x double>
   ret <vscale x 1 x double> %bc
@@ -2260,15 +1858,11 @@ define <vscale x 1 x double> @bitcast_nxv2i32_to_nxv1f64(<vscale x 2 x i32> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2i32_to_nxv1f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x i32> %v to <vscale x 1 x double>
   ret <vscale x 1 x double> %bc
@@ -2281,13 +1875,6 @@ define <vscale x 1 x double> @bitcast_nxv1i64_to_nxv1f64(<vscale x 1 x i64> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1i64_to_nxv1f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p0/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x i64> %v to <vscale x 1 x double>
   ret <vscale x 1 x double> %bc
@@ -2301,20 +1888,14 @@ define <vscale x 1 x double> @bitcast_nxv4f16_to_nxv1f64(<vscale x 4 x half> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4f16_to_nxv1f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-3
 ; CHECK_BE-NEXT:    ptrue p0.h
 ; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    revb z0.s, p1/m, z0.s
 ; CHECK_BE-NEXT:    uzp1 z0.h, z0.h, z0.h
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p0/z, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp, #2, mul vl]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp, #2, mul vl]
-; CHECK_BE-NEXT:    addvl sp, sp, #3
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x half> %v to <vscale x 1 x double>
   ret <vscale x 1 x double> %bc
@@ -2328,19 +1909,13 @@ define <vscale x 1 x double> @bitcast_nxv2f32_to_nxv1f64(<vscale x 2 x float> %v
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2f32_to_nxv1f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-3
 ; CHECK_BE-NEXT:    ptrue p0.s
 ; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    revb z0.d, p1/m, z0.d
 ; CHECK_BE-NEXT:    uzp1 z0.s, z0.s, z0.s
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p0/z, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp, #2, mul vl]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp, #2, mul vl]
-; CHECK_BE-NEXT:    addvl sp, sp, #3
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    revb z0.d, p1/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x float> %v to <vscale x 1 x double>
   ret <vscale x 1 x double> %bc
@@ -2354,20 +1929,14 @@ define <vscale x 1 x double> @bitcast_nxv4bf16_to_nxv1f64(<vscale x 4 x bfloat>
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4bf16_to_nxv1f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-3
 ; CHECK_BE-NEXT:    ptrue p0.h
 ; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    ptrue p1.d
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    revb z0.s, p1/m, z0.s
 ; CHECK_BE-NEXT:    uzp1 z0.h, z0.h, z0.h
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p0/z, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp, #2, mul vl]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp, #2, mul vl]
-; CHECK_BE-NEXT:    addvl sp, sp, #3
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x bfloat> %v to <vscale x 1 x double>
   ret <vscale x 1 x double> %bc
@@ -2412,14 +1981,10 @@ define <vscale x 4 x bfloat> @bitcast_nxv4i16_to_nxv4bf16(<vscale x 4 x i16> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4i16_to_nxv4bf16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x i16> %v to <vscale x 4 x bfloat>
   ret <vscale x 4 x bfloat> %bc
@@ -2461,15 +2026,11 @@ define <vscale x 4 x bfloat> @bitcast_nxv1i64_to_nxv4bf16(<vscale x 1 x i64> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1i64_to_nxv4bf16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    uunpklo z0.s, z0.h
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x i64> %v to <vscale x 4 x bfloat>
   ret <vscale x 4 x bfloat> %bc
@@ -2482,13 +2043,6 @@ define <vscale x 4 x bfloat> @bitcast_nxv4f16_to_nxv4bf16(<vscale x 4 x half> %v
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4f16_to_nxv4bf16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    st1h { z0.s }, p0, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.s }, p0/z, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x half> %v to <vscale x 4 x bfloat>
   ret <vscale x 4 x bfloat> %bc
@@ -2530,15 +2084,11 @@ define <vscale x 4 x bfloat> @bitcast_nxv1f64_to_nxv4bf16(<vscale x 1 x double>
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1f64_to_nxv4bf16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    uunpklo z0.s, z0.h
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x double> %v to <vscale x 4 x bfloat>
   ret <vscale x 4 x bfloat> %bc
@@ -2585,16 +2135,10 @@ define <vscale x 4 x i8> @bitcast_nxv1i32_to_nxv4i8(<vscale x 1 x i32> %v) #0 {
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1i32_to_nxv4i8:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.b
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1b { z0.b }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    uunpklo z0.h, z0.b
 ; CHECK_BE-NEXT:    uunpklo z0.s, z0.h
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x i32> %v to <vscale x 4 x i8>
   ret <vscale x 4 x i8> %bc
@@ -2699,16 +2243,12 @@ define <vscale x 2 x i16> @bitcast_nxv1i32_to_nxv2i16(<vscale x 1 x i32> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1i32_to_nxv2i16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1w { z0.s }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    uunpklo z0.s, z0.h
 ; CHECK_BE-NEXT:    uunpklo z0.d, z0.s
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x i32> %v to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %bc
@@ -2721,14 +2261,10 @@ define <vscale x 2 x i16> @bitcast_nxv2f16_to_nxv2i16(<vscale x 2 x half> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2f16_to_nxv2i16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x half> %v to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %bc
@@ -2743,14 +2279,10 @@ define <vscale x 2 x i16> @bitcast_nxv2bf16_to_nxv2i16(<vscale x 2 x bfloat> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2bf16_to_nxv2i16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x bfloat> %v to <vscale x 2 x i16>
   ret <vscale x 2 x i16> %bc
@@ -2769,16 +2301,10 @@ define <vscale x 1 x i32> @bitcast_nxv4i8_to_nxv1i32(<vscale x 4 x i8> %v) #0 {
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4i8_to_nxv1i32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    uzp1 z0.h, z0.h, z0.h
-; CHECK_BE-NEXT:    ptrue p0.b
-; CHECK_BE-NEXT:    ptrue p1.s
+; CHECK_BE-NEXT:    ptrue p0.s
 ; CHECK_BE-NEXT:    uzp1 z0.b, z0.b, z0.b
-; CHECK_BE-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 4 x i8> %v to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %bc
@@ -2793,16 +2319,12 @@ define <vscale x 1 x i32> @bitcast_nxv2i16_to_nxv1i32(<vscale x 2 x i16> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2i16_to_nxv1i32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
 ; CHECK_BE-NEXT:    uzp1 z0.h, z0.h, z0.h
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
+; CHECK_BE-NEXT:    ptrue p0.s
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x i16> %v to <vscale x 1 x i32>
   ret <vscale x 1 x i32> %bc
@@ -2824,15 +2346,15 @@ define <vscale x 1 x i32> @bitcast_nxv2f16_to_nxv1i32(<vscale x 2 x half> %v) #0
 ; CHECK_BE-LABEL: bitcast_nxv2f16_to_nxv1i32:
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-2
+; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
 ; CHECK_BE-NEXT:    ptrue p1.h
 ; CHECK_BE-NEXT:    st1h { z0.d }, p0, [sp]
 ; CHECK_BE-NEXT:    ptrue p0.s
 ; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    st1h { z0.h }, p1, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p0/z, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    addvl sp, sp, #2
+; CHECK_BE-NEXT:    revb z0.h, p1/m, z0.h
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x half> %v to <vscale x 1 x i32>
@@ -2857,15 +2379,15 @@ define <vscale x 1 x i32> @bitcast_nxv2bf16_to_nxv1i32(<vscale x 2 x bfloat> %v)
 ; CHECK_BE-LABEL: bitcast_nxv2bf16_to_nxv1i32:
 ; CHECK_BE:       // %bb.0:
 ; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-2
+; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
 ; CHECK_BE-NEXT:    ptrue p1.h
 ; CHECK_BE-NEXT:    st1h { z0.d }, p0, [sp]
 ; CHECK_BE-NEXT:    ptrue p0.s
 ; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    st1h { z0.h }, p1, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p0/z, [sp, #1, mul vl]
-; CHECK_BE-NEXT:    addvl sp, sp, #2
+; CHECK_BE-NEXT:    revb z0.h, p1/m, z0.h
+; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
+; CHECK_BE-NEXT:    addvl sp, sp, #1
 ; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x bfloat> %v to <vscale x 1 x i32>
@@ -2911,14 +2433,10 @@ define <vscale x 2 x half> @bitcast_nxv2i16_to_nxv2f16(<vscale x 2 x i16> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2i16_to_nxv2f16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x i16> %v to <vscale x 2 x half>
   ret <vscale x 2 x half> %bc
@@ -2934,13 +2452,6 @@ define <vscale x 2 x half> @bitcast_nxv2bf16_to_nxv2f16(<vscale x 2 x bfloat> %v
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2bf16_to_nxv2f16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    st1h { z0.d }, p0, [sp, #3, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.d }, p0/z, [sp, #3, mul vl]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x bfloat> %v to <vscale x 2 x half>
   ret <vscale x 2 x half> %bc
@@ -2995,14 +2506,10 @@ define <vscale x 2 x bfloat> @bitcast_nxv2i16_to_nxv2bf16(<vscale x 2 x i16> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2i16_to_nxv2bf16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    ptrue p1.h
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.h
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x i16> %v to <vscale x 2 x bfloat>
   ret <vscale x 2 x bfloat> %bc
@@ -3017,13 +2524,6 @@ define <vscale x 2 x bfloat> @bitcast_nxv2f16_to_nxv2bf16(<vscale x 2 x half> %v
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2f16_to_nxv2bf16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
-; CHECK_BE-NEXT:    ptrue p0.d
-; CHECK_BE-NEXT:    st1h { z0.d }, p0, [sp, #3, mul vl]
-; CHECK_BE-NEXT:    ld1h { z0.d }, p0/z, [sp, #3, mul vl]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x half> %v to <vscale x 2 x bfloat>
   ret <vscale x 2 x bfloat> %bc
@@ -3045,17 +2545,11 @@ define <vscale x 2 x i8> @bitcast_nxv1i16_to_nxv2i8(<vscale x 1 x i16> %v) #0 {
 ;
 ; CHECK_BE-LABEL: bitcast_nxv1i16_to_nxv2i8:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.b
-; CHECK_BE-NEXT:    st1h { z0.h }, p0, [sp]
-; CHECK_BE-NEXT:    ld1b { z0.b }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    uunpklo z0.h, z0.b
 ; CHECK_BE-NEXT:    uunpklo z0.s, z0.h
 ; CHECK_BE-NEXT:    uunpklo z0.d, z0.s
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 1 x i16> %v to <vscale x 2 x i8>
   ret <vscale x 2 x i8> %bc
@@ -3078,17 +2572,11 @@ define <vscale x 1 x i16> @bitcast_nxv2i8_to_nxv1i16(<vscale x 2 x i8> %v) #0 {
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2i8_to_nxv1i16:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    uzp1 z0.s, z0.s, z0.s
-; CHECK_BE-NEXT:    ptrue p0.b
-; CHECK_BE-NEXT:    ptrue p1.h
+; CHECK_BE-NEXT:    ptrue p0.h
 ; CHECK_BE-NEXT:    uzp1 z0.h, z0.h, z0.h
 ; CHECK_BE-NEXT:    uzp1 z0.b, z0.b, z0.b
-; CHECK_BE-NEXT:    st1b { z0.b }, p0, [sp]
-; CHECK_BE-NEXT:    ld1h { z0.h }, p1/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x i8> %v to <vscale x 1 x i16>
   ret <vscale x 1 x i16> %bc
@@ -3126,15 +2614,11 @@ define <vscale x 2 x i32> @bitcast_short_float_to_i32(<vscale x 2 x double> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_short_float_to_i32:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
 ; CHECK_BE-NEXT:    ptrue p1.s
 ; CHECK_BE-NEXT:    fcvt z0.s, p0/m, z0.d
-; CHECK_BE-NEXT:    st1w { z0.s }, p1, [sp]
-; CHECK_BE-NEXT:    ld1d { z0.d }, p0/z, [sp]
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK_BE-NEXT:    revb z0.s, p1/m, z0.s
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %trunc = fptrunc <vscale x 2 x double> %v to <vscale x 2 x float>
   %bitcast = bitcast <vscale x 2 x float> %trunc to <vscale x 2 x i32>
@@ -3150,15 +2634,11 @@ define <vscale x 2 x double> @bitcast_short_i32_to_float(<vscale x 2 x i64> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_short_i32_to_float:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK_BE-NEXT:    addvl sp, sp, #-1
 ; CHECK_BE-NEXT:    ptrue p0.d
 ; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    st1d { z0.d }, p0, [sp]
-; CHECK_BE-NEXT:    ld1w { z0.s }, p1/z, [sp]
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
+; CHECK_BE-NEXT:    revb z0.s, p1/m, z0.s
 ; CHECK_BE-NEXT:    fcvt z0.d, p0/m, z0.s
-; CHECK_BE-NEXT:    addvl sp, sp, #1
-; CHECK_BE-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
 ; CHECK_BE-NEXT:    ret
   %trunc = trunc <vscale x 2 x i64> %v to <vscale x 2 x i32>
   %bitcast = bitcast <vscale x 2 x i32> %trunc to <vscale x 2 x float>
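
A minimal sketch of the idea behind the CHECK_BE changes above, assuming integer vector types and the usual AArch64ISelLowering.cpp context (the real logic lives in getSVESafeBitCast; floating-point types go via their same-sized integer equivalents there). On big endian an SVE bitcast is defined by memory layout, so byte-reversing within the source element size and again within the destination element size reproduces the old store/reload round trip without touching the stack:

  // Illustrative only, not code from the patch. ISD::BSWAP lowers to REVB
  // for SVE, and AArch64ISD::NVCAST reinterprets a register without moving
  // any lanes.
  static SDValue bigEndianSVEBitcast(SelectionDAG &DAG, const SDLoc &DL,
                                     EVT DstVT, SDValue Src) {
    EVT SrcVT = Src.getValueType();
    // Recover memory byte order at the source element size.
    if (SrcVT.getScalarSizeInBits() > 8)
      Src = DAG.getNode(ISD::BSWAP, DL, SrcVT, Src);
    // Reinterpret the register contents; no data movement.
    SDValue Cast = DAG.getNode(AArch64ISD::NVCAST, DL, DstVT, Src);
    // Re-establish register lane order at the destination element size.
    if (DstVT.getScalarSizeInBits() > 8)
      Cast = DAG.getNode(ISD::BSWAP, DL, DstVT, Cast);
    return Cast;
  }

When the two element sizes match, the paired reversals cancel and no revb is needed, which is why the same-width bitcasts above collapse to a plain ret.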

>From 245d3c6491fdcacd29dbaf632741072e48865803 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Fri, 16 Aug 2024 17:04:43 +0100
Subject: [PATCH 3/3] [LLVM][AArch64] Enable verifyTargetSDNode for scalable
 vectors and fix the fallout.

Fix incorrect use of AArch64ISD::UZP1 in:
  AArch64TargetLowering::LowerDIV
  AArch64TargetLowering::LowerINSERT_SUBVECTOR

The latter highlighted DAG combines that relied on broken behaviour,
which this patch also fixes.
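
For context (a sketch, not code from the patch): AArch64ISD::UZP1 de-interleaves two vectors of its own result type, so feeding it the wider-typed halves produced by widening was never well formed; it merely went unchecked for scalable vectors until now. The required shape, with illustrative types and with WideLo/WideHi as assumed placeholder names:

  // VT = nxv4i32 result; WideLo/WideHi are the nxv2i64 widened halves.
  SDValue Lo = DAG.getNode(AArch64ISD::NVCAST, DL, VT, WideLo); // reinterpret
  SDValue Hi = DAG.getNode(AArch64ISD::NVCAST, DL, VT, WideHi); // no lane move
  SDValue Res = DAG.getNode(AArch64ISD::UZP1, DL, VT, Lo, Hi);  // types agree
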
---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 77 +++++++++++++------
 llvm/test/CodeGen/AArch64/sve-bitcast.ll      | 36 +++------
 2 files changed, 64 insertions(+), 49 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index bebe48ac8a55a5..265a425125d02d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -14885,10 +14885,11 @@ SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
     // NOP cast operands to the largest legal vector of the same element count.
     if (VT.isFloatingPoint()) {
       Vec0 = getSVESafeBitCast(NarrowVT, Vec0, DAG);
-      Vec1 = getSVESafeBitCast(WideVT, Vec1, DAG);
+      Vec1 = getSVESafeBitCast(NarrowVT, Vec1, DAG);
     } else {
       // Legal integer vectors are already their largest so Vec0 is fine as is.
       Vec1 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Vec1);
+      Vec1 = DAG.getNode(AArch64ISD::NVCAST, DL, NarrowVT, Vec1);
     }
 
     // To replace the top/bottom half of vector V with vector SubV we widen the
@@ -14897,11 +14898,13 @@ SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
     SDValue Narrow;
     if (Idx == 0) {
       SDValue HiVec0 = DAG.getNode(AArch64ISD::UUNPKHI, DL, WideVT, Vec0);
+      HiVec0 = DAG.getNode(AArch64ISD::NVCAST, DL, NarrowVT, HiVec0);
       Narrow = DAG.getNode(AArch64ISD::UZP1, DL, NarrowVT, Vec1, HiVec0);
     } else {
       assert(Idx == InVT.getVectorMinNumElements() &&
              "Invalid subvector index!");
       SDValue LoVec0 = DAG.getNode(AArch64ISD::UUNPKLO, DL, WideVT, Vec0);
+      LoVec0 = DAG.getNode(AArch64ISD::NVCAST, DL, NarrowVT, LoVec0);
       Narrow = DAG.getNode(AArch64ISD::UZP1, DL, NarrowVT, LoVec0, Vec1);
     }
 
@@ -15001,7 +15004,9 @@ SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const {
   SDValue Op1Hi = DAG.getNode(UnpkHi, dl, WidenedVT, Op.getOperand(1));
   SDValue ResultLo = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Lo, Op1Lo);
   SDValue ResultHi = DAG.getNode(Op.getOpcode(), dl, WidenedVT, Op0Hi, Op1Hi);
-  return DAG.getNode(AArch64ISD::UZP1, dl, VT, ResultLo, ResultHi);
+  SDValue ResultLoCast = DAG.getNode(AArch64ISD::NVCAST, dl, VT, ResultLo);
+  SDValue ResultHiCast = DAG.getNode(AArch64ISD::NVCAST, dl, VT, ResultHi);
+  return DAG.getNode(AArch64ISD::UZP1, dl, VT, ResultLoCast, ResultHiCast);
 }
 
 bool AArch64TargetLowering::shouldExpandBuildVectorWithShuffles(
@@ -22634,7 +22639,7 @@ static SDValue trySimplifySrlAddToRshrnb(SDValue Srl, SelectionDAG &DAG,
   SDValue Rshrnb = DAG.getNode(
       AArch64ISD::RSHRNB_I, DL, ResVT,
       {RShOperand, DAG.getTargetConstant(ShiftValue, DL, MVT::i32)});
-  return DAG.getNode(ISD::BITCAST, DL, VT, Rshrnb);
+  return DAG.getNode(AArch64ISD::NVCAST, DL, VT, Rshrnb);
 }
 
 static SDValue performUzpCombine(SDNode *N, SelectionDAG &DAG,
@@ -22697,25 +22702,41 @@ static SDValue performUzpCombine(SDNode *N, SelectionDAG &DAG,
   if (SDValue Urshr = tryCombineExtendRShTrunc(N, DAG))
     return Urshr;
 
-  if (SDValue Rshrnb = trySimplifySrlAddToRshrnb(Op0, DAG, Subtarget))
-    return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, Rshrnb, Op1);
+  if (Op0.getOpcode() == AArch64ISD::NVCAST &&
+      ResVT.getVectorElementCount() == Op0.getOperand(0).getValueType().getVectorElementCount() * 2) {
+    if (SDValue Rshrnb = trySimplifySrlAddToRshrnb(Op0->getOperand(0), DAG, Subtarget)) {
+      Rshrnb = DAG.getNode(AArch64ISD::NVCAST, DL, ResVT, Rshrnb);
+      return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, Rshrnb, Op1);
+    }
+  }
 
-  if (SDValue Rshrnb = trySimplifySrlAddToRshrnb(Op1, DAG, Subtarget))
-    return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, Op0, Rshrnb);
+  if (Op1.getOpcode() == AArch64ISD::NVCAST &&
+      ResVT.getVectorElementCount() == Op1.getOperand(0).getValueType().getVectorElementCount() * 2) {
+    if (SDValue Rshrnb = trySimplifySrlAddToRshrnb(Op1->getOperand(0), DAG, Subtarget)) {
+      Rshrnb = DAG.getNode(AArch64ISD::NVCAST, DL, ResVT, Rshrnb);
+      return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, Op0, Rshrnb);
+    }
+  }
 
-  // uzp1(unpklo(uzp1(x, y)), z) => uzp1(x, z)
-  if (Op0.getOpcode() == AArch64ISD::UUNPKLO) {
-    if (Op0.getOperand(0).getOpcode() == AArch64ISD::UZP1) {
-      SDValue X = Op0.getOperand(0).getOperand(0);
-      return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, X, Op1);
+  // uzp1<ty>(nvcast(unpklo(uzp1<ty>(x, y))), z) => uzp1<ty>(x, z)
+  if (Op0.getOpcode() == AArch64ISD::NVCAST) {
+    if (Op0.getOperand(0).getOpcode() == AArch64ISD::UUNPKLO) {
+      if (Op0.getOperand(0).getOperand(0).getOpcode() == AArch64ISD::UZP1) {
+        SDValue X = Op0.getOperand(0).getOperand(0).getOperand(0);
+        if (ResVT == X.getValueType())
+          return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, X, Op1);
+      }
     }
   }
 
-  // uzp1(x, unpkhi(uzp1(y, z))) => uzp1(x, z)
-  if (Op1.getOpcode() == AArch64ISD::UUNPKHI) {
-    if (Op1.getOperand(0).getOpcode() == AArch64ISD::UZP1) {
-      SDValue Z = Op1.getOperand(0).getOperand(1);
-      return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, Op0, Z);
+  // uzp1<ty>(x, nvcast(unpkhi(uzp1<ty>(y, z)))) => uzp1<ty>(x, z)
+  if (Op1.getOpcode() == AArch64ISD::NVCAST) {
+    if (Op1.getOperand(0).getOpcode() == AArch64ISD::UUNPKHI) {
+      if (Op1.getOperand(0).getOperand(0).getOpcode() == AArch64ISD::UZP1) {
+        SDValue Z = Op1.getOperand(0).getOperand(0).getOperand(1);
+        if (ResVT == Z.getValueType())
+          return DAG.getNode(AArch64ISD::UZP1, DL, ResVT, Op0, Z);
+      }
     }
   }
 
@@ -29284,9 +29305,6 @@ void AArch64TargetLowering::verifyTargetSDNode(const SDNode *N) const {
            VT.isInteger() && "Expected integer vectors!");
     assert(OpVT.getSizeInBits() == VT.getSizeInBits() &&
            "Expected vectors of equal size!");
-    // TODO: Enable assert once bogus creations have been fixed.
-    if (VT.isScalableVector())
-      break;
     assert(OpVT.getVectorElementCount() == VT.getVectorElementCount()*2 &&
            "Expected result vector with half the lanes of its input!");
     break;
@@ -29304,12 +29322,25 @@ void AArch64TargetLowering::verifyTargetSDNode(const SDNode *N) const {
     EVT Op1VT = N->getOperand(1).getValueType();
     assert(VT.isVector() && Op0VT.isVector() && Op1VT.isVector() &&
            "Expected vectors!");
-    // TODO: Enable assert once bogus creations have been fixed.
-    if (VT.isScalableVector())
-      break;
     assert(VT == Op0VT && VT == Op1VT && "Expected matching vectors!");
     break;
   }
+  case AArch64ISD::RSHRNB_I: {
+    assert(N->getNumValues() == 1 && "Expected one result!");
+    assert(N->getNumOperands() == 2 && "Expected two operands!");
+    EVT VT = N->getValueType(0);
+    EVT Op0VT = N->getOperand(0).getValueType();
+    EVT Op1VT = N->getOperand(1).getValueType();
+    assert(VT.isVector() && Op0VT.isVector() && VT.isInteger() &&
+           Op0VT.isInteger() && "Expected integer vectors!");
+    assert(VT.getSizeInBits() == Op0VT.getSizeInBits() &&
+           "Expected vectors of equal size!");
+    assert(VT.getVectorElementCount() == Op0VT.getVectorElementCount() * 2 &&
+           "Expected result vector with half the lanes of its input!");
+    assert(Op1VT == MVT::i32 && isa<ConstantSDNode>(N->getOperand(1)) &&
+           "Expected second operand to be a constant i32!");
+    break;
+  }
   }
 }
 #endif
diff --git a/llvm/test/CodeGen/AArch64/sve-bitcast.ll b/llvm/test/CodeGen/AArch64/sve-bitcast.ll
index 5d12d41ac3332f..7535398b5243c3 100644
--- a/llvm/test/CodeGen/AArch64/sve-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/sve-bitcast.ll
@@ -1426,11 +1426,8 @@ define <vscale x 1 x i64> @bitcast_nxv4f16_to_nxv1i64(<vscale x 4 x half> %v) #0
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4f16_to_nxv1i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
-; CHECK_BE-NEXT:    revb z0.s, p1/m, z0.s
 ; CHECK_BE-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK_BE-NEXT:    ptrue p0.h
 ; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ptrue p0.d
 ; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
@@ -1447,13 +1444,11 @@ define <vscale x 1 x i64> @bitcast_nxv2f32_to_nxv1i64(<vscale x 2 x float> %v) #
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2f32_to_nxv1i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
-; CHECK_BE-NEXT:    revb z0.d, p1/m, z0.d
 ; CHECK_BE-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK_BE-NEXT:    ptrue p0.s
 ; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
-; CHECK_BE-NEXT:    revb z0.d, p1/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x float> %v to <vscale x 1 x i64>
   ret <vscale x 1 x i64> %bc
@@ -1479,11 +1474,8 @@ define <vscale x 1 x i64> @bitcast_nxv4bf16_to_nxv1i64(<vscale x 4 x bfloat> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4bf16_to_nxv1i64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
-; CHECK_BE-NEXT:    revb z0.s, p1/m, z0.s
 ; CHECK_BE-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK_BE-NEXT:    ptrue p0.h
 ; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ptrue p0.d
 ; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
@@ -1888,11 +1880,8 @@ define <vscale x 1 x double> @bitcast_nxv4f16_to_nxv1f64(<vscale x 4 x half> %v)
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4f16_to_nxv1f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
-; CHECK_BE-NEXT:    revb z0.s, p1/m, z0.s
 ; CHECK_BE-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK_BE-NEXT:    ptrue p0.h
 ; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ptrue p0.d
 ; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
@@ -1909,13 +1898,11 @@ define <vscale x 1 x double> @bitcast_nxv2f32_to_nxv1f64(<vscale x 2 x float> %v
 ;
 ; CHECK_BE-LABEL: bitcast_nxv2f32_to_nxv1f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    ptrue p0.s
-; CHECK_BE-NEXT:    ptrue p1.d
-; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
-; CHECK_BE-NEXT:    revb z0.d, p1/m, z0.d
 ; CHECK_BE-NEXT:    uzp1 z0.s, z0.s, z0.s
+; CHECK_BE-NEXT:    ptrue p0.s
 ; CHECK_BE-NEXT:    revb z0.s, p0/m, z0.s
-; CHECK_BE-NEXT:    revb z0.d, p1/m, z0.d
+; CHECK_BE-NEXT:    ptrue p0.d
+; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d
 ; CHECK_BE-NEXT:    ret
   %bc = bitcast <vscale x 2 x float> %v to <vscale x 1 x double>
   ret <vscale x 1 x double> %bc
@@ -1929,11 +1916,8 @@ define <vscale x 1 x double> @bitcast_nxv4bf16_to_nxv1f64(<vscale x 4 x bfloat>
 ;
 ; CHECK_BE-LABEL: bitcast_nxv4bf16_to_nxv1f64:
 ; CHECK_BE:       // %bb.0:
-; CHECK_BE-NEXT:    ptrue p0.h
-; CHECK_BE-NEXT:    ptrue p1.s
-; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
-; CHECK_BE-NEXT:    revb z0.s, p1/m, z0.s
 ; CHECK_BE-NEXT:    uzp1 z0.h, z0.h, z0.h
+; CHECK_BE-NEXT:    ptrue p0.h
 ; CHECK_BE-NEXT:    revb z0.h, p0/m, z0.h
 ; CHECK_BE-NEXT:    ptrue p0.d
 ; CHECK_BE-NEXT:    revb z0.d, p0/m, z0.d


