[llvm] 681888e - [AArch64-SVE]: Force generating code compatible with streaming mode.

Hassnaa Hamdi via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 31 04:03:00 PDT 2022


Author: Hassnaa Hamdi
Date: 2022-10-31T11:01:56Z
New Revision: 681888e3ab3485375909332140657845eec9bd0f

URL: https://github.com/llvm/llvm-project/commit/681888e3ab3485375909332140657845eec9bd0f
DIFF: https://github.com/llvm/llvm-project/commit/681888e3ab3485375909332140657845eec9bd0f.diff

LOG: [AArch64-SVE]: Force generating code compatible with streaming mode.

When streaming mode is enabled, lower some operations and disable some code paths
to force generating code compatible with streaming mode.
Add the streaming-mode flag to the new sve-fixed-length test files:
build_vector.ll
concat.ll
extract-subvector.ll
extract-vector-elt.ll
int-shifts.ll
loads.ll
shuffle.ll
stores.ll

Differential Revision: https://reviews.llvm.org/D135564
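
For reference, every new or updated test in this patch is driven by the
-force-streaming-compatible-sve flag. The condensed LLVM IR excerpt below is
taken from the added int-shifts test (CHECK lines trimmed from the
autogenerated output) and shows a fixed-length vector shift being lowered to
the predicated SVE form instead of the NEON form:

  ; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
  target triple = "aarch64-unknown-linux-gnu"

  define <8 x i8> @ashr_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
  ; CHECK-LABEL: ashr_v8i8:
  ; CHECK: ptrue p0.b, vl8
  ; CHECK: asr z0.b, p0/m, z0.b, z1.b
    %res = ashr <8 x i8> %op1, %op2
    ret <8 x i8> %res
  }

  attributes #0 = { "target-features"="+sve" }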

Added: 
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/AArch64ISelLowering.h
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index afbbe1fc0ddb..cc0d92fd25f8 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1391,6 +1391,16 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
     for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v2f64})
       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
 
+    if (Subtarget->forceStreamingCompatibleSVE()) {
+      for (MVT VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
+                     MVT::v4i32, MVT::v2i64})
+        addTypeForStreamingSVE(VT);
+
+      for (MVT VT :
+           {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v2f64})
+        addTypeForStreamingSVE(VT);
+    }
+
     // NOTE: Currently this has to happen after computeRegisterProperties rather
     // than the preferred option of combining it with the addRegisterClass call.
     if (Subtarget->useSVEForFixedLengthVectors()) {
@@ -1597,6 +1607,14 @@ bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
   return false;
 }
 
+void AArch64TargetLowering::addTypeForStreamingSVE(MVT VT) {
+  setOperationAction(ISD::ANY_EXTEND, VT, Custom);
+  setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
+  setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
+  setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
+  setOperationAction(ISD::AND, VT, Custom);
+}
+
 void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
   assert(VT.isFixedLengthVector() && "Expected fixed length vector type!");
 
@@ -5773,8 +5791,7 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   case ISD::MLOAD:
     return LowerMLOAD(Op, DAG);
   case ISD::LOAD:
-    if (useSVEForFixedLengthVectorVT(Op.getValueType(),
-                                     Subtarget->forceStreamingCompatibleSVE()))
+    if (useSVEForFixedLengthVectorVT(Op.getValueType()))
       return LowerFixedLengthVectorLoadToSVE(Op, DAG);
     return LowerLOAD(Op, DAG);
   case ISD::ADD:
@@ -11400,9 +11417,13 @@ static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
 static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
                                   const APInt &Bits,
                                   const SDValue *LHS = nullptr) {
+  EVT VT = Op.getValueType();
+  if (VT.isFixedLengthVector() &&
+      DAG.getSubtarget<AArch64Subtarget>().forceStreamingCompatibleSVE())
+    return SDValue();
+
   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
-    EVT VT = Op.getValueType();
     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32;
     bool isAdvSIMDModImm = false;
     uint64_t Shift;
@@ -11448,9 +11469,13 @@ static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
 static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
                                   const APInt &Bits,
                                   const SDValue *LHS = nullptr) {
+  EVT VT = Op.getValueType();
+  if (VT.isFixedLengthVector() &&
+      DAG.getSubtarget<AArch64Subtarget>().forceStreamingCompatibleSVE())
+    return SDValue();
+
   if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
     uint64_t Value = Bits.zextOrTrunc(64).getZExtValue();
-    EVT VT = Op.getValueType();
     MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16;
     bool isAdvSIMDModImm = false;
     uint64_t Shift;
@@ -12128,7 +12153,8 @@ SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
 
 SDValue AArch64TargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                    SelectionDAG &DAG) const {
-  if (useSVEForFixedLengthVectorVT(Op.getValueType()))
+  if (useSVEForFixedLengthVectorVT(Op.getValueType(),
+                                   Subtarget->forceStreamingCompatibleSVE()))
     return LowerFixedLengthConcatVectorsToSVE(Op, DAG);
 
   assert(Op.getValueType().isScalableVector() &&
@@ -12234,7 +12260,8 @@ AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
     return DAG.getAnyExtOrTrunc(Extract, DL, Op.getValueType());
   }
 
-  if (useSVEForFixedLengthVectorVT(VT))
+  if (useSVEForFixedLengthVectorVT(VT,
+                                   Subtarget->forceStreamingCompatibleSVE()))
     return LowerFixedLengthExtractVectorElt(Op, DAG);
 
   // Check for non-constant or out of range lane.
@@ -12296,10 +12323,11 @@ SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
   // If this is extracting the upper 64-bits of a 128-bit vector, we match
   // that directly.
   if (Size == 64 && Idx * InVT.getScalarSizeInBits() == 64 &&
-      InVT.getSizeInBits() == 128)
+      InVT.getSizeInBits() == 128 && !Subtarget->forceStreamingCompatibleSVE())
     return Op;
 
-  if (useSVEForFixedLengthVectorVT(InVT)) {
+  if (useSVEForFixedLengthVectorVT(InVT,
+                                   Subtarget->forceStreamingCompatibleSVE())) {
     SDLoc DL(Op);
 
     EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
@@ -12487,7 +12515,8 @@ SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const {
 
 bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
   // Currently no fixed length shuffles that require SVE are legal.
-  if (useSVEForFixedLengthVectorVT(VT))
+  if (useSVEForFixedLengthVectorVT(VT,
+                                   Subtarget->forceStreamingCompatibleSVE()))
     return false;
 
   if (VT.getVectorNumElements() == 4 &&
@@ -12597,7 +12626,9 @@ SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
 
   switch (Op.getOpcode()) {
   case ISD::SHL:
-    if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT))
+    if (VT.isScalableVector() ||
+        useSVEForFixedLengthVectorVT(VT,
+                                     Subtarget->forceStreamingCompatibleSVE()))
       return LowerToPredicatedOp(Op, DAG, AArch64ISD::SHL_PRED);
 
     if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize)
@@ -12609,7 +12640,9 @@ SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
                        Op.getOperand(0), Op.getOperand(1));
   case ISD::SRA:
   case ISD::SRL:
-    if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT)) {
+    if (VT.isScalableVector() ||
+        useSVEForFixedLengthVectorVT(
+            VT, Subtarget->forceStreamingCompatibleSVE())) {
       unsigned Opc = Op.getOpcode() == ISD::SRA ? AArch64ISD::SRA_PRED
                                                 : AArch64ISD::SRL_PRED;
       return LowerToPredicatedOp(Op, DAG, Opc);
@@ -14008,6 +14041,11 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
 bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
                                                   ShuffleVectorInst *SVI,
                                                   unsigned Factor) const {
+  // Skip if streaming compatible SVE is enabled, because it generates invalid
+  // code in streaming mode when SVE length is not specified.
+  if (Subtarget->forceStreamingCompatibleSVE())
+    return false;
+
   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
          "Invalid interleave factor");
 
@@ -22489,7 +22527,7 @@ SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
 SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op,
                                                  SelectionDAG &DAG) const {
   EVT VT = Op.getValueType();
-  assert(useSVEForFixedLengthVectorVT(VT) &&
+  assert(VT.isFixedLengthVector() && isTypeLegal(VT) &&
          "Only expected to lower fixed length vector operation!");
   EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
 
@@ -22505,7 +22543,8 @@ SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op,
     }
 
     // "cast" fixed length vector to a scalable vector.
-    assert(useSVEForFixedLengthVectorVT(V.getValueType()) &&
+    assert(V.getValueType().isFixedLengthVector() &&
+           isTypeLegal(V.getValueType()) &&
            "Only fixed length vectors are supported!");
     Ops.push_back(convertToScalableVector(DAG, ContainerVT, V));
   }

diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 5d111dd3e74d..017fc63e214b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -899,6 +899,7 @@ class AArch64TargetLowering : public TargetLowering {
   bool isExtFreeImpl(const Instruction *Ext) const override;
 
   void addTypeForNEON(MVT VT);
+  void addTypeForStreamingSVE(MVT VT);
   void addTypeForFixedLengthSVE(MVT VT);
   void addDRTypeForNEON(MVT VT);
   void addQRTypeForNEON(MVT VT);

diff  --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index bb5a9a251cba..14bc9edcadd3 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -3032,7 +3032,7 @@ let Predicates = [HasSVEorSME] in {
             (EXTRACT_SUBREG (DUP_ZZI_D ZPR:$vec, sve_elm_idx_extdup_d:$index), dsub)>;
 
   // Extract element from vector with immediate index that's within the bottom 128-bits.
-  let AddedComplexity = 1 in {
+  let Predicates = [NotInStreamingSVEMode], AddedComplexity = 1 in {
   def : Pat<(i32 (vector_extract (nxv16i8 ZPR:$vec), VectorIndexB:$index)),
             (i32 (UMOVvi8 (v16i8 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexB:$index))>;
   def : Pat<(i32 (vector_extract (nxv8i16 ZPR:$vec), VectorIndexH:$index)),
@@ -3041,8 +3041,9 @@ let Predicates = [HasSVEorSME] in {
             (i32 (UMOVvi32 (v4i32 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexS:$index))>;
   def : Pat<(i64 (vector_extract (nxv2i64 ZPR:$vec), VectorIndexD:$index)),
             (i64 (UMOVvi64 (v2i64 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexD:$index))>;
-  }
+  } // End NotInStreamingSVEMode
 
+  let Predicates = [NotInStreamingSVEMode] in {
   def : Pat<(sext_inreg (vector_extract (nxv16i8 ZPR:$vec), VectorIndexB:$index), i8),
             (i32 (SMOVvi8to32 (v16i8 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexB:$index))>;
   def : Pat<(sext_inreg (anyext (vector_extract (nxv16i8 ZPR:$vec), VectorIndexB:$index)), i8),
@@ -3055,6 +3056,7 @@ let Predicates = [HasSVEorSME] in {
 
   def : Pat<(sext (vector_extract (nxv4i32 ZPR:$vec), VectorIndexS:$index)),
             (i64 (SMOVvi32to64 (v4i32 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexS:$index))>;
+  } // End NotInStreamingSVEMode
 
   // Extract first element from vector.
   let AddedComplexity = 2 in {

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
new file mode 100644
index 000000000000..e61105361247
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-build-vector.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define void @build_vector_7_inc1_v4i1(ptr %a) #0 {
+; CHECK-LABEL: build_vector_7_inc1_v4i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov w8, #5
+; CHECK-NEXT:    strb w8, [x0]
+; CHECK-NEXT:    ret
+  store <4 x i1> <i1 true, i1 false, i1 true, i1 false>, ptr %a, align 1
+  ret void
+}
+
+define void @build_vector_7_inc1_v32i8(ptr %a) #0 {
+; CHECK-LABEL: build_vector_7_inc1_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    index z0.b, #0, #1
+; CHECK-NEXT:    mov z1.d, z0.d
+; CHECK-NEXT:    add z0.b, z0.b, #7 // =0x7
+; CHECK-NEXT:    add z1.b, z1.b, #23 // =0x17
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  store <32 x i8> <i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34, i8 35, i8 36, i8 37, i8 38>, ptr %a, align 1
+  ret void
+}
+
+define void @build_vector_0_inc2_v16i16(ptr %a) #0 {
+; CHECK-LABEL: build_vector_0_inc2_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    index z0.h, #0, #2
+; CHECK-NEXT:    str q0, [x0]
+; CHECK-NEXT:    add z0.h, z0.h, #16 // =0x10
+; CHECK-NEXT:    str q0, [x0, #16]
+; CHECK-NEXT:    ret
+  store <16 x i16> <i16 0, i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30>, ptr %a, align 2
+  ret void
+}
+
+; Negative const stride.
+define void @build_vector_0_dec3_v8i32(ptr %a) #0 {
+; CHECK-LABEL: build_vector_0_dec3_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    index z0.s, #0, #-3
+; CHECK-NEXT:    mov z1.s, #-12 // =0xfffffffffffffff4
+; CHECK-NEXT:    add z1.s, z0.s, z1.s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  store <8 x i32> <i32 0, i32 -3, i32 -6, i32 -9, i32 -12, i32 -15, i32 -18, i32 -21>, ptr %a, align 4
+  ret void
+}
+
+; Constant stride that's too big to be directly encoded into the index.
+define void @build_vector_minus2_dec32_v4i64(ptr %a) #0 {
+; CHECK-LABEL: build_vector_minus2_dec32_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x8, #-32
+; CHECK-NEXT:    mov z0.d, #-66 // =0xffffffffffffffbe
+; CHECK-NEXT:    mov z2.d, #-2 // =0xfffffffffffffffe
+; CHECK-NEXT:    index z1.d, #0, x8
+; CHECK-NEXT:    add z0.d, z1.d, z0.d
+; CHECK-NEXT:    add z1.d, z1.d, z2.d
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  store <4 x i64> <i64 -2, i64 -34, i64 -66, i64 -98>, ptr %a, align 8
+  ret void
+}
+
+; Constant but not a sequence.
+define void @build_vector_no_stride_v4i64(ptr %a) #0 {
+; CHECK-LABEL: build_vector_no_stride_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    index z0.d, #1, #7
+; CHECK-NEXT:    index z1.d, #0, #4
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  store <4 x i64> <i64 0, i64 4, i64 1, i64 8>, ptr %a, align 8
+  ret void
+}
+
+define void @build_vector_0_inc2_v16f16(ptr %a) #0 {
+; CHECK-LABEL: build_vector_0_inc2_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI6_0
+; CHECK-NEXT:    adrp x9, .LCPI6_1
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI6_0]
+; CHECK-NEXT:    ldr q1, [x9, :lo12:.LCPI6_1]
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  store <16 x half> <half 0.0, half 2.0, half 4.0, half 6.0, half 8.0, half 10.0, half 12.0, half 14.0, half 16.0, half 18.0, half 20.0, half 22.0, half 24.0, half 26.0, half 28.0, half 30.0>, ptr %a, align 2
+  ret void
+}
+
+; Negative const stride.
+define void @build_vector_0_dec3_v8f32(ptr %a) #0 {
+; CHECK-LABEL: build_vector_0_dec3_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI7_0
+; CHECK-NEXT:    adrp x9, .LCPI7_1
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI7_0]
+; CHECK-NEXT:    ldr q1, [x9, :lo12:.LCPI7_1]
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  store <8 x float> <float 0.0, float -3.0, float -6.0, float -9.0, float -12.0, float -15.0, float -18.0, float -21.0>, ptr %a, align 4
+  ret void
+}
+
+; Constant stride that's too big to be directly encoded into the index.
+define void @build_vector_minus2_dec32_v4f64(ptr %a) #0 {
+; CHECK-LABEL: build_vector_minus2_dec32_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI8_0
+; CHECK-NEXT:    adrp x9, .LCPI8_1
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI8_0]
+; CHECK-NEXT:    ldr q1, [x9, :lo12:.LCPI8_1]
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  store <4 x double> <double -2.0, double -34.0, double -66.0, double -98.0>, ptr %a, align 8
+  ret void
+}
+
+; Constant but not a sequence.
+define void @build_vector_no_stride_v4f64(ptr %a) #0 {
+; CHECK-LABEL: build_vector_no_stride_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI9_0
+; CHECK-NEXT:    adrp x9, .LCPI9_1
+; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI9_0]
+; CHECK-NEXT:    ldr q1, [x9, :lo12:.LCPI9_1]
+; CHECK-NEXT:    stp q1, q0, [x0]
+; CHECK-NEXT:    ret
+  store <4 x double> <double 0.0, double 4.0, double 1.0, double 8.0>, ptr %a, align 8
+  ret void
+}
+
+
+attributes #0 = { "target-features"="+sve" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
new file mode 100644
index 000000000000..fc58ab36ad2e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-concat.ll
@@ -0,0 +1,577 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; i8
+;
+
+define <8 x i8> @concat_v8i8(<4 x i8> %op1, <4 x i8> %op2)  #0 {
+; CHECK-LABEL: concat_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    fmov w8, s1
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    mov z2.h, z1.h[3]
+; CHECK-NEXT:    mov z3.h, z1.h[2]
+; CHECK-NEXT:    mov z4.h, z1.h[1]
+; CHECK-NEXT:    fmov w10, s2
+; CHECK-NEXT:    strb w8, [sp, #12]
+; CHECK-NEXT:    fmov w8, s3
+; CHECK-NEXT:    strb w9, [sp, #8]
+; CHECK-NEXT:    fmov w9, s4
+; CHECK-NEXT:    mov z1.h, z0.h[3]
+; CHECK-NEXT:    mov z5.h, z0.h[2]
+; CHECK-NEXT:    mov z0.h, z0.h[1]
+; CHECK-NEXT:    strb w10, [sp, #15]
+; CHECK-NEXT:    fmov w10, s1
+; CHECK-NEXT:    strb w8, [sp, #14]
+; CHECK-NEXT:    fmov w8, s5
+; CHECK-NEXT:    strb w9, [sp, #13]
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    strb w10, [sp, #11]
+; CHECK-NEXT:    strb w8, [sp, #10]
+; CHECK-NEXT:    strb w9, [sp, #9]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %res = shufflevector <4 x i8> %op1, <4 x i8> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i8> %res
+}
+
+define <16 x i8> @concat_v16i8(<8 x i8> %op1, <8 x i8> %op2)  #0 {
+; CHECK-LABEL: concat_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    splice z0.b, p0, z0.b, z1.b
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shufflevector <8 x i8> %op1, <8 x i8> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                                                                 i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %res
+}
+
+define void @concat_v32i8(<16 x i8>* %a, <16 x i8>* %b, <32 x i8>* %c)  #0 {
+; CHECK-LABEL: concat_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    stp q1, q0, [x2]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i8>, <16 x i8>* %a
+  %op2 = load <16 x i8>, <16 x i8>* %b
+  %res = shufflevector <16 x i8> %op1, <16 x i8> %op2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                                                                   i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
+                                                                   i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
+                                                                   i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  store <32 x i8> %res, <32 x i8>* %c
+  ret void
+}
+
+define void @concat_v64i8(<32 x i8>* %a, <32 x i8>* %b, <64 x i8>* %c) #0 {
+; CHECK-LABEL: concat_v64i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    stp q0, q1, [x2, #32]
+; CHECK-NEXT:    stp q2, q3, [x2]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, <32 x i8>* %a
+  %op2 = load <32 x i8>, <32 x i8>* %b
+  %res = shufflevector <32 x i8> %op1, <32 x i8> %op2, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                                                                   i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
+                                                                   i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
+                                                                   i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31,
+                                                                   i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39,
+                                                                   i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47,
+                                                                   i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55,
+                                                                   i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+  store <64 x i8> %res, <64 x i8>* %c
+  ret void
+}
+
+;
+; i16
+;
+
+define <4 x i16> @concat_v4i16(<2 x i16> %op1, <2 x i16> %op2)  #0 {
+; CHECK-LABEL: concat_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    fmov w8, s1
+; CHECK-NEXT:    fmov w9, s0
+; CHECK-NEXT:    mov z1.s, z1.s[1]
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    fmov w10, s1
+; CHECK-NEXT:    fmov w11, s0
+; CHECK-NEXT:    strh w8, [sp, #12]
+; CHECK-NEXT:    strh w9, [sp, #8]
+; CHECK-NEXT:    strh w10, [sp, #14]
+; CHECK-NEXT:    strh w11, [sp, #10]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %res = shufflevector <2 x i16> %op1, <2 x i16> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i16> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <8 x i16> @concat_v8i16(<4 x i16> %op1, <4 x i16> %op2)  #0 {
+; CHECK-LABEL: concat_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shufflevector <4 x i16> %op1, <4 x i16> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i16> %res
+}
+
+define void @concat_v16i16(<8 x i16>* %a, <8 x i16>* %b, <16 x i16>* %c)  #0 {
+; CHECK-LABEL: concat_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    stp q1, q0, [x2]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i16>, <8 x i16>* %a
+  %op2 = load <8 x i16>, <8 x i16>* %b
+  %res = shufflevector <8 x i16> %op1, <8 x i16> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                                                                   i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  store <16 x i16> %res, <16 x i16>* %c
+  ret void
+}
+
+define void @concat_v32i16(<16 x i16>* %a, <16 x i16>* %b, <32 x i16>* %c) #0 {
+; CHECK-LABEL: concat_v32i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    stp q0, q1, [x2, #32]
+; CHECK-NEXT:    stp q2, q3, [x2]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, <16 x i16>* %a
+  %op2 = load <16 x i16>, <16 x i16>* %b
+  %res = shufflevector <16 x i16> %op1, <16 x i16> %op2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                                                                     i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
+                                                                     i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
+                                                                     i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  store <32 x i16> %res, <32 x i16>* %c
+  ret void
+}
+
+;
+; i32
+;
+
+; Don't use SVE for 64-bit vectors.
+define <2 x i32> @concat_v2i32(<1 x i32> %op1, <1 x i32> %op2)  #0 {
+; CHECK-LABEL: concat_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shufflevector <1 x i32> %op1, <1 x i32> %op2, <2 x i32> <i32 0, i32 1>
+  ret <2 x i32> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <4 x i32> @concat_v4i32(<2 x i32> %op1, <2 x i32> %op2)  #0 {
+; CHECK-LABEL: concat_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shufflevector <2 x i32> %op1, <2 x i32> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i32> %res
+}
+
+define void @concat_v8i32(<4 x i32>* %a, <4 x i32>* %b, <8 x i32>* %c)  #0 {
+; CHECK-LABEL: concat_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    stp q1, q0, [x2]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i32>, <4 x i32>* %a
+  %op2 = load <4 x i32>, <4 x i32>* %b
+  %res = shufflevector <4 x i32> %op1, <4 x i32> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  store <8 x i32> %res, <8 x i32>* %c
+  ret void
+}
+
+define void @concat_v16i32(<8 x i32>* %a, <8 x i32>* %b, <16 x i32>* %c) #0 {
+; CHECK-LABEL: concat_v16i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    stp q0, q1, [x2, #32]
+; CHECK-NEXT:    stp q2, q3, [x2]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op2 = load <8 x i32>, <8 x i32>* %b
+  %res = shufflevector <8 x i32> %op1, <8 x i32> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                                                                   i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  store <16 x i32> %res, <16 x i32>* %c
+  ret void
+}
+
+;
+; i64
+;
+
+; Don't use SVE for 128-bit vectors.
+define <2 x i64> @concat_v2i64(<1 x i64> %op1, <1 x i64> %op2)  #0 {
+; CHECK-LABEL: concat_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shufflevector <1 x i64> %op1, <1 x i64> %op2, <2 x i32> <i32 0, i32 1>
+  ret <2 x i64> %res
+}
+
+define void @concat_v4i64(<2 x i64>* %a, <2 x i64>* %b, <4 x i64>* %c)  #0 {
+; CHECK-LABEL: concat_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    stp q1, q0, [x2]
+; CHECK-NEXT:    ret
+  %op1 = load <2 x i64>, <2 x i64>* %a
+  %op2 = load <2 x i64>, <2 x i64>* %b
+  %res = shufflevector <2 x i64> %op1, <2 x i64> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  store <4 x i64> %res, <4 x i64>* %c
+  ret void
+}
+
+define void @concat_v8i64(<4 x i64>* %a, <4 x i64>* %b, <8 x i64>* %c) #0 {
+; CHECK-LABEL: concat_v8i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    stp q0, q1, [x2, #32]
+; CHECK-NEXT:    stp q2, q3, [x2]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, <4 x i64>* %a
+  %op2 = load <4 x i64>, <4 x i64>* %b
+  %res = shufflevector <4 x i64> %op1, <4 x i64> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  store <8 x i64> %res, <8 x i64>* %c
+  ret void
+}
+
+;
+; f16
+;
+
+define <4 x half> @concat_v4f16(<2 x half> %op1, <2 x half> %op2)  #0 {
+; CHECK-LABEL: concat_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    str h1, [sp, #12]
+; CHECK-NEXT:    str h0, [sp, #8]
+; CHECK-NEXT:    mov z1.h, z1.h[1]
+; CHECK-NEXT:    mov z0.h, z0.h[1]
+; CHECK-NEXT:    str h1, [sp, #14]
+; CHECK-NEXT:    str h0, [sp, #10]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %res = shufflevector <2 x half> %op1, <2 x half> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x half> %res
+}
+
+define <8 x half> @concat_v8f16(<4 x half> %op1, <4 x half> %op2)  #0 {
+; CHECK-LABEL: concat_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    splice z0.h, p0, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shufflevector <4 x half> %op1, <4 x half> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x half> %res
+}
+
+define void @concat_v16f16(<8 x half>* %a, <8 x half>* %b, <16 x half>* %c)  #0 {
+; CHECK-LABEL: concat_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    stp q1, q0, [x2]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x half>, <8 x half>* %a
+  %op2 = load <8 x half>, <8 x half>* %b
+  %res = shufflevector <8 x half> %op1, <8 x half> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                                                                     i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  store <16 x half> %res, <16 x half>* %c
+  ret void
+}
+
+define void @concat_v32f16(<16 x half>* %a, <16 x half>* %b, <32 x half>* %c) #0 {
+; CHECK-LABEL: concat_v32f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    stp q0, q1, [x2, #32]
+; CHECK-NEXT:    stp q2, q3, [x2]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x half>, <16 x half>* %a
+  %op2 = load <16 x half>, <16 x half>* %b
+  %res = shufflevector <16 x half> %op1, <16 x half> %op2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                                                                       i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
+                                                                       i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
+                                                                       i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  store <32 x half> %res, <32 x half>* %c
+  ret void
+}
+
+;
+; f32
+;
+
+; Don't use SVE for 64-bit vectors.
+define <2 x float> @concat_v2f32(<1 x float> %op1, <1 x float> %op2)  #0 {
+; CHECK-LABEL: concat_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    zip1 z0.s, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shufflevector <1 x float> %op1, <1 x float> %op2, <2 x i32> <i32 0, i32 1>
+  ret <2 x float> %res
+}
+
+; Don't use SVE for 128-bit vectors.
+define <4 x float> @concat_v4f32(<2 x float> %op1, <2 x float> %op2)  #0 {
+; CHECK-LABEL: concat_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    splice z0.s, p0, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shufflevector <2 x float> %op1, <2 x float> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x float> %res
+}
+
+define void @concat_v8f32(<4 x float>* %a, <4 x float>* %b, <8 x float>* %c)  #0 {
+; CHECK-LABEL: concat_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    stp q1, q0, [x2]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x float>, <4 x float>* %a
+  %op2 = load <4 x float>, <4 x float>* %b
+  %res = shufflevector <4 x float> %op1, <4 x float> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  store <8 x float> %res, <8 x float>* %c
+  ret void
+}
+
+define void @concat_v16f32(<8 x float>* %a, <8 x float>* %b, <16 x float>* %c) #0 {
+; CHECK-LABEL: concat_v16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    stp q0, q1, [x2, #32]
+; CHECK-NEXT:    stp q2, q3, [x2]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x float>, <8 x float>* %a
+  %op2 = load <8 x float>, <8 x float>* %b
+  %res = shufflevector <8 x float> %op1, <8 x float> %op2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                                                                       i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  store <16 x float> %res, <16 x float>* %c
+  ret void
+}
+
+;
+; f64
+;
+
+; Don't use SVE for 128-bit vectors.
+define <2 x double> @concat_v2f64(<1 x double> %op1, <1 x double> %op2)  #0 {
+; CHECK-LABEL: concat_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    splice z0.d, p0, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shufflevector <1 x double> %op1, <1 x double> %op2, <2 x i32> <i32 0, i32 1>
+  ret <2 x double> %res
+}
+
+define void @concat_v4f64(<2 x double>* %a, <2 x double>* %b, <4 x double>* %c)  #0 {
+; CHECK-LABEL: concat_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x1]
+; CHECK-NEXT:    ldr q1, [x0]
+; CHECK-NEXT:    stp q1, q0, [x2]
+; CHECK-NEXT:    ret
+  %op1 = load <2 x double>, <2 x double>* %a
+  %op2 = load <2 x double>, <2 x double>* %b
+  %res = shufflevector <2 x double> %op1, <2 x double> %op2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  store <4 x double> %res, <4 x double>* %c
+  ret void
+}
+
+define void @concat_v8f64(<4 x double>* %a, <4 x double>* %b, <8 x double>* %c) #0 {
+; CHECK-LABEL: concat_v8f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x1]
+; CHECK-NEXT:    ldp q2, q3, [x0]
+; CHECK-NEXT:    stp q0, q1, [x2, #32]
+; CHECK-NEXT:    stp q2, q3, [x2]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x double>, <4 x double>* %a
+  %op2 = load <4 x double>, <4 x double>* %b
+  %res = shufflevector <4 x double> %op1, <4 x double> %op2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  store <8 x double> %res, <8 x double>* %c
+  ret void
+}
+
+;
+; undef
+;
+
+define void @concat_v32i8_undef(<16 x i8>* %a, <32 x i8>* %b)  #0 {
+; CHECK-LABEL: concat_v32i8_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i8>, <16 x i8>* %a
+  %res = shufflevector <16 x i8> %op1, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                                                                    i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
+                                                                    i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
+                                                                    i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  store <32 x i8> %res, <32 x i8>* %b
+  ret void
+}
+
+define void @concat_v16i16_undef(<8 x i16>* %a, <16 x i16>* %b)  #0 {
+; CHECK-LABEL: concat_v16i16_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i16>, <8 x i16>* %a
+  %res = shufflevector <8 x i16> %op1, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                                                                    i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  store <16 x i16> %res, <16 x i16>* %b
+  ret void
+}
+
+define void @concat_v8i32_undef(<4 x i32>* %a, <8 x i32>* %b)  #0 {
+; CHECK-LABEL: concat_v8i32_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i32>, <4 x i32>* %a
+  %res = shufflevector <4 x i32> %op1, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  store <8 x i32> %res, <8 x i32>* %b
+  ret void
+}
+
+define void @concat_v4i64_undef(<2 x i64>* %a, <4 x i64>* %b)  #0 {
+; CHECK-LABEL: concat_v4i64_undef:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op1 = load <2 x i64>, <2 x i64>* %a
+  %res = shufflevector <2 x i64> %op1, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  store <4 x i64> %res, <4 x i64>* %b
+  ret void
+}
+
+;
+; > 2 operands
+;
+
+define void @concat_v32i8_4op(<8 x i8>* %a, <32 x i8>* %b)  #0 {
+; CHECK-LABEL: concat_v32i8_4op:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i8>, <8 x i8>* %a
+  %shuffle = shufflevector <8 x i8> %op1, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                                                                      i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %res = shufflevector <16 x i8> %shuffle, <16 x i8> undef, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                                                                        i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15,
+                                                                        i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23,
+                                                                        i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  store <32 x i8> %res, <32 x i8>* %b
+  ret void
+}
+
+define void @concat_v16i16_4op(<4 x i16>* %a, <16 x i16>* %b)  #0 {
+; CHECK-LABEL: concat_v16i16_4op:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i16>, <4 x i16>* %a
+  %shuffle = shufflevector <4 x i16> %op1, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %res = shufflevector <8 x i16> %shuffle, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
+                                                                        i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  store <16 x i16> %res, <16 x i16>* %b
+  ret void
+}
+
+define void @concat_v8i32_4op(<2 x i32>* %a, <8 x i32>* %b)  #0 {
+; CHECK-LABEL: concat_v8i32_4op:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op1 = load <2 x i32>, <2 x i32>* %a
+  %shuffle = shufflevector <2 x i32> %op1, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = shufflevector <4 x i32> %shuffle, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  store <8 x i32> %res, <8 x i32>* %b
+  ret void
+}
+
+define void @concat_v4i64_4op(<1 x i64>* %a, <4 x i64>* %b)  #0 {
+; CHECK-LABEL: concat_v4i64_4op:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op1 = load <1 x i64>, <1 x i64>* %a
+  %shuffle = shufflevector <1 x i64> %op1, <1 x i64> undef, <2 x i32> <i32 0, i32 1>
+  %res = shufflevector <2 x i64> %shuffle, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  store <4 x i64> %res, <4 x i64>* %b
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll
new file mode 100644
index 000000000000..8a6d1903c8f6
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-subvector.ll
@@ -0,0 +1,322 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; i1
+
+define <4 x i1> @extract_subvector_v8i1(<8 x i1> %op) #0 {
+; CHECK-LABEL: extract_subvector_v8i1:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z1.b, z0.b[7]
+; CHECK-NEXT:    mov z2.b, z0.b[6]
+; CHECK-NEXT:    fmov w8, s1
+; CHECK-NEXT:    mov z1.b, z0.b[5]
+; CHECK-NEXT:    mov z0.b, z0.b[4]
+; CHECK-NEXT:    fmov w9, s2
+; CHECK-NEXT:    fmov w10, s1
+; CHECK-NEXT:    fmov w11, s0
+; CHECK-NEXT:    strh w8, [sp, #14]
+; CHECK-NEXT:    strh w9, [sp, #12]
+; CHECK-NEXT:    strh w10, [sp, #10]
+; CHECK-NEXT:    strh w11, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %ret = call <4 x i1> @llvm.vector.extract.v4i1.v8i1(<8 x i1> %op, i64 4)
+  ret <4 x i1> %ret
+}
+
+; i8
+
+define <4 x i8> @extract_subvector_v8i8(<8 x i8> %op) #0 {
+; CHECK-LABEL: extract_subvector_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z1.b, z0.b[7]
+; CHECK-NEXT:    mov z2.b, z0.b[6]
+; CHECK-NEXT:    fmov w8, s1
+; CHECK-NEXT:    mov z1.b, z0.b[5]
+; CHECK-NEXT:    mov z0.b, z0.b[4]
+; CHECK-NEXT:    fmov w9, s2
+; CHECK-NEXT:    fmov w10, s1
+; CHECK-NEXT:    fmov w11, s0
+; CHECK-NEXT:    strh w8, [sp, #14]
+; CHECK-NEXT:    strh w9, [sp, #12]
+; CHECK-NEXT:    strh w10, [sp, #10]
+; CHECK-NEXT:    strh w11, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %ret = call <4 x i8> @llvm.vector.extract.v4i8.v8i8(<8 x i8> %op, i64 4)
+  ret <4 x i8> %ret
+}
+
+define <8 x i8> @extract_subvector_v16i8(<16 x i8> %op) #0 {
+; CHECK-LABEL: extract_subvector_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <8 x i8> @llvm.vector.extract.v8i8.v16i8(<16 x i8> %op, i64 8)
+  ret <8 x i8> %ret
+}
+
+define void @extract_subvector_v32i8(<32 x i8>* %a, <16 x i8>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <32 x i8>, <32 x i8>* %a
+  %ret = call <16 x i8> @llvm.vector.extract.v16i8.v32i8(<32 x i8> %op, i64 16)
+  store <16 x i8> %ret, <16 x i8>* %b
+  ret void
+}
+
+; i16
+
+define <2 x i16> @extract_subvector_v4i16(<4 x i16> %op) #0 {
+; CHECK-LABEL: extract_subvector_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <2 x i16> @llvm.vector.extract.v2i16.v4i16(<4 x i16> %op, i64 2)
+  ret <2 x i16> %ret
+}
+
+define <4 x i16> @extract_subvector_v8i16(<8 x i16> %op) #0 {
+; CHECK-LABEL: extract_subvector_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <4 x i16> @llvm.vector.extract.v4i16.v8i16(<8 x i16> %op, i64 4)
+  ret <4 x i16> %ret
+}
+
+define void @extract_subvector_v16i16(<16 x i16>* %a, <8 x i16>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %ret = call <8 x i16> @llvm.vector.extract.v8i16.v16i16(<16 x i16> %op, i64 8)
+  store <8 x i16> %ret, <8 x i16>* %b
+  ret void
+}
+
+; i32
+
+define <1 x i32> @extract_subvector_v2i32(<2 x i32> %op) #0 {
+; CHECK-LABEL: extract_subvector_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    insr z0.s, w8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <1 x i32> @llvm.vector.extract.v1i32.v2i32(<2 x i32> %op, i64 1)
+  ret <1 x i32> %ret
+}
+
+define <2 x i32> @extract_subvector_v4i32(<4 x i32> %op) #0 {
+; CHECK-LABEL: extract_subvector_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32> %op, i64 2)
+  ret <2 x i32> %ret
+}
+
+define void @extract_subvector_v8i32(<8 x i32>* %a, <4 x i32>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %ret = call <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32> %op, i64 4)
+  store <4 x i32> %ret, <4 x i32>* %b
+  ret void
+}
+
+; i64
+
+define <1 x i64> @extract_subvector_v2i64(<2 x i64> %op) #0 {
+; CHECK-LABEL: extract_subvector_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <1 x i64> @llvm.vector.extract.v1i64.v2i64(<2 x i64> %op, i64 1)
+  ret <1 x i64> %ret
+}
+
+define void @extract_subvector_v4i64(<4 x i64>* %a, <2 x i64>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %ret = call <2 x i64> @llvm.vector.extract.v2i64.v4i64(<4 x i64> %op, i64 2)
+  store <2 x i64> %ret, <2 x i64>* %b
+  ret void
+}
+
+; f16
+
+define <2 x half> @extract_subvector_v4f16(<4 x half> %op) #0 {
+; CHECK-LABEL: extract_subvector_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z1.h, z0.h[3]
+; CHECK-NEXT:    mov z0.h, z0.h[2]
+; CHECK-NEXT:    str h1, [sp, #10]
+; CHECK-NEXT:    str h0, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %ret = call <2 x half> @llvm.vector.extract.v2f16.v4f16(<4 x half> %op, i64 2)
+  ret <2 x half> %ret
+}
+
+define <4 x half> @extract_subvector_v8f16(<8 x half> %op) #0 {
+; CHECK-LABEL: extract_subvector_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <4 x half> @llvm.vector.extract.v4f16.v8f16(<8 x half> %op, i64 4)
+  ret <4 x half> %ret
+}
+
+define void @extract_subvector_v16f16(<16 x half>* %a, <8 x half>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <16 x half>, <16 x half>* %a
+  %ret = call <8 x half> @llvm.vector.extract.v8f16.v16f16(<16 x half> %op, i64 8)
+  store <8 x half> %ret, <8 x half>* %b
+  ret void
+}
+
+; f32
+
+define <1 x float> @extract_subvector_v2f32(<2 x float> %op) #0 {
+; CHECK-LABEL: extract_subvector_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    insr z0.s, s0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <1 x float> @llvm.vector.extract.v1f32.v2f32(<2 x float> %op, i64 1)
+  ret <1 x float> %ret
+}
+
+define <2 x float> @extract_subvector_v4f32(<4 x float> %op) #0 {
+; CHECK-LABEL: extract_subvector_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <2 x float> @llvm.vector.extract.v2f32.v4f32(<4 x float> %op, i64 2)
+  ret <2 x float> %ret
+}
+
+define void @extract_subvector_v8f32(<8 x float>* %a, <4 x float>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <8 x float>, <8 x float>* %a
+  %ret = call <4 x float> @llvm.vector.extract.v4f32.v8f32(<8 x float> %op, i64 4)
+  store <4 x float> %ret, <4 x float>* %b
+  ret void
+}
+
+; f64
+
+define <1 x double> @extract_subvector_v2f64(<2 x double> %op) #0 {
+; CHECK-LABEL: extract_subvector_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %ret = call <1 x double> @llvm.vector.extract.v1f64.v2f64(<2 x double> %op, i64 1)
+  ret <1 x double> %ret
+}
+
+define void @extract_subvector_v4f64(<4 x double>* %a, <2 x double>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <4 x double>, <4 x double>* %a
+  %ret = call <2 x double> @llvm.vector.extract.v2f64.v4f64(<4 x double> %op, i64 2)
+  store <2 x double> %ret, <2 x double>* %b
+  ret void
+}
+
+declare <4 x i1> @llvm.vector.extract.v4i1.v8i1(<8 x i1>, i64)
+
+declare <4 x i8> @llvm.vector.extract.v4i8.v8i8(<8 x i8>, i64)
+declare <8 x i8> @llvm.vector.extract.v8i8.v16i8(<16 x i8>, i64)
+declare <16 x i8> @llvm.vector.extract.v16i8.v32i8(<32 x i8>, i64)
+declare <32 x i8> @llvm.vector.extract.v32i8.v64i8(<64 x i8>, i64)
+
+declare <2 x i16> @llvm.vector.extract.v2i16.v4i16(<4 x i16>, i64)
+declare <4 x i16> @llvm.vector.extract.v4i16.v8i16(<8 x i16>, i64)
+declare <8 x i16> @llvm.vector.extract.v8i16.v16i16(<16 x i16>, i64)
+declare <16 x i16> @llvm.vector.extract.v16i16.v32i16(<32 x i16>, i64)
+
+declare <1 x i32> @llvm.vector.extract.v1i32.v2i32(<2 x i32>, i64)
+declare <2 x i32> @llvm.vector.extract.v2i32.v4i32(<4 x i32>, i64)
+declare <4 x i32> @llvm.vector.extract.v4i32.v8i32(<8 x i32>, i64)
+declare <8 x i32> @llvm.vector.extract.v8i32.v16i32(<16 x i32>, i64)
+
+declare <1 x i64> @llvm.vector.extract.v1i64.v2i64(<2 x i64>, i64)
+declare <2 x i64> @llvm.vector.extract.v2i64.v4i64(<4 x i64>, i64)
+declare <4 x i64> @llvm.vector.extract.v4i64.v8i64(<8 x i64>, i64)
+
+declare <2 x half> @llvm.vector.extract.v2f16.v4f16(<4 x half>, i64)
+declare <4 x half> @llvm.vector.extract.v4f16.v8f16(<8 x half>, i64)
+declare <8 x half> @llvm.vector.extract.v8f16.v16f16(<16 x half>, i64)
+declare <16 x half> @llvm.vector.extract.v16f16.v32f16(<32 x half>, i64)
+
+declare <1 x float> @llvm.vector.extract.v1f32.v2f32(<2 x float>, i64)
+declare <2 x float> @llvm.vector.extract.v2f32.v4f32(<4 x float>, i64)
+declare <4 x float> @llvm.vector.extract.v4f32.v8f32(<8 x float>, i64)
+declare <8 x float> @llvm.vector.extract.v8f32.v16f32(<16 x float>, i64)
+
+declare <1 x double> @llvm.vector.extract.v1f64.v2f64(<2 x double>, i64)
+declare <2 x double> @llvm.vector.extract.v2f64.v4f64(<4 x double>, i64)
+declare <4 x double> @llvm.vector.extract.v4f64.v8f64(<8 x double>, i64)
+
+attributes #0 = { "target-features"="+sve" }

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll
new file mode 100644
index 000000000000..ad7e637afeea
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-extract-vector-elt.ll
@@ -0,0 +1,121 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; extractelement
+;
+
+define half @extractelement_v2f16(<2 x half> %op1) #0 {
+; CHECK-LABEL: extractelement_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z0.h, z0.h[1]
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
+; CHECK-NEXT:    ret
+  %r = extractelement <2 x half> %op1, i64 1
+  ret half %r
+}
+
+define half @extractelement_v4f16(<4 x half> %op1) #0 {
+; CHECK-LABEL: extractelement_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z0.h, z0.h[3]
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
+; CHECK-NEXT:    ret
+  %r = extractelement <4 x half> %op1, i64 3
+  ret half %r
+}
+
+define half @extractelement_v8f16(<8 x half> %op1) #0 {
+; CHECK-LABEL: extractelement_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    mov z0.h, z0.h[7]
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
+; CHECK-NEXT:    ret
+  %r = extractelement <8 x half> %op1, i64 7
+  ret half %r
+}
+
+define half @extractelement_v16f16(<16 x half>* %a) #0 {
+; CHECK-LABEL: extractelement_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    mov z0.h, z0.h[7]
+; CHECK-NEXT:    // kill: def $h0 killed $h0 killed $z0
+; CHECK-NEXT:    ret
+  %op1 = load <16 x half>, <16 x half>* %a
+  %r = extractelement <16 x half> %op1, i64 15
+  ret half %r
+}
+
+define float @extractelement_v2f32(<2 x float> %op1) #0 {
+; CHECK-LABEL: extractelement_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    mov z0.s, z0.s[1]
+; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
+; CHECK-NEXT:    ret
+  %r = extractelement <2 x float> %op1, i64 1
+  ret float %r
+}
+
+define float @extractelement_v4f32(<4 x float> %op1) #0 {
+; CHECK-LABEL: extractelement_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    mov z0.s, z0.s[3]
+; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
+; CHECK-NEXT:    ret
+  %r = extractelement <4 x float> %op1, i64 3
+  ret float %r
+}
+
+define float @extractelement_v8f32(<8 x float>* %a) #0 {
+; CHECK-LABEL: extractelement_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    mov z0.s, z0.s[3]
+; CHECK-NEXT:    // kill: def $s0 killed $s0 killed $z0
+; CHECK-NEXT:    ret
+  %op1 = load <8 x float>, <8 x float>* %a
+  %r = extractelement <8 x float> %op1, i64 7
+  ret float %r
+}
+
+define double @extractelement_v1f64(<1 x double> %op1) #0 {
+; CHECK-LABEL: extractelement_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %r = extractelement <1 x double> %op1, i64 0
+  ret double %r
+}
+define double @extractelement_v2f64(<2 x double> %op1) #0 {
+; CHECK-LABEL: extractelement_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    mov z0.d, z0.d[1]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %r = extractelement <2 x double> %op1, i64 1
+  ret double %r
+}
+
+define double @extractelement_v4f64(<4 x double>* %a) #0 {
+; CHECK-LABEL: extractelement_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0, #16]
+; CHECK-NEXT:    mov z0.d, z0.d[1]
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %op1 = load <4 x double>, <4 x double>* %a
+  %r = extractelement <4 x double> %op1, i64 3
+  ret double %r
+}
+
+attributes #0 = { "target-features"="+sve" }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
new file mode 100644
index 000000000000..2c2310cf340c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-int-shifts.ll
@@ -0,0 +1,640 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; ASHR
+;
+
+define <4 x i8> @ashr_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 {
+; CHECK-LABEL: ashr_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI0_0
+; CHECK-NEXT:    adrp x9, .LCPI0_1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI0_0]
+; CHECK-NEXT:    ldr d3, [x9, :lo12:.LCPI0_1]
+; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    and z1.d, z1.d, z3.d
+; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = ashr <4 x i8> %op1, %op2
+  ret <4 x i8> %res
+}
+
+define <8 x i8> @ashr_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
+; CHECK-LABEL: ashr_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    asr z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = ashr <8 x i8> %op1, %op2
+  ret <8 x i8> %res
+}
+
+define <16 x i8> @ashr_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
+; CHECK-LABEL: ashr_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    asr z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = ashr <16 x i8> %op1, %op2
+  ret <16 x i8> %res
+}
+
+define void @ashr_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+; CHECK-LABEL: ashr_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    asr z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    asr z1.b, p0/m, z1.b, z3.b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, <32 x i8>* %a
+  %op2 = load <32 x i8>, <32 x i8>* %b
+  %res = ashr <32 x i8> %op1, %op2
+  store <32 x i8> %res, <32 x i8>* %a
+  ret void
+}
+
+define <2 x i16> @ashr_v2i16(<2 x i16> %op1, <2 x i16> %op2) #0 {
+; CHECK-LABEL: ashr_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI4_0
+; CHECK-NEXT:    adrp x9, .LCPI4_1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI4_0]
+; CHECK-NEXT:    ldr d3, [x9, :lo12:.LCPI4_1]
+; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    and z1.d, z1.d, z3.d
+; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = ashr <2 x i16> %op1, %op2
+  ret <2 x i16> %res
+}
+
+define <4 x i16> @ashr_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
+; CHECK-LABEL: ashr_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = ashr <4 x i16> %op1, %op2
+  ret <4 x i16> %res
+}
+
+define <8 x i16> @ashr_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
+; CHECK-LABEL: ashr_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = ashr <8 x i16> %op1, %op2
+  ret <8 x i16> %res
+}
+
+define void @ashr_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+; CHECK-LABEL: ashr_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    asr z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    asr z1.h, p0/m, z1.h, z3.h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, <16 x i16>* %a
+  %op2 = load <16 x i16>, <16 x i16>* %b
+  %res = ashr <16 x i16> %op1, %op2
+  store <16 x i16> %res, <16 x i16>* %a
+  ret void
+}
+
+define <2 x i32> @ashr_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 {
+; CHECK-LABEL: ashr_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = ashr <2 x i32> %op1, %op2
+  ret <2 x i32> %res
+}
+
+define <4 x i32> @ashr_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
+; CHECK-LABEL: ashr_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = ashr <4 x i32> %op1, %op2
+  ret <4 x i32> %res
+}
+
+define void @ashr_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+; CHECK-LABEL: ashr_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    asr z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    asr z1.s, p0/m, z1.s, z3.s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op2 = load <8 x i32>, <8 x i32>* %b
+  %res = ashr <8 x i32> %op1, %op2
+  store <8 x i32> %res, <8 x i32>* %a
+  ret void
+}
+
+define <1 x i64> @ashr_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 {
+; CHECK-LABEL: ashr_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    asr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = ashr <1 x i64> %op1, %op2
+  ret <1 x i64> %res
+}
+
+define <2 x i64> @ashr_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
+; CHECK-LABEL: ashr_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    asr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = ashr <2 x i64> %op1, %op2
+  ret <2 x i64> %res
+}
+
+define void @ashr_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+; CHECK-LABEL: ashr_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    asr z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    asr z1.d, p0/m, z1.d, z3.d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, <4 x i64>* %a
+  %op2 = load <4 x i64>, <4 x i64>* %b
+  %res = ashr <4 x i64> %op1, %op2
+  store <4 x i64> %res, <4 x i64>* %a
+  ret void
+}
+
+;
+; LSHR
+;
+
+define <4 x i8> @lshr_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 {
+; CHECK-LABEL: lshr_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI14_0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI14_0]
+; CHECK-NEXT:    and z1.d, z1.d, z2.d
+; CHECK-NEXT:    and z0.d, z0.d, z2.d
+; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = lshr <4 x i8> %op1, %op2
+  ret <4 x i8> %res
+}
+
+define <8 x i8> @lshr_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
+; CHECK-LABEL: lshr_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = lshr <8 x i8> %op1, %op2
+  ret <8 x i8> %res
+}
+
+define <16 x i8> @lshr_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
+; CHECK-LABEL: lshr_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = lshr <16 x i8> %op1, %op2
+  ret <16 x i8> %res
+}
+
+define void @lshr_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+; CHECK-LABEL: lshr_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    lsr z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    lsr z1.b, p0/m, z1.b, z3.b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, <32 x i8>* %a
+  %op2 = load <32 x i8>, <32 x i8>* %b
+  %res = lshr <32 x i8> %op1, %op2
+  store <32 x i8> %res, <32 x i8>* %a
+  ret void
+}
+
+define <2 x i16> @lshr_v2i16(<2 x i16> %op1, <2 x i16> %op2) #0 {
+; CHECK-LABEL: lshr_v2i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI18_0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI18_0]
+; CHECK-NEXT:    and z1.d, z1.d, z2.d
+; CHECK-NEXT:    and z0.d, z0.d, z2.d
+; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = lshr <2 x i16> %op1, %op2
+  ret <2 x i16> %res
+}
+
+define <4 x i16> @lshr_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
+; CHECK-LABEL: lshr_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = lshr <4 x i16> %op1, %op2
+  ret <4 x i16> %res
+}
+
+define <8 x i16> @lshr_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
+; CHECK-LABEL: lshr_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = lshr <8 x i16> %op1, %op2
+  ret <8 x i16> %res
+}
+
+define void @lshr_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+; CHECK-LABEL: lshr_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    lsr z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    lsr z1.h, p0/m, z1.h, z3.h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, <16 x i16>* %a
+  %op2 = load <16 x i16>, <16 x i16>* %b
+  %res = lshr <16 x i16> %op1, %op2
+  store <16 x i16> %res, <16 x i16>* %a
+  ret void
+}
+
+define <2 x i32> @lshr_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 {
+; CHECK-LABEL: lshr_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = lshr <2 x i32> %op1, %op2
+  ret <2 x i32> %res
+}
+
+define <4 x i32> @lshr_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
+; CHECK-LABEL: lshr_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = lshr <4 x i32> %op1, %op2
+  ret <4 x i32> %res
+}
+
+define void @lshr_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+; CHECK-LABEL: lshr_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    lsr z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    lsr z1.s, p0/m, z1.s, z3.s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op2 = load <8 x i32>, <8 x i32>* %b
+  %res = lshr <8 x i32> %op1, %op2
+  store <8 x i32> %res, <8 x i32>* %a
+  ret void
+}
+
+define <1 x i64> @lshr_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 {
+; CHECK-LABEL: lshr_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = lshr <1 x i64> %op1, %op2
+  ret <1 x i64> %res
+}
+
+define <2 x i64> @lshr_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
+; CHECK-LABEL: lshr_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = lshr <2 x i64> %op1, %op2
+  ret <2 x i64> %res
+}
+
+define void @lshr_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+; CHECK-LABEL: lshr_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    lsr z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    lsr z1.d, p0/m, z1.d, z3.d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, <4 x i64>* %a
+  %op2 = load <4 x i64>, <4 x i64>* %b
+  %res = lshr <4 x i64> %op1, %op2
+  store <4 x i64> %res, <4 x i64>* %a
+  ret void
+}
+
+;
+; SHL
+;
+
+define <2 x i8> @shl_v2i8(<2 x i8> %op1, <2 x i8> %op2) #0 {
+; CHECK-LABEL: shl_v2i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI28_0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI28_0]
+; CHECK-NEXT:    and z1.d, z1.d, z2.d
+; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shl <2 x i8> %op1, %op2
+  ret <2 x i8> %res
+}
+
+define <4 x i8> @shl_v4i8(<4 x i8> %op1, <4 x i8> %op2) #0 {
+; CHECK-LABEL: shl_v4i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    adrp x8, .LCPI29_0
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI29_0]
+; CHECK-NEXT:    and z1.d, z1.d, z2.d
+; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shl <4 x i8> %op1, %op2
+  ret <4 x i8> %res
+}
+
+define <8 x i8> @shl_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
+; CHECK-LABEL: shl_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl8
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shl <8 x i8> %op1, %op2
+  ret <8 x i8> %res
+}
+
+define <16 x i8> @shl_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
+; CHECK-LABEL: shl_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shl <16 x i8> %op1, %op2
+  ret <16 x i8> %res
+}
+
+define void @shl_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
+; CHECK-LABEL: shl_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.b, vl16
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    lsl z0.b, p0/m, z0.b, z2.b
+; CHECK-NEXT:    lsl z1.b, p0/m, z1.b, z3.b
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <32 x i8>, <32 x i8>* %a
+  %op2 = load <32 x i8>, <32 x i8>* %b
+  %res = shl <32 x i8> %op1, %op2
+  store <32 x i8> %res, <32 x i8>* %a
+  ret void
+}
+
+define <4 x i16> @shl_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
+; CHECK-LABEL: shl_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shl <4 x i16> %op1, %op2
+  ret <4 x i16> %res
+}
+
+define <8 x i16> @shl_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
+; CHECK-LABEL: shl_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shl <8 x i16> %op1, %op2
+  ret <8 x i16> %res
+}
+
+define void @shl_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
+; CHECK-LABEL: shl_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    lsl z0.h, p0/m, z0.h, z2.h
+; CHECK-NEXT:    lsl z1.h, p0/m, z1.h, z3.h
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <16 x i16>, <16 x i16>* %a
+  %op2 = load <16 x i16>, <16 x i16>* %b
+  %res = shl <16 x i16> %op1, %op2
+  store <16 x i16> %res, <16 x i16>* %a
+  ret void
+}
+
+define <2 x i32> @shl_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 {
+; CHECK-LABEL: shl_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shl <2 x i32> %op1, %op2
+  ret <2 x i32> %res
+}
+
+define <4 x i32> @shl_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
+; CHECK-LABEL: shl_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shl <4 x i32> %op1, %op2
+  ret <4 x i32> %res
+}
+
+define void @shl_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
+; CHECK-LABEL: shl_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    lsl z0.s, p0/m, z0.s, z2.s
+; CHECK-NEXT:    lsl z1.s, p0/m, z1.s, z3.s
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x i32>, <8 x i32>* %a
+  %op2 = load <8 x i32>, <8 x i32>* %b
+  %res = shl <8 x i32> %op1, %op2
+  store <8 x i32> %res, <8 x i32>* %a
+  ret void
+}
+
+define <1 x i64> @shl_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 {
+; CHECK-LABEL: shl_v1i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    // kill: def $d1 killed $d1 def $z1
+; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shl <1 x i64> %op1, %op2
+  ret <1 x i64> %res
+}
+
+define <2 x i64> @shl_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
+; CHECK-LABEL: shl_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    // kill: def $q1 killed $q1 def $z1
+; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
+; CHECK-NEXT:    ret
+  %res = shl <2 x i64> %op1, %op2
+  ret <2 x i64> %res
+}
+
+define void @shl_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
+; CHECK-LABEL: shl_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q2, q3, [x1]
+; CHECK-NEXT:    lsl z0.d, p0/m, z0.d, z2.d
+; CHECK-NEXT:    lsl z1.d, p0/m, z1.d, z3.d
+; CHECK-NEXT:    stp q0, q1, [x0]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x i64>, <4 x i64>* %a
+  %op2 = load <4 x i64>, <4 x i64>* %b
+  %res = shl <4 x i64> %op1, %op2
+  store <4 x i64> %res, <4 x i64>* %a
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
index ee4ef1da16af..953429836eb9 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
@@ -6,8 +6,8 @@ target triple = "aarch64-unknown-linux-gnu"
 define <4 x i8> @load_v4i8(<4 x i8>* %a) #0 {
 ; CHECK-LABEL: load_v4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ptrue p0.h, vl4
-; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    uunpklo z0.h, z0.b
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %load = load <4 x i8>, <4 x i8>* %a
@@ -44,12 +44,14 @@ define <32 x i8> @load_v32i8(<32 x i8>* %a) #0 {
 define <2 x i16> @load_v2i16(<2 x i16>* %a) #0 {
 ; CHECK-LABEL: load_v2i16:
 ; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldrh w8, [x0, #2]
-; CHECK-NEXT:    ldrh w9, [x0]
-; CHECK-NEXT:    fmov s0, w8
-; CHECK-NEXT:    fmov s1, w9
-; CHECK-NEXT:    zip1 z0.s, z1.s, z0.s
-; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
+; CHECK-NEXT:    str w8, [sp, #12]
+; CHECK-NEXT:    ldrh w8, [x0]
+; CHECK-NEXT:    str w8, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %load = load <2 x i16>, <2 x i16>* %a
   ret <2 x i16> %load

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
new file mode 100644
index 000000000000..fdcc96974f7b
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-shuffle.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; Currently there is no custom lowering for vector shuffles operating on types
+; bigger than NEON. However, having no support opens us up to a code generator
+; hang when expanding BUILD_VECTOR. Here we just validate that the problematic
+; case successfully exits code generation.
+define void @hang_when_merging_stores_after_legalisation(<8 x i32>* %a, <2 x i32> %b) #0 {
+; CHECK-LABEL: hang_when_merging_stores_after_legalisation:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    stp w8, w8, [sp, #8]
+; CHECK-NEXT:    stp w8, w8, [sp]
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    stp q0, q0, [x0]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %splat = shufflevector <2 x i32> %b, <2 x i32> undef, <8 x i32> zeroinitializer
+  %interleaved.vec = shufflevector <8 x i32> %splat, <8 x i32> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
+  store <8 x i32> %interleaved.vec, <8 x i32>* %a, align 4
+  ret void
+}
+
+; Ensure we don't crash when trying to lower a shuffle via an extract
+define void @crash_when_lowering_extract_shuffle(<32 x i32>* %dst, i1 %cond) #0 {
+; CHECK-LABEL: crash_when_lowering_extract_shuffle:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ret
+  %broadcast.splat = shufflevector <32 x i1> zeroinitializer, <32 x i1> zeroinitializer, <32 x i32> zeroinitializer
+  br i1 %cond, label %exit, label %vector.body
+
+vector.body:
+  %1 = load <32 x i32>, <32 x i32>* %dst, align 16
+  %predphi = select <32 x i1> %broadcast.splat, <32 x i32> zeroinitializer, <32 x i32> %1
+  store <32 x i32> %predphi, <32 x i32>* %dst, align 16
+  br label %exit
+
+exit:
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll
index 25abc6010728..4b40f0d2e776 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-stores.ll
@@ -65,7 +65,8 @@ define void @store_v2f16(<2 x half>* %a) #0 {
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    adrp x8, .LCPI5_0
 ; CHECK-NEXT:    ldr d0, [x8, :lo12:.LCPI5_0]
-; CHECK-NEXT:    str s0, [x0]
+; CHECK-NEXT:    fmov w8, s0
+; CHECK-NEXT:    str w8, [x0]
 ; CHECK-NEXT:    ret
   store <2 x half> zeroinitializer, <2 x half>* %a
   ret void


        

