[llvm] 2eb42e3 - [AArch64][SVE] Add fixed type lowering for EXTRACT_SUBVECTOR

Bradley Smith via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 12 08:05:39 PDT 2021


Author: Bradley Smith
Date: 2021-10-12T14:56:15Z
New Revision: 2eb42e3d2a4aa6806ad810da2ff3707cd7e47487

URL: https://github.com/llvm/llvm-project/commit/2eb42e3d2a4aa6806ad810da2ff3707cd7e47487
DIFF: https://github.com/llvm/llvm-project/commit/2eb42e3d2a4aa6806ad810da2ff3707cd7e47487.diff

LOG: [AArch64][SVE] Add fixed type lowering for EXTRACT_SUBVECTOR

Depends on D111135

Differential Revision: https://reviews.llvm.org/D111165
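
The lowering converts the fixed-length input to its scalable container type,
emits an ISD::VECTOR_SPLICE using the extract index, and converts the result
back, so extracting the high half of a wide fixed vector now selects to a
single SVE EXT instead of being spilled through the stack. As an illustrative
sketch (mirroring the extract_subvector_v8i32 case in the new test file),
with -aarch64-sve-vector-bits-min=256:

  define void @extract_subvector_v8i32(<8 x i32>* %a, <4 x i32>* %b) #0 {
    %op = load <8 x i32>, <8 x i32>* %a
    ; Extract the high four elements (starting at index 4).
    %ret = call <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %op, i64 4)
    store <4 x i32> %ret, <4 x i32>* %b
    ret void
  }
  declare <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32>, i64)
  attributes #0 = { "target-features"="+sve" }

  ; Expected codegen with this patch (approximate):
  ;   ptrue p0.s, vl8
  ;   ld1w { z0.s }, p0/z, [x0]
  ;   ext z0.b, z0.b, z0.b, #16
  ;   str q0, [x1]
  ;   ret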

Added: 
    llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sve-fixed-length-ext-loads.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-fp-to-int.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
    llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 069f30e912107..c7082bcc08e23 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10741,6 +10741,18 @@ SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
       InVT.getSizeInBits() == 128)
     return Op;
 
+  if (useSVEForFixedLengthVectorVT(InVT)) {
+    SDLoc DL(Op);
+
+    EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
+    SDValue NewInVec =
+        convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
+
+    SDValue Splice = DAG.getNode(ISD::VECTOR_SPLICE, DL, ContainerVT, NewInVec,
+                                 NewInVec, DAG.getConstant(Idx, DL, MVT::i64));
+    return convertFromScalableVector(DAG, Op.getValueType(), Splice);
+  }
+
   return SDValue();
 }
 

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-ext-loads.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-ext-loads.ll
index 9b833e408fa9e..20954853d61b2 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-ext-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-ext-loads.ll
@@ -51,14 +51,13 @@ define <16 x i32> @load_zext_v16i16i32(<16 x i16>* %ap) #0 {
  ; Ensure sensible type legalisation
   ; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].h, vl16
   ; VBITS_EQ_256-DAG: ld1h { [[Z0:z[0-9]+]].h }, [[PG]]/z, [x0]
-  ; VBITS_EQ_256-DAG: mov x9, sp
-  ; VBITS_EQ_256-DAG: st1h { [[Z0]].h }, [[PG]], [x9]
-  ; VBITS_EQ_256-DAG: ldp q[[R0:[0-9]+]], q[[R1:[0-9]+]], [sp]
+  ; VBITS_EQ_256-DAG: mov x9, #8
   ; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].s, vl8
-  ; VBITS_EQ_256-DAG: uunpklo z[[R0]].s, z[[R0]].h
-  ; VBITS_EQ_256-DAG: uunpklo z[[R1]].s, z[[R1]].h
-  ; VBITS_EQ_256-DAG: st1w { z[[R1]].s }, [[PG1]], [x8, x9, lsl #2]
-  ; VBITS_EQ_256-DAG: st1w { z[[R0]].s }, [[PG1]], [x8]
+  ; VBITS_EQ_256-DAG: uunpklo [[R0:z[0-9]+]].s, [[Z0]].h
+  ; VBITS_EQ_256-DAG: ext [[Z0]].b, [[Z0]].b, [[Z0]].b, #16
+  ; VBITS_EQ_256-DAG: uunpklo [[R1:z[0-9]+]].s, [[Z0]].h
+  ; VBITS_EQ_256-DAG: st1w { [[R1]].s }, [[PG1]], [x8, x9, lsl #2]
+  ; VBITS_EQ_256-DAG: st1w { [[R0]].s }, [[PG1]], [x8]
   ; VBITS_EQ_256-DAG: ret
   %a = load <16 x i16>, <16 x i16>* %ap
   %val = zext <16 x i16> %a to <16 x i32>
@@ -118,14 +117,13 @@ define <16 x i32> @load_sext_v16i16i32(<16 x i16>* %ap) #0 {
  ; Ensure sensible type legalisation
   ; VBITS_EQ_256-DAG: ptrue [[PG:p[0-9]+]].h, vl16
   ; VBITS_EQ_256-DAG: ld1h { [[Z0:z[0-9]+]].h }, [[PG]]/z, [x0]
-  ; VBITS_EQ_256-DAG: mov x9, sp
-  ; VBITS_EQ_256-DAG: st1h { [[Z0]].h }, [[PG]], [x9]
-  ; VBITS_EQ_256-DAG: ldp q[[R0:[0-9]+]], q[[R1:[0-9]+]], [sp]
+  ; VBITS_EQ_256-DAG: mov x9, #8
   ; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].s, vl8
-  ; VBITS_EQ_256-DAG: sunpklo z[[R0]].s, z[[R0]].h
-  ; VBITS_EQ_256-DAG: sunpklo z[[R1]].s, z[[R1]].h
-  ; VBITS_EQ_256-DAG: st1w { z[[R1]].s }, [[PG1]], [x8, x9, lsl #2]
-  ; VBITS_EQ_256-DAG: st1w { z[[R0]].s }, [[PG1]], [x8]
+  ; VBITS_EQ_256-DAG: sunpklo [[R0:z[0-9]+]].s, [[Z0]].h
+  ; VBITS_EQ_256-DAG: ext [[Z0]].b, [[Z0]].b, [[Z0]].b, #16
+  ; VBITS_EQ_256-DAG: sunpklo [[R1:z[0-9]+]].s, [[Z0]].h
+  ; VBITS_EQ_256-DAG: st1w { [[R1]].s }, [[PG1]], [x8, x9, lsl #2]
+  ; VBITS_EQ_256-DAG: st1w { [[R0]].s }, [[PG1]], [x8]
   ; VBITS_EQ_256-DAG: ret
   %a = load <16 x i16>, <16 x i16>* %ap
   %val = sext <16 x i16> %a to <16 x i32>

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll
new file mode 100644
index 0000000000000..305eadca87029
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-extract-subvector.ll
@@ -0,0 +1,688 @@
+; RUN: llc -aarch64-sve-vector-bits-min=128  < %s | FileCheck %s -check-prefix=NO_SVE
+; RUN: llc -aarch64-sve-vector-bits-min=256  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_EQ_256
+; RUN: llc -aarch64-sve-vector-bits-min=384  < %s | FileCheck %s -check-prefixes=CHECK
+; RUN: llc -aarch64-sve-vector-bits-min=512  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=640  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=768  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=896  < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512
+; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024
+; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512,VBITS_GE_1024,VBITS_GE_2048
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; Don't use SVE when its registers are no bigger than NEON.
+; NO_SVE-NOT: ptrue
+
+; i8
+
+; Don't use SVE for 64-bit vectors.
+define <4 x i8> @extract_subvector_v8i8(<8 x i8> %op) #0 {
+; CHECK-LABEL: extract_subvector_v8i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    zip2 v0.8b, v0.8b, v0.8b
+; CHECK-NEXT:    ret
+  %ret = call <4 x i8> @llvm.experimental.vector.extract.v4i8.v8i8(<8 x i8> %op, i64 4)
+  ret <4 x i8> %ret
+}
+
+; Don't use SVE for 128-bit vectors.
+define <8 x i8> @extract_subvector_v16i8(<16 x i8> %op) #0 {
+; CHECK-LABEL: extract_subvector_v16i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+  %ret = call <8 x i8> @llvm.experimental.vector.extract.v8i8.v16i8(<16 x i8> %op, i64 8)
+  ret <8 x i8> %ret
+}
+
+define void @extract_subvector_v32i8(<32 x i8>* %a, <16 x i8>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v32i8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.b, vl32
+; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #16
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <32 x i8>, <32 x i8>* %a
+  %ret = call <16 x i8> @llvm.experimental.vector.extract.v16i8.v32i8(<32 x i8> %op, i64 16)
+  store <16 x i8> %ret, <16 x i8>* %b
+  ret void
+}
+
+define void @extract_subvector_v64i8(<64 x i8>* %a, <32 x i8>* %b) #0 {
+; VBITS_EQ_256-LABEL: extract_subvector_v64i8:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov w8, #32
+; VBITS_EQ_256-NEXT:    ptrue p0.b, vl32
+; VBITS_EQ_256-NEXT:    ld1b { z0.b }, p0/z, [x0, x8]
+; VBITS_EQ_256-NEXT:    st1b { z0.b }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: extract_subvector_v64i8:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.b, vl64
+; VBITS_GE_512-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.b, vl32
+; VBITS_GE_512-NEXT:    ext z0.b, z0.b, z0.b, #32
+; VBITS_GE_512-NEXT:    st1b { z0.b }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
+  %op = load <64 x i8>, <64 x i8>* %a
+  %ret = call <32 x i8> @llvm.experimental.vector.extract.v32i8.v64i8(<64 x i8> %op, i64 32)
+  store <32 x i8> %ret, <32 x i8>* %b
+  ret void
+}
+
+define void @extract_subvector_v128i8(<128 x i8>* %a, <64 x i8>* %b) #0 {
+; VBITS_GE_1024-LABEL: extract_subvector_v128i8:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.b, vl128
+; VBITS_GE_1024-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.b, vl64
+; VBITS_GE_1024-NEXT:    ext z0.b, z0.b, z0.b, #64
+; VBITS_GE_1024-NEXT:    st1b { z0.b }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
+  %op = load <128 x i8>, <128 x i8>* %a
+  %ret = call <64 x i8> @llvm.experimental.vector.extract.v64i8.v128i8(<128 x i8> %op, i64 64)
+  store <64 x i8> %ret, <64 x i8>* %b
+  ret void
+}
+
+define void @extract_subvector_v256i8(<256 x i8>* %a, <128 x i8>* %b) #0 {
+; VBITS_GE_2048-LABEL: extract_subvector_v256i8:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.b, vl256
+; VBITS_GE_2048-NEXT:    ld1b { z0.b }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.b, vl128
+; VBITS_GE_2048-NEXT:    ext z0.b, z0.b, z0.b, #128
+; VBITS_GE_2048-NEXT:    st1b { z0.b }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
+  %op = load <256 x i8>, <256 x i8>* %a
+  %ret = call <128 x i8> @llvm.experimental.vector.extract.v128i8.v256i8(<256 x i8> %op, i64 128)
+  store <128 x i8> %ret, <128 x i8>* %b
+  ret void
+}
+
+; i16
+
+; Don't use SVE for 64-bit vectors.
+define <2 x i16> @extract_subvector_v4i16(<4 x i16> %op) #0 {
+; CHECK-LABEL: extract_subvector_v4i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    umov w8, v0.h[2]
+; CHECK-NEXT:    umov w9, v0.h[3]
+; CHECK-NEXT:    fmov s0, w8
+; CHECK-NEXT:    mov v0.s[1], w9
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+  %ret = call <2 x i16> @llvm.experimental.vector.extract.v2i16.v4i16(<4 x i16> %op, i64 2)
+  ret <2 x i16> %ret
+}
+
+; Don't use SVE for 128-bit vectors.
+define <4 x i16> @extract_subvector_v8i16(<8 x i16> %op) #0 {
+; CHECK-LABEL: extract_subvector_v8i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+  %ret = call <4 x i16> @llvm.experimental.vector.extract.v4i16.v8i16(<8 x i16> %op, i64 4)
+  ret <4 x i16> %ret
+}
+
+define void @extract_subvector_v16i16(<16 x i16>* %a, <8 x i16>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v16i16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #16
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <16 x i16>, <16 x i16>* %a
+  %ret = call <8 x i16> @llvm.experimental.vector.extract.v8i16.v16i16(<16 x i16> %op, i64 8)
+  store <8 x i16> %ret, <8 x i16>* %b
+  ret void
+}
+
+define void @extract_subvector_v32i16(<32 x i16>* %a, <16 x i16>* %b) #0 {
+; VBITS_EQ_256-LABEL: extract_subvector_v32i16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #16
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: extract_subvector_v32i16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_512-NEXT:    ext z0.b, z0.b, z0.b, #32
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
+  %op = load <32 x i16>, <32 x i16>* %a
+  %ret = call <16 x i16> @llvm.experimental.vector.extract.v16i16.v32i16(<32 x i16> %op, i64 16)
+  store <16 x i16> %ret, <16 x i16>* %b
+  ret void
+}
+
+define void @extract_subvector_v64i16(<64 x i16>* %a, <32 x i16>* %b) #0 {
+; VBITS_GE_1024-LABEL: extract_subvector_v64i16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_1024-NEXT:    ext z0.b, z0.b, z0.b, #64
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
+  %op = load <64 x i16>, <64 x i16>* %a
+  %ret = call <32 x i16> @llvm.experimental.vector.extract.v32i16.v64i16(<64 x i16> %op, i64 32)
+  store <32 x i16> %ret, <32 x i16>* %b
+  ret void
+}
+
+define void @extract_subvector_v128i16(<128 x i16>* %a, <64 x i16>* %b) #0 {
+; VBITS_GE_2048-LABEL: extract_subvector_v128i16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_2048-NEXT:    ext z0.b, z0.b, z0.b, #128
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
+  %op = load <128 x i16>, <128 x i16>* %a
+  %ret = call <64 x i16> @llvm.experimental.vector.extract.v64i16.v128i16(<128 x i16> %op, i64 64)
+  store <64 x i16> %ret, <64 x i16>* %b
+  ret void
+}
+
+; i32
+
+; Don't use SVE for 64-bit vectors.
+define <1 x i32> @extract_subvector_v2i32(<2 x i32> %op) #0 {
+; CHECK-LABEL: extract_subvector_v2i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.2s, v0.s[1]
+; CHECK-NEXT:    ret
+  %ret = call <1 x i32> @llvm.experimental.vector.extract.v1i32.v2i32(<2 x i32> %op, i64 1)
+  ret <1 x i32> %ret
+}
+
+; Don't use SVE for 128-bit vectors.
+define <2 x i32> @extract_subvector_v4i32(<4 x i32> %op) #0 {
+; CHECK-LABEL: extract_subvector_v4i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+  %ret = call <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<4 x i32> %op, i64 2)
+  ret <2 x i32> %ret
+}
+
+define void @extract_subvector_v8i32(<8 x i32>* %a, <4 x i32>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v8i32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #16
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <8 x i32>, <8 x i32>* %a
+  %ret = call <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32> %op, i64 4)
+  store <4 x i32> %ret, <4 x i32>* %b
+  ret void
+}
+
+define void @extract_subvector_v16i32(<16 x i32>* %a, <8 x i32>* %b) #0 {
+; VBITS_EQ_256-LABEL: extract_subvector_v16i32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #8
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: extract_subvector_v16i32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_512-NEXT:    ext z0.b, z0.b, z0.b, #32
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
+  %op = load <16 x i32>, <16 x i32>* %a
+  %ret = call <8 x i32> @llvm.experimental.vector.extract.v8i32.v16i32(<16 x i32> %op, i64 8)
+  store <8 x i32> %ret, <8 x i32>* %b
+  ret void
+}
+
+define void @extract_subvector_v32i32(<32 x i32>* %a, <16 x i32>* %b) #0 {
+; VBITS_GE_1024-LABEL: extract_subvector_v32i32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_1024-NEXT:    ext z0.b, z0.b, z0.b, #64
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
+  %op = load <32 x i32>, <32 x i32>* %a
+  %ret = call <16 x i32> @llvm.experimental.vector.extract.v16i32.v32i32(<32 x i32> %op, i64 16)
+  store <16 x i32> %ret, <16 x i32>* %b
+  ret void
+}
+
+define void @extract_subvector_v64i32(<64 x i32>* %a, <32 x i32>* %b) #0 {
+; VBITS_GE_2048-LABEL: extract_subvector_v64i32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ext z0.b, z0.b, z0.b, #128
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
+  %op = load <64 x i32>, <64 x i32>* %a
+  %ret = call <32 x i32> @llvm.experimental.vector.extract.v32i32.v64i32(<64 x i32> %op, i64 32)
+  store <32 x i32> %ret, <32 x i32>* %b
+  ret void
+}
+
+; i64
+
+; Don't use SVE for 128-bit vectors.
+define <1 x i64> @extract_subvector_v2i64(<2 x i64> %op) #0 {
+; CHECK-LABEL: extract_subvector_v2i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+  %ret = call <1 x i64> @llvm.experimental.vector.extract.v1i64.v2i64(<2 x i64> %op, i64 1)
+  ret <1 x i64> %ret
+}
+
+define void @extract_subvector_v4i64(<4 x i64>* %a, <2 x i64>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v4i64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #16
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <4 x i64>, <4 x i64>* %a
+  %ret = call <2 x i64> @llvm.experimental.vector.extract.v2i64.v4i64(<4 x i64> %op, i64 2)
+  store <2 x i64> %ret, <2 x i64>* %b
+  ret void
+}
+
+define void @extract_subvector_v8i64(<8 x i64>* %a, <4 x i64>* %b) #0 {
+; VBITS_EQ_256-LABEL: extract_subvector_v8i64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: extract_subvector_v8i64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl4
+; VBITS_GE_512-NEXT:    ext z0.b, z0.b, z0.b, #32
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
+  %op = load <8 x i64>, <8 x i64>* %a
+  %ret = call <4 x i64> @llvm.experimental.vector.extract.v4i64.v8i64(<8 x i64> %op, i64 4)
+  store <4 x i64> %ret, <4 x i64>* %b
+  ret void
+}
+
+define void @extract_subvector_v16i64(<16 x i64>* %a, <8 x i64>* %b) #0 {
+; VBITS_GE_1024-LABEL: extract_subvector_v16i64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_1024-NEXT:    ext z0.b, z0.b, z0.b, #64
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
+  %op = load <16 x i64>, <16 x i64>* %a
+  %ret = call <8 x i64> @llvm.experimental.vector.extract.v8i64.v16i64(<16 x i64> %op, i64 8)
+  store <8 x i64> %ret, <8 x i64>* %b
+  ret void
+}
+
+define void @extract_subvector_v32i64(<32 x i64>* %a, <16 x i64>* %b) #0 {
+; VBITS_GE_2048-LABEL: extract_subvector_v32i64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_2048-NEXT:    ext z0.b, z0.b, z0.b, #128
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
+  %op = load <32 x i64>, <32 x i64>* %a
+  %ret = call <16 x i64> @llvm.experimental.vector.extract.v16i64.v32i64(<32 x i64> %op, i64 16)
+  store <16 x i64> %ret, <16 x i64>* %b
+  ret void
+}
+
+; f16
+
+; Don't use SVE for 64-bit vectors.
+define <2 x half> @extract_subvector_v4f16(<4 x half> %op) #0 {
+; CHECK-LABEL: extract_subvector_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.2s, v0.s[1]
+; CHECK-NEXT:    ret
+  %ret = call <2 x half> @llvm.experimental.vector.extract.v2f16.v4f16(<4 x half> %op, i64 2)
+  ret <2 x half> %ret
+}
+
+; Don't use SVE for 128-bit vectors.
+define <4 x half> @extract_subvector_v8f16(<8 x half> %op) #0 {
+; CHECK-LABEL: extract_subvector_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+  %ret = call <4 x half> @llvm.experimental.vector.extract.v4f16.v8f16(<8 x half> %op, i64 4)
+  ret <4 x half> %ret
+}
+
+define void @extract_subvector_v16f16(<16 x half>* %a, <8 x half>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v16f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.h, vl16
+; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #16
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <16 x half>, <16 x half>* %a
+  %ret = call <8 x half> @llvm.experimental.vector.extract.v8f16.v16f16(<16 x half> %op, i64 8)
+  store <8 x half> %ret, <8 x half>* %b
+  ret void
+}
+
+define void @extract_subvector_v32f16(<32 x half>* %a, <16 x half>* %b) #0 {
+; VBITS_EQ_256-LABEL: extract_subvector_v32f16:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #16
+; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
+; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: extract_subvector_v32f16:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_512-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.h, vl16
+; VBITS_GE_512-NEXT:    ext z0.b, z0.b, z0.b, #32
+; VBITS_GE_512-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
+  %op = load <32 x half>, <32 x half>* %a
+  %ret = call <16 x half> @llvm.experimental.vector.extract.v16f16.v32f16(<32 x half> %op, i64 16)
+  store <16 x half> %ret, <16 x half>* %b
+  ret void
+}
+
+define void @extract_subvector_v64f16(<64 x half>* %a, <32 x half>* %b) #0 {
+; VBITS_GE_1024-LABEL: extract_subvector_v64f16:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_1024-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.h, vl32
+; VBITS_GE_1024-NEXT:    ext z0.b, z0.b, z0.b, #64
+; VBITS_GE_1024-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
+  %op = load <64 x half>, <64 x half>* %a
+  %ret = call <32 x half> @llvm.experimental.vector.extract.v32f16.v64f16(<64 x half> %op, i64 32)
+  store <32 x half> %ret, <32 x half>* %b
+  ret void
+}
+
+define void @extract_subvector_v128f16(<128 x half>* %a, <64 x half>* %b) #0 {
+; VBITS_GE_2048-LABEL: extract_subvector_v128f16:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl128
+; VBITS_GE_2048-NEXT:    ld1h { z0.h }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.h, vl64
+; VBITS_GE_2048-NEXT:    ext z0.b, z0.b, z0.b, #128
+; VBITS_GE_2048-NEXT:    st1h { z0.h }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
+  %op = load <128 x half>, <128 x half>* %a
+  %ret = call <64 x half> @llvm.experimental.vector.extract.v64f16.v128f16(<128 x half> %op, i64 64)
+  store <64 x half> %ret, <64 x half>* %b
+  ret void
+}
+
+; f32
+
+; Don't use SVE for 64-bit vectors.
+define <1 x float> @extract_subvector_v2f32(<2 x float> %op) #0 {
+; CHECK-LABEL: extract_subvector_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
+; CHECK-NEXT:    dup v0.2s, v0.s[1]
+; CHECK-NEXT:    ret
+  %ret = call <1 x float> @llvm.experimental.vector.extract.v1f32.v2f32(<2 x float> %op, i64 1)
+  ret <1 x float> %ret
+}
+
+; Don't use SVE for 128-bit vectors.
+define <2 x float> @extract_subvector_v4f32(<4 x float> %op) #0 {
+; CHECK-LABEL: extract_subvector_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+  %ret = call <2 x float> @llvm.experimental.vector.extract.v2f32.v4f32(<4 x float> %op, i64 2)
+  ret <2 x float> %ret
+}
+
+define void @extract_subvector_v8f32(<8 x float>* %a, <4 x float>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.s, vl8
+; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #16
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <8 x float>, <8 x float>* %a
+  %ret = call <4 x float> @llvm.experimental.vector.extract.v4f32.v8f32(<8 x float> %op, i64 4)
+  store <4 x float> %ret, <4 x float>* %b
+  ret void
+}
+
+define void @extract_subvector_v16f32(<16 x float>* %a, <8 x float>* %b) #0 {
+; VBITS_EQ_256-LABEL: extract_subvector_v16f32:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #8
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: extract_subvector_v16f32:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_512-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.s, vl8
+; VBITS_GE_512-NEXT:    ext z0.b, z0.b, z0.b, #32
+; VBITS_GE_512-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
+  %op = load <16 x float>, <16 x float>* %a
+  %ret = call <8 x float> @llvm.experimental.vector.extract.v8f32.v16f32(<16 x float> %op, i64 8)
+  store <8 x float> %ret, <8 x float>* %b
+  ret void
+}
+
+define void @extract_subvector_v32f32(<32 x float>* %a, <16 x float>* %b) #0 {
+; VBITS_GE_1024-LABEL: extract_subvector_v32f32:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_1024-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.s, vl16
+; VBITS_GE_1024-NEXT:    ext z0.b, z0.b, z0.b, #64
+; VBITS_GE_1024-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
+  %op = load <32 x float>, <32 x float>* %a
+  %ret = call <16 x float> @llvm.experimental.vector.extract.v16f32.v32f32(<32 x float> %op, i64 16)
+  store <16 x float> %ret, <16 x float>* %b
+  ret void
+}
+
+define void @extract_subvector_v64f32(<64 x float>* %a, <32 x float>* %b) #0 {
+; VBITS_GE_2048-LABEL: extract_subvector_v64f32:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl64
+; VBITS_GE_2048-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.s, vl32
+; VBITS_GE_2048-NEXT:    ext z0.b, z0.b, z0.b, #128
+; VBITS_GE_2048-NEXT:    st1w { z0.s }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
+  %op = load <64 x float>, <64 x float>* %a
+  %ret = call <32 x float> @llvm.experimental.vector.extract.v32f32.v64f32(<64 x float> %op, i64 32)
+  store <32 x float> %ret, <32 x float>* %b
+  ret void
+}
+
+; f64
+
+; Don't use SVE for 128-bit vectors.
+define <1 x double> @extract_subvector_v2f64(<2 x double> %op) #0 {
+; CHECK-LABEL: extract_subvector_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ext v0.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    ret
+  %ret = call <1 x double> @llvm.experimental.vector.extract.v1f64.v2f64(<2 x double> %op, i64 1)
+  ret <1 x double> %ret
+}
+
+define void @extract_subvector_v4f64(<4 x double>* %a, <2 x double>* %b) #0 {
+; CHECK-LABEL: extract_subvector_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ptrue p0.d, vl4
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    ext z0.b, z0.b, z0.b, #16
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
+  %op = load <4 x double>, <4 x double>* %a
+  %ret = call <2 x double> @llvm.experimental.vector.extract.v2f64.v4f64(<4 x double> %op, i64 2)
+  store <2 x double> %ret, <2 x double>* %b
+  ret void
+}
+
+define void @extract_subvector_v8f64(<8 x double>* %a, <4 x double>* %b) #0 {
+; VBITS_EQ_256-LABEL: extract_subvector_v8f64:
+; VBITS_EQ_256:       // %bb.0:
+; VBITS_EQ_256-NEXT:    mov x8, #4
+; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_EQ_256-NEXT:    ret
+;
+; VBITS_GE_512-LABEL: extract_subvector_v8f64:
+; VBITS_GE_512:       // %bb.0:
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_512-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_512-NEXT:    ptrue p0.d, vl4
+; VBITS_GE_512-NEXT:    ext z0.b, z0.b, z0.b, #32
+; VBITS_GE_512-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_512-NEXT:    ret
+  %op = load <8 x double>, <8 x double>* %a
+  %ret = call <4 x double> @llvm.experimental.vector.extract.v4f64.v8f64(<8 x double> %op, i64 4)
+  store <4 x double> %ret, <4 x double>* %b
+  ret void
+}
+
+define void @extract_subvector_v16f64(<16 x double>* %a, <8 x double>* %b) #0 {
+; VBITS_GE_1024-LABEL: extract_subvector_v16f64:
+; VBITS_GE_1024:       // %bb.0:
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_1024-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_1024-NEXT:    ptrue p0.d, vl8
+; VBITS_GE_1024-NEXT:    ext z0.b, z0.b, z0.b, #64
+; VBITS_GE_1024-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_1024-NEXT:    ret
+  %op = load <16 x double>, <16 x double>* %a
+  %ret = call <8 x double> @llvm.experimental.vector.extract.v8f64.v16f64(<16 x double> %op, i64 8)
+  store <8 x double> %ret, <8 x double>* %b
+  ret void
+}
+
+define void @extract_subvector_v32f64(<32 x double>* %a, <16 x double>* %b) #0 {
+; VBITS_GE_2048-LABEL: extract_subvector_v32f64:
+; VBITS_GE_2048:       // %bb.0:
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl32
+; VBITS_GE_2048-NEXT:    ld1d { z0.d }, p0/z, [x0]
+; VBITS_GE_2048-NEXT:    ptrue p0.d, vl16
+; VBITS_GE_2048-NEXT:    ext z0.b, z0.b, z0.b, #128
+; VBITS_GE_2048-NEXT:    st1d { z0.d }, p0, [x1]
+; VBITS_GE_2048-NEXT:    ret
+  %op = load <32 x double>, <32 x double>* %a
+  %ret = call <16 x double> @llvm.experimental.vector.extract.v16f64.v32f64(<32 x double> %op, i64 16)
+  store <16 x double> %ret, <16 x double>* %b
+  ret void
+}
+
+declare <4 x i8> @llvm.experimental.vector.extract.v4i8.v8i8(<8 x i8>, i64)
+declare <8 x i8> @llvm.experimental.vector.extract.v8i8.v16i8(<16 x i8>, i64)
+declare <16 x i8> @llvm.experimental.vector.extract.v16i8.v32i8(<32 x i8>, i64)
+declare <32 x i8> @llvm.experimental.vector.extract.v32i8.v64i8(<64 x i8>, i64)
+declare <64 x i8> @llvm.experimental.vector.extract.v64i8.v128i8(<128 x i8>, i64)
+declare <128 x i8> @llvm.experimental.vector.extract.v128i8.v256i8(<256 x i8>, i64)
+
+declare <2 x i16> @llvm.experimental.vector.extract.v2i16.v4i16(<4 x i16>, i64)
+declare <4 x i16> @llvm.experimental.vector.extract.v4i16.v8i16(<8 x i16>, i64)
+declare <8 x i16> @llvm.experimental.vector.extract.v8i16.v16i16(<16 x i16>, i64)
+declare <16 x i16> @llvm.experimental.vector.extract.v16i16.v32i16(<32 x i16>, i64)
+declare <32 x i16> @llvm.experimental.vector.extract.v32i16.v64i16(<64 x i16>, i64)
+declare <64 x i16> @llvm.experimental.vector.extract.v64i16.v128i16(<128 x i16>, i64)
+
+declare <1 x i32> @llvm.experimental.vector.extract.v1i32.v2i32(<2 x i32>, i64)
+declare <2 x i32> @llvm.experimental.vector.extract.v2i32.v4i32(<4 x i32>, i64)
+declare <4 x i32> @llvm.experimental.vector.extract.v4i32.v8i32(<8 x i32>, i64)
+declare <8 x i32> @llvm.experimental.vector.extract.v8i32.v16i32(<16 x i32>, i64)
+declare <16 x i32> @llvm.experimental.vector.extract.v16i32.v32i32(<32 x i32>, i64)
+declare <32 x i32> @llvm.experimental.vector.extract.v32i32.v64i32(<64 x i32>, i64)
+
+declare <1 x i64> @llvm.experimental.vector.extract.v1i64.v2i64(<2 x i64>, i64)
+declare <2 x i64> @llvm.experimental.vector.extract.v2i64.v4i64(<4 x i64>, i64)
+declare <4 x i64> @llvm.experimental.vector.extract.v4i64.v8i64(<8 x i64>, i64)
+declare <8 x i64> @llvm.experimental.vector.extract.v8i64.v16i64(<16 x i64>, i64)
+declare <16 x i64> @llvm.experimental.vector.extract.v16i64.v32i64(<32 x i64>, i64)
+
+declare <2 x half> @llvm.experimental.vector.extract.v2f16.v4f16(<4 x half>, i64)
+declare <4 x half> @llvm.experimental.vector.extract.v4f16.v8f16(<8 x half>, i64)
+declare <8 x half> @llvm.experimental.vector.extract.v8f16.v16f16(<16 x half>, i64)
+declare <16 x half> @llvm.experimental.vector.extract.v16f16.v32f16(<32 x half>, i64)
+declare <32 x half> @llvm.experimental.vector.extract.v32f16.v64f16(<64 x half>, i64)
+declare <64 x half> @llvm.experimental.vector.extract.v64f16.v128f16(<128 x half>, i64)
+
+declare <1 x float> @llvm.experimental.vector.extract.v1f32.v2f32(<2 x float>, i64)
+declare <2 x float> @llvm.experimental.vector.extract.v2f32.v4f32(<4 x float>, i64)
+declare <4 x float> @llvm.experimental.vector.extract.v4f32.v8f32(<8 x float>, i64)
+declare <8 x float> @llvm.experimental.vector.extract.v8f32.v16f32(<16 x float>, i64)
+declare <16 x float> @llvm.experimental.vector.extract.v16f32.v32f32(<32 x float>, i64)
+declare <32 x float> @llvm.experimental.vector.extract.v32f32.v64f32(<64 x float>, i64)
+
+declare <1 x double> @llvm.experimental.vector.extract.v1f64.v2f64(<2 x double>, i64)
+declare <2 x double> @llvm.experimental.vector.extract.v2f64.v4f64(<4 x double>, i64)
+declare <4 x double> @llvm.experimental.vector.extract.v4f64.v8f64(<8 x double>, i64)
+declare <8 x double> @llvm.experimental.vector.extract.v8f64.v16f64(<16 x double>, i64)
+declare <16 x double> @llvm.experimental.vector.extract.v16f64.v32f64(<32 x double>, i64)
+
+attributes #0 = { "target-features"="+sve" }

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
index 048aaff8ec3bd..f9d913570a3f5 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-extend-trunc.ll
@@ -61,33 +61,22 @@ define void @fcvt_v8f16_v8f32(<8 x half>* %a, <8 x float>* %b) #0 {
 }
 
 define void @fcvt_v16f16_v16f32(<16 x half>* %a, <16 x float>* %b) #0 {
-; Ensure sensible type legalisation - fixed type extract_subvector codegen is poor currently.
+; Ensure sensible type legalisation.
 ; VBITS_EQ_256-LABEL: fcvt_v16f16_v16f32:
 ; VBITS_EQ_256:       // %bb.0:
-; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_EQ_256-NEXT:    sub x9, sp, #48
-; VBITS_EQ_256-NEXT:    mov x29, sp
-; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
-; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
-; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
-; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
 ; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
-; VBITS_EQ_256-NEXT:    mov x8, sp
-; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x8]
 ; VBITS_EQ_256-NEXT:    mov x8, #8
-; VBITS_EQ_256-NEXT:    ldp q0, q1, [sp]
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    uunpklo z1.s, z0.h
+; VBITS_EQ_256-NEXT:    ext z0.b, z0.b, z0.b, #16
 ; VBITS_EQ_256-NEXT:    uunpklo z0.s, z0.h
-; VBITS_EQ_256-NEXT:    fcvt z0.s, p0/m, z0.h
-; VBITS_EQ_256-NEXT:    uunpklo z1.s, z1.h
-; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x1]
 ; VBITS_EQ_256-NEXT:    fcvt z1.s, p0/m, z1.h
-; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x1, x8, lsl #2]
-; VBITS_EQ_256-NEXT:    mov sp, x29
-; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_EQ_256-NEXT:    fcvt z0.s, p0/m, z0.h
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x1]
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x1, x8, lsl #2]
 ; VBITS_EQ_256-NEXT:    ret
-;
+
 ; VBITS_GE_512-LABEL: fcvt_v16f16_v16f32:
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    ptrue p0.h, vl16
@@ -197,7 +186,7 @@ define void @fcvt_v8f16_v8f64(<8 x half>* %a, <8 x double>* %b) #0 {
 ; VBITS_EQ_256-NEXT:    fcvt z1.d, p0/m, z1.h
 ; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1, x8, lsl #3]
 ; VBITS_EQ_256-NEXT:    ret
-;
+
 ; VBITS_GE_512-LABEL: fcvt_v8f16_v8f64:
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    ldr q0, [x0]
@@ -288,33 +277,22 @@ define void @fcvt_v4f32_v4f64(<4 x float>* %a, <4 x double>* %b) #0 {
 }
 
 define void @fcvt_v8f32_v8f64(<8 x float>* %a, <8 x double>* %b) #0 {
-; Ensure sensible type legalisation - fixed type extract_subvector codegen is poor currently.
+; Ensure sensible type legalisation.
 ; VBITS_EQ_256-LABEL: fcvt_v8f32_v8f64:
 ; VBITS_EQ_256:       // %bb.0:
-; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_EQ_256-NEXT:    sub x9, sp, #48
-; VBITS_EQ_256-NEXT:    mov x29, sp
-; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
-; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
-; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
-; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
 ; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
-; VBITS_EQ_256-NEXT:    mov x8, sp
-; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x8]
 ; VBITS_EQ_256-NEXT:    mov x8, #4
-; VBITS_EQ_256-NEXT:    ldp q0, q1, [sp]
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    uunpklo z1.d, z0.s
+; VBITS_EQ_256-NEXT:    ext z0.b, z0.b, z0.b, #16
 ; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
-; VBITS_EQ_256-NEXT:    fcvt z0.d, p0/m, z0.s
-; VBITS_EQ_256-NEXT:    uunpklo z1.d, z1.s
-; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1]
 ; VBITS_EQ_256-NEXT:    fcvt z1.d, p0/m, z1.s
-; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1, x8, lsl #3]
-; VBITS_EQ_256-NEXT:    mov sp, x29
-; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_EQ_256-NEXT:    fcvt z0.d, p0/m, z0.s
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1]
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1, x8, lsl #3]
 ; VBITS_EQ_256-NEXT:    ret
-;
+
 ; VBITS_GE_512-LABEL: fcvt_v8f32_v8f64:
 ; VBITS_GE_512:       // %bb.0:
 ; VBITS_GE_512-NEXT:    ptrue p0.s, vl8

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-to-int.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-to-int.ll
index ba47688f6ca58..f4806abd06e44 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-to-int.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-fp-to-int.ll
@@ -156,16 +156,14 @@ define void @fcvtzu_v16f16_v16i32(<16 x half>* %a, <16 x i32>* %b) #0 {
 ; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG1]], [x1]
 ; VBITS_GE_512-NEXT: ret
 
-; Ensure sensible type legalisation - fixed type extract_subvector codegen is poor currently.
+; Ensure sensible type legalisation.
 ; VBITS_EQ_256: ptrue [[PG1:p[0-9]+]].h, vl16
 ; VBITS_EQ_256-DAG: ld1h { [[VEC:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_EQ_256-DAG: mov x8, sp
-; VBITS_EQ_256-DAG: st1h { [[VEC:z[0-9]+]].h }, [[PG1]], [x8]
-; VBITS_EQ_256-DAG: ldp q[[LO:[0-9]+]], q[[HI:[0-9]+]], [sp]
 ; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].s, vl8
 ; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #8
-; VBITS_EQ_256-DAG: uunpklo [[UPK_LO:z[0-9]+]].s, z[[LO]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK_HI:z[0-9]+]].s, z[[HI]].h
+; VBITS_EQ_256-DAG: uunpklo [[UPK_LO:z[0-9]+]].s, [[VEC]].h
+; VBITS_EQ_256-DAG: ext [[VEC_HI:z[0-9]+]].b, [[VEC]].b, [[VEC]].b, #16
+; VBITS_EQ_256-DAG: uunpklo [[UPK_HI:z[0-9]+]].s, [[VEC_HI]].h
 ; VBITS_EQ_256-DAG: fcvtzu [[RES_LO:z[0-9]+]].s, [[PG2]]/m, [[UPK_LO]].h
 ; VBITS_EQ_256-DAG: fcvtzu [[RES_HI:z[0-9]+]].s, [[PG2]]/m, [[UPK_HI]].h
 ; VBITS_EQ_256-DAG: st1w { [[RES_LO]].s }, [[PG2]], [x1]
@@ -548,16 +546,14 @@ define void @fcvtzu_v8f32_v8i64(<8 x float>* %a, <8 x i64>* %b) #0 {
 ; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
 ; VBITS_GE_512-NEXT: ret
 
-; Ensure sensible type legalisation - fixed type extract_subvector codegen is poor currently.
+; Ensure sensible type legalisation.
 ; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].s, vl8
 ; VBITS_EQ_256-DAG: ld1w { [[VEC:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_EQ_256-DAG: mov x8, sp
-; VBITS_EQ_256-DAG: st1w { [[VEC:z[0-9]+]].s }, [[PG1]], [x8]
-; VBITS_EQ_256-DAG: ldp q[[LO:[0-9]+]], q[[HI:[0-9]+]], [sp]
 ; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].d, vl4
 ; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: uunpklo [[UPK_LO:z[0-9]+]].d, z[[LO]].s
-; VBITS_EQ_256-DAG: uunpklo [[UPK_HI:z[0-9]+]].d, z[[HI]].s
+; VBITS_EQ_256-DAG: uunpklo [[UPK_LO:z[0-9]+]].d, [[VEC]].s
+; VBITS_EQ_256-DAG: ext [[VEC_HI:z[0-9]+]].b, [[VEC]].b, [[VEC]].b, #16
+; VBITS_EQ_256-DAG: uunpklo [[UPK_HI:z[0-9]+]].d, [[VEC_HI]].s
 ; VBITS_EQ_256-DAG: fcvtzu [[RES_LO:z[0-9]+]].d, [[PG2]]/m, [[UPK_LO]].s
 ; VBITS_EQ_256-DAG: fcvtzu [[RES_HI:z[0-9]+]].d, [[PG2]]/m, [[UPK_HI]].s
 ; VBITS_EQ_256-DAG: st1d { [[RES_LO]].d }, [[PG2]], [x1]
@@ -1025,16 +1021,14 @@ define void @fcvtzs_v16f16_v16i32(<16 x half>* %a, <16 x i32>* %b) #0 {
 ; VBITS_GE_512-NEXT: st1w { [[RES]].s }, [[PG1]], [x1]
 ; VBITS_GE_512-NEXT: ret
 
-; Ensure sensible type legalisation - fixed type extract_subvector codegen is poor currently.
+; Ensure sensible type legalisation.
 ; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].h, vl16
 ; VBITS_EQ_256-DAG: ld1h { [[VEC:z[0-9]+]].h }, [[PG1]]/z, [x0]
-; VBITS_EQ_256-DAG: mov x8, sp
-; VBITS_EQ_256-DAG: st1h { [[VEC:z[0-9]+]].h }, [[PG1]], [x8]
-; VBITS_EQ_256-DAG: ldp q[[LO:[0-9]+]], q[[HI:[0-9]+]], [sp]
 ; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].s, vl8
 ; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #8
-; VBITS_EQ_256-DAG: uunpklo [[UPK_LO:z[0-9]+]].s, z[[LO]].h
-; VBITS_EQ_256-DAG: uunpklo [[UPK_HI:z[0-9]+]].s, z[[HI]].h
+; VBITS_EQ_256-DAG: uunpklo [[UPK_LO:z[0-9]+]].s, [[VEC]].h
+; VBITS_EQ_256-DAG: ext [[VEC_HI:z[0-9]+]].b, [[VEC]].b, [[VEC]].b, #16
+; VBITS_EQ_256-DAG: uunpklo [[UPK_HI:z[0-9]+]].s, [[VEC_HI]].h
 ; VBITS_EQ_256-DAG: fcvtzs [[RES_LO:z[0-9]+]].s, [[PG2]]/m, [[UPK_LO]].h
 ; VBITS_EQ_256-DAG: fcvtzs [[RES_HI:z[0-9]+]].s, [[PG2]]/m, [[UPK_HI]].h
 ; VBITS_EQ_256-DAG: st1w { [[RES_LO]].s }, [[PG2]], [x1]
@@ -1417,16 +1411,14 @@ define void @fcvtzs_v8f32_v8i64(<8 x float>* %a, <8 x i64>* %b) #0 {
 ; VBITS_GE_512-NEXT: st1d { [[RES]].d }, [[PG1]], [x1]
 ; VBITS_GE_512-NEXT: ret
 
-; Ensure sensible type legalisation - fixed type extract_subvector codegen is poor currently.
+; Ensure sensible type legalisation.
 ; VBITS_EQ_256-DAG: ptrue [[PG1:p[0-9]+]].s, vl8
 ; VBITS_EQ_256-DAG: ld1w { [[VEC:z[0-9]+]].s }, [[PG1]]/z, [x0]
-; VBITS_EQ_256-DAG: mov x8, sp
-; VBITS_EQ_256-DAG: st1w { [[VEC:z[0-9]+]].s }, [[PG1]], [x8]
-; VBITS_EQ_256-DAG: ldp q[[LO:[0-9]+]], q[[HI:[0-9]+]], [sp]
 ; VBITS_EQ_256-DAG: ptrue [[PG2:p[0-9]+]].d, vl4
 ; VBITS_EQ_256-DAG: mov x[[NUMELTS:[0-9]+]], #4
-; VBITS_EQ_256-DAG: uunpklo [[UPK_LO:z[0-9]+]].d, z[[LO]].s
-; VBITS_EQ_256-DAG: uunpklo [[UPK_HI:z[0-9]+]].d, z[[HI]].s
+; VBITS_EQ_256-DAG: uunpklo [[UPK_LO:z[0-9]+]].d, [[VEC]].s
+; VBITS_EQ_256-DAG: ext [[VEC_HI:z[0-9]+]].b, [[VEC]].b, [[VEC]].b, #16
+; VBITS_EQ_256-DAG: uunpklo [[UPK_HI:z[0-9]+]].d, [[VEC]].s
 ; VBITS_EQ_256-DAG: fcvtzs [[RES_LO:z[0-9]+]].d, [[PG2]]/m, [[UPK_LO]].s
 ; VBITS_EQ_256-DAG: fcvtzs [[RES_HI:z[0-9]+]].d, [[PG2]]/m, [[UPK_HI]].s
 ; VBITS_EQ_256-DAG: st1d { [[RES_LO]].d }, [[PG2]], [x1]

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll
index a3358157ec77c..b2beba06dcb2f 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-int-to-fp.ll
@@ -161,28 +161,17 @@ define void @ucvtf_v8i16_v8f32(<8 x i16>* %a, <8 x float>* %b) #0 {
 define void @ucvtf_v16i16_v16f32(<16 x i16>* %a, <16 x float>* %b) #0 {
 ; VBITS_EQ_256-LABEL: ucvtf_v16i16_v16f32:
 ; VBITS_EQ_256:       // %bb.0:
-; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_EQ_256-NEXT:    sub x9, sp, #48
-; VBITS_EQ_256-NEXT:    mov x29, sp
-; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
-; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
-; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
-; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
 ; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
-; VBITS_EQ_256-NEXT:    mov x8, sp
-; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x8]
 ; VBITS_EQ_256-NEXT:    mov x8, #8
-; VBITS_EQ_256-NEXT:    ldp q0, q1, [sp]
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    uunpklo z1.s, z0.h
+; VBITS_EQ_256-NEXT:    ext z0.b, z0.b, z0.b, #16
 ; VBITS_EQ_256-NEXT:    uunpklo z0.s, z0.h
-; VBITS_EQ_256-NEXT:    ucvtf z0.s, p0/m, z0.s
-; VBITS_EQ_256-NEXT:    uunpklo z1.s, z1.h
-; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x1]
 ; VBITS_EQ_256-NEXT:    ucvtf z1.s, p0/m, z1.s
-; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x1, x8, lsl #2]
-; VBITS_EQ_256-NEXT:    mov sp, x29
-; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_EQ_256-NEXT:    ucvtf z0.s, p0/m, z0.s
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x1]
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x1, x8, lsl #2]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: ucvtf_v16i16_v16f32:
@@ -593,28 +582,17 @@ define void @ucvtf_v4i32_v4f64(<4 x i32>* %a, <4 x double>* %b) #0 {
 define void @ucvtf_v8i32_v8f64(<8 x i32>* %a, <8 x double>* %b) #0 {
 ; VBITS_EQ_256-LABEL: ucvtf_v8i32_v8f64:
 ; VBITS_EQ_256:       // %bb.0:
-; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_EQ_256-NEXT:    sub x9, sp, #48
-; VBITS_EQ_256-NEXT:    mov x29, sp
-; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
-; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
-; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
-; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
 ; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
-; VBITS_EQ_256-NEXT:    mov x8, sp
-; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x8]
 ; VBITS_EQ_256-NEXT:    mov x8, #4
-; VBITS_EQ_256-NEXT:    ldp q0, q1, [sp]
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    uunpklo z1.d, z0.s
+; VBITS_EQ_256-NEXT:    ext z0.b, z0.b, z0.b, #16
 ; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
-; VBITS_EQ_256-NEXT:    ucvtf z0.d, p0/m, z0.d
-; VBITS_EQ_256-NEXT:    uunpklo z1.d, z1.s
-; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1]
 ; VBITS_EQ_256-NEXT:    ucvtf z1.d, p0/m, z1.d
-; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1, x8, lsl #3]
-; VBITS_EQ_256-NEXT:    mov sp, x29
-; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_EQ_256-NEXT:    ucvtf z0.d, p0/m, z0.d
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1]
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1, x8, lsl #3]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: ucvtf_v8i32_v8f64:
@@ -1126,28 +1104,17 @@ define void @scvtf_v8i16_v8f32(<8 x i16>* %a, <8 x float>* %b) #0 {
 define void @scvtf_v16i16_v16f32(<16 x i16>* %a, <16 x float>* %b) #0 {
 ; VBITS_EQ_256-LABEL: scvtf_v16i16_v16f32:
 ; VBITS_EQ_256:       // %bb.0:
-; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_EQ_256-NEXT:    sub x9, sp, #48
-; VBITS_EQ_256-NEXT:    mov x29, sp
-; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
-; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
-; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
-; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
 ; VBITS_EQ_256-NEXT:    ptrue p0.h, vl16
-; VBITS_EQ_256-NEXT:    mov x8, sp
-; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    st1h { z0.h }, p0, [x8]
 ; VBITS_EQ_256-NEXT:    mov x8, #8
-; VBITS_EQ_256-NEXT:    ldp q0, q1, [sp]
+; VBITS_EQ_256-NEXT:    ld1h { z0.h }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
+; VBITS_EQ_256-NEXT:    sunpklo z1.s, z0.h
+; VBITS_EQ_256-NEXT:    ext z0.b, z0.b, z0.b, #16
 ; VBITS_EQ_256-NEXT:    sunpklo z0.s, z0.h
-; VBITS_EQ_256-NEXT:    scvtf z0.s, p0/m, z0.s
-; VBITS_EQ_256-NEXT:    sunpklo z1.s, z1.h
-; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x1]
 ; VBITS_EQ_256-NEXT:    scvtf z1.s, p0/m, z1.s
-; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x1, x8, lsl #2]
-; VBITS_EQ_256-NEXT:    mov sp, x29
-; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_EQ_256-NEXT:    scvtf z0.s, p0/m, z0.s
+; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x1]
+; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x1, x8, lsl #2]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: scvtf_v16i16_v16f32:
@@ -1558,28 +1525,17 @@ define void @scvtf_v4i32_v4f64(<4 x i32>* %a, <4 x double>* %b) #0 {
 define void @scvtf_v8i32_v8f64(<8 x i32>* %a, <8 x double>* %b) #0 {
 ; VBITS_EQ_256-LABEL: scvtf_v8i32_v8f64:
 ; VBITS_EQ_256:       // %bb.0:
-; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_EQ_256-NEXT:    sub x9, sp, #48
-; VBITS_EQ_256-NEXT:    mov x29, sp
-; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
-; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
-; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
-; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
 ; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
-; VBITS_EQ_256-NEXT:    mov x8, sp
-; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    st1w { z0.s }, p0, [x8]
 ; VBITS_EQ_256-NEXT:    mov x8, #4
-; VBITS_EQ_256-NEXT:    ldp q0, q1, [sp]
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
 ; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
+; VBITS_EQ_256-NEXT:    sunpklo z1.d, z0.s
+; VBITS_EQ_256-NEXT:    ext z0.b, z0.b, z0.b, #16
 ; VBITS_EQ_256-NEXT:    sunpklo z0.d, z0.s
-; VBITS_EQ_256-NEXT:    scvtf z0.d, p0/m, z0.d
-; VBITS_EQ_256-NEXT:    sunpklo z1.d, z1.s
-; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1]
 ; VBITS_EQ_256-NEXT:    scvtf z1.d, p0/m, z1.d
-; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1, x8, lsl #3]
-; VBITS_EQ_256-NEXT:    mov sp, x29
-; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; VBITS_EQ_256-NEXT:    scvtf z0.d, p0/m, z0.d
+; VBITS_EQ_256-NEXT:    st1d { z1.d }, p0, [x1]
+; VBITS_EQ_256-NEXT:    st1d { z0.d }, p0, [x1, x8, lsl #3]
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: scvtf_v8i32_v8f64:

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
index 11a772bae3a71..f121bd7e06dc0 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-gather.ll
@@ -383,38 +383,26 @@ define void @masked_gather_v8i32(<8 x i32>* %a, <8 x i32*>* %b) #0 {
 ; Ensure sensible type legalisation.
 ; VBITS_EQ_256-LABEL: masked_gather_v8i32:
 ; VBITS_EQ_256:       // %bb.0:
-; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_EQ_256-NEXT:    sub x9, sp, #48
-; VBITS_EQ_256-NEXT:    mov x29, sp
-; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
-; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
-; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
-; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
 ; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_EQ_256-NEXT:    mov x8, #4
 ; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
-; VBITS_EQ_256-NEXT:    mov x9, sp
 ; VBITS_EQ_256-NEXT:    ptrue p1.d, vl4
-; VBITS_EQ_256-NEXT:    cmpeq p2.s, p0/z, z0.s, #0
-; VBITS_EQ_256-NEXT:    ld1d { z0.d }, p1/z, [x1, x8, lsl #3]
-; VBITS_EQ_256-NEXT:    mov z1.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p1/z, [x1, x8, lsl #3]
 ; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p1/z, [x1]
-; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x9]
-; VBITS_EQ_256-NEXT:    ldr q1, [sp, #16]
-; VBITS_EQ_256-NEXT:    uunpklo z1.d, z1.s
-; VBITS_EQ_256-NEXT:    cmpne p2.d, p1/z, z1.d, #0
-; VBITS_EQ_256-NEXT:    ld1w { z0.d }, p2/z, [z0.d]
-; VBITS_EQ_256-NEXT:    ldr q1, [sp]
-; VBITS_EQ_256-NEXT:    uunpklo z1.d, z1.s
-; VBITS_EQ_256-NEXT:    cmpne p1.d, p1/z, z1.d, #0
-; VBITS_EQ_256-NEXT:    uzp1 z0.s, z0.s, z0.s
-; VBITS_EQ_256-NEXT:    ld1w { z1.d }, p1/z, [z2.d]
+; VBITS_EQ_256-NEXT:    cmpeq p2.s, p0/z, z0.s, #0
+; VBITS_EQ_256-NEXT:    mov z0.s, p2/z, #-1 // =0xffffffffffffffff
+; VBITS_EQ_256-NEXT:    uunpklo z3.d, z0.s
+; VBITS_EQ_256-NEXT:    ext z0.b, z0.b, z0.b, #16
+; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
+; VBITS_EQ_256-NEXT:    cmpne p2.d, p1/z, z3.d, #0
+; VBITS_EQ_256-NEXT:    cmpne p1.d, p1/z, z0.d, #0
+; VBITS_EQ_256-NEXT:    ld1w { z2.d }, p2/z, [z2.d]
+; VBITS_EQ_256-NEXT:    ld1w { z0.d }, p1/z, [z1.d]
 ; VBITS_EQ_256-NEXT:    ptrue p1.s, vl4
-; VBITS_EQ_256-NEXT:    uzp1 z1.s, z1.s, z1.s
+; VBITS_EQ_256-NEXT:    uzp1 z1.s, z2.s, z2.s
+; VBITS_EQ_256-NEXT:    uzp1 z0.s, z0.s, z0.s
 ; VBITS_EQ_256-NEXT:    splice z1.s, p1, z1.s, z0.s
 ; VBITS_EQ_256-NEXT:    st1w { z1.s }, p0, [x0]
-; VBITS_EQ_256-NEXT:    mov sp, x29
-; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_gather_v8i32:

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll
index 5753577511037..f6a3955ad5c99 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-masked-scatter.ll
@@ -355,39 +355,24 @@ define void @masked_scatter_v8i32(<8 x i32>* %a, <8 x i32*>* %b) #0 {
 ; Ensure sensible type legalisation.
 ; VBITS_EQ_256-LABEL: masked_scatter_v8i32:
 ; VBITS_EQ_256:       // %bb.0:
-; VBITS_EQ_256-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
-; VBITS_EQ_256-NEXT:    sub x9, sp, #80
-; VBITS_EQ_256-NEXT:    mov x29, sp
-; VBITS_EQ_256-NEXT:    and sp, x9, #0xffffffffffffffe0
-; VBITS_EQ_256-NEXT:    .cfi_def_cfa w29, 16
-; VBITS_EQ_256-NEXT:    .cfi_offset w30, -8
-; VBITS_EQ_256-NEXT:    .cfi_offset w29, -16
-; VBITS_EQ_256-NEXT:    ptrue p1.s, vl8
+; VBITS_EQ_256-NEXT:    ptrue p0.s, vl8
 ; VBITS_EQ_256-NEXT:    mov x8, #4
-; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p1/z, [x0]
-; VBITS_EQ_256-NEXT:    add x9, sp, #32
-; VBITS_EQ_256-NEXT:    mov x10, sp
-; VBITS_EQ_256-NEXT:    ptrue p0.d, vl4
-; VBITS_EQ_256-NEXT:    ld1d { z1.d }, p0/z, [x1, x8, lsl #3]
-; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p0/z, [x1]
-; VBITS_EQ_256-NEXT:    cmpeq p2.s, p1/z, z0.s, #0
-; VBITS_EQ_256-NEXT:    mov z3.s, p2/z, #-1 // =0xffffffffffffffff
-; VBITS_EQ_256-NEXT:    st1w { z3.s }, p1, [x9]
-; VBITS_EQ_256-NEXT:    st1w { z0.s }, p1, [x10]
-; VBITS_EQ_256-NEXT:    ldr q0, [sp, #32]
-; VBITS_EQ_256-NEXT:    ldr q3, [sp]
+; VBITS_EQ_256-NEXT:    ld1w { z0.s }, p0/z, [x0]
+; VBITS_EQ_256-NEXT:    ptrue p1.d, vl4
+; VBITS_EQ_256-NEXT:    ld1d { z2.d }, p1/z, [x1, x8, lsl #3]
+; VBITS_EQ_256-NEXT:    ld1d { z4.d }, p1/z, [x1]
+; VBITS_EQ_256-NEXT:    cmpeq p0.s, p0/z, z0.s, #0
+; VBITS_EQ_256-NEXT:    mov z1.s, p0/z, #-1 // =0xffffffffffffffff
+; VBITS_EQ_256-NEXT:    uunpklo z3.d, z1.s
+; VBITS_EQ_256-NEXT:    ext z1.b, z1.b, z1.b, #16
+; VBITS_EQ_256-NEXT:    cmpne p0.d, p1/z, z3.d, #0
+; VBITS_EQ_256-NEXT:    uunpklo z3.d, z0.s
+; VBITS_EQ_256-NEXT:    uunpklo z1.d, z1.s
+; VBITS_EQ_256-NEXT:    ext z0.b, z0.b, z0.b, #16
+; VBITS_EQ_256-NEXT:    cmpne p1.d, p1/z, z1.d, #0
 ; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
-; VBITS_EQ_256-NEXT:    cmpne p1.d, p0/z, z0.d, #0
-; VBITS_EQ_256-NEXT:    uunpklo z0.d, z3.s
+; VBITS_EQ_256-NEXT:    st1w { z3.d }, p0, [z4.d]
 ; VBITS_EQ_256-NEXT:    st1w { z0.d }, p1, [z2.d]
-; VBITS_EQ_256-NEXT:    ldr q0, [sp, #48]
-; VBITS_EQ_256-NEXT:    ldr q2, [sp, #16]
-; VBITS_EQ_256-NEXT:    uunpklo z0.d, z0.s
-; VBITS_EQ_256-NEXT:    cmpne p0.d, p0/z, z0.d, #0
-; VBITS_EQ_256-NEXT:    uunpklo z0.d, z2.s
-; VBITS_EQ_256-NEXT:    st1w { z0.d }, p0, [z1.d]
-; VBITS_EQ_256-NEXT:    mov sp, x29
-; VBITS_EQ_256-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; VBITS_EQ_256-NEXT:    ret
 ;
 ; VBITS_GE_512-LABEL: masked_scatter_v8i32:


        

