[llvm] 8aff167 - [AArch64][SME] Improve streaming-compatible codegen for extending loads/truncating stores.

Sander de Smalen via llvm-commits llvm-commits at lists.llvm.org
Mon Jan 9 07:08:23 PST 2023


Author: Sander de Smalen
Date: 2023-01-09T15:08:04Z
New Revision: 8aff167b34c038b711a93276fdb8ef2f123a29b9

URL: https://github.com/llvm/llvm-project/commit/8aff167b34c038b711a93276fdb8ef2f123a29b9
DIFF: https://github.com/llvm/llvm-project/commit/8aff167b34c038b711a93276fdb8ef2f123a29b9.diff

LOG: [AArch64][SME] Improve streaming-compatible codegen for extending loads/truncating stores.

This is another step in aligning addTypeForStreamingSVE with addTypeForFixedLengthSVE,
which also improves code quality for extending loads and truncating stores.

Reviewed By: hassnaa-arm

Differential Revision: https://reviews.llvm.org/D141266

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 63fa0d50ba4a..74d61c2e4ea7 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1687,6 +1687,30 @@ void AArch64TargetLowering::addTypeForStreamingSVE(MVT VT) {
   setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
   setOperationAction(ISD::BITCAST, VT, Legal);
 
+  // Mark integer truncating stores/extending loads as having custom lowering
+  if (VT.isInteger()) {
+    MVT InnerVT = VT.changeVectorElementType(MVT::i8);
+    while (InnerVT != VT) {
+      setTruncStoreAction(VT, InnerVT, Custom);
+      setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Custom);
+      setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Custom);
+      InnerVT = InnerVT.changeVectorElementType(
+          MVT::getIntegerVT(2 * InnerVT.getScalarSizeInBits()));
+    }
+  }
+
+  // Mark floating-point truncating stores/extending loads as having custom
+  // lowering
+  if (VT.isFloatingPoint()) {
+    MVT InnerVT = VT.changeVectorElementType(MVT::f16);
+    while (InnerVT != VT) {
+      setTruncStoreAction(VT, InnerVT, Custom);
+      setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Custom);
+      InnerVT = InnerVT.changeVectorElementType(
+          MVT::getFloatingPointVT(2 * InnerVT.getScalarSizeInBits()));
+    }
+  }
+
   setOperationAction(ISD::ABS, VT, Custom);
   setOperationAction(ISD::ADD, VT, Custom);
   setOperationAction(ISD::AND, VT, Custom);
@@ -6079,7 +6103,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   case ISD::MLOAD:
     return LowerMLOAD(Op, DAG);
   case ISD::LOAD:
-    if (useSVEForFixedLengthVectorVT(Op.getValueType()))
+    if (useSVEForFixedLengthVectorVT(Op.getValueType(),
+                                     Subtarget->forceStreamingCompatibleSVE()))
       return LowerFixedLengthVectorLoadToSVE(Op, DAG);
     return LowerLOAD(Op, DAG);
   case ISD::ADD:

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
index 66b002928927..33a134ecee96 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-bitcast.ll
@@ -6,9 +6,8 @@ target triple = "aarch64-unknown-linux-gnu"
 define void @bitcast_v4i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: bitcast_v4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    ptrue p0.h, vl4
-; CHECK-NEXT:    uunpklo z0.h, z0.b
+; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x1]
 ; CHECK-NEXT:    ret
   %load = load volatile <4 x i8>, ptr %a

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
index 3e6892a01840..94b4aad294f0 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-ext-loads.ll
@@ -6,11 +6,8 @@ target triple = "aarch64-unknown-linux-gnu"
 define <8 x i16> @load_zext_v8i8i16(ptr %ap)  #0 {
 ; CHECK-LABEL: load_zext_v8i8i16:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldp s1, s0, [x0]
-; CHECK-NEXT:    ptrue p0.h, vl4
-; CHECK-NEXT:    uunpklo z2.h, z0.b
-; CHECK-NEXT:    uunpklo z0.h, z1.b
-; CHECK-NEXT:    splice z0.h, p0, z0.h, z2.h
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %a = load <8 x i8>, ptr %ap
@@ -21,8 +18,8 @@ define <8 x i16> @load_zext_v8i8i16(ptr %ap)  #0 {
 define <4 x i32> @load_zext_v4i16i32(ptr %ap)  #0 {
 ; CHECK-LABEL: load_zext_v4i16i32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    uunpklo z0.s, z0.h
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %a = load <4 x i16>, ptr %ap
@@ -33,8 +30,8 @@ define <4 x i32> @load_zext_v4i16i32(ptr %ap)  #0 {
 define <2 x i64> @load_zext_v2i32i64(ptr %ap) #0 {
 ; CHECK-LABEL: load_zext_v2i32i64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr d0, [x0]
-; CHECK-NEXT:    uunpklo z0.d, z0.s
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
 ; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
 ; CHECK-NEXT:    ret
   %a = load <2 x i32>, ptr %ap

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
index 75cf13137f70..c04534390e8c 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fcopysign.ll
@@ -212,21 +212,14 @@ define void @test_copysign_v4f32_v4f64(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v2f64_v2f32(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v2f64_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    ldr s0, [x1, #4]
-; CHECK-NEXT:    ldr q1, [x0]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    and z1.d, z1.d, #0x7fffffffffffffff
-; CHECK-NEXT:    str d0, [sp, #8]
-; CHECK-NEXT:    ldr s0, [x1]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp]
-; CHECK-NEXT:    ldr q0, [sp]
-; CHECK-NEXT:    and z0.d, z0.d, #0x8000000000000000
-; CHECK-NEXT:    orr z0.d, z1.d, z0.d
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x1]
+; CHECK-NEXT:    and z0.d, z0.d, #0x7fffffffffffffff
+; CHECK-NEXT:    fcvt z1.d, p0/m, z1.s
+; CHECK-NEXT:    and z1.d, z1.d, #0x8000000000000000
+; CHECK-NEXT:    orr z0.d, z0.d, z1.d
 ; CHECK-NEXT:    str q0, [x0]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %a = load <2 x double>, ptr %ap
   %b = load < 2 x float>, ptr %bp
@@ -242,30 +235,20 @@ define void @test_copysign_v2f64_v2f32(ptr %ap, ptr %bp) #0 {
 define void @test_copysign_v4f64_v4f32(ptr %ap, ptr %bp) #0 {
 ; CHECK-LABEL: test_copysign_v4f64_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
-; CHECK-NEXT:    ldr s0, [x1, #12]
-; CHECK-NEXT:    ldp q2, q1, [x0]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    and z2.d, z2.d, #0x7fffffffffffffff
-; CHECK-NEXT:    str d0, [sp, #24]
+; CHECK-NEXT:    mov x8, #2
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    ld1w { z2.d }, p0/z, [x1, x8, lsl #2]
+; CHECK-NEXT:    ld1w { z3.d }, p0/z, [x1]
+; CHECK-NEXT:    and z0.d, z0.d, #0x7fffffffffffffff
+; CHECK-NEXT:    fcvt z3.d, p0/m, z3.s
+; CHECK-NEXT:    fcvt z2.d, p0/m, z2.s
 ; CHECK-NEXT:    and z1.d, z1.d, #0x7fffffffffffffff
-; CHECK-NEXT:    ldr s0, [x1, #8]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp, #16]
-; CHECK-NEXT:    ldr s0, [x1, #4]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp, #8]
-; CHECK-NEXT:    ldr s0, [x1]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp]
-; CHECK-NEXT:    ldp q3, q0, [sp]
 ; CHECK-NEXT:    and z3.d, z3.d, #0x8000000000000000
-; CHECK-NEXT:    and z0.d, z0.d, #0x8000000000000000
-; CHECK-NEXT:    orr z0.d, z1.d, z0.d
-; CHECK-NEXT:    orr z1.d, z2.d, z3.d
-; CHECK-NEXT:    stp q1, q0, [x0]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    and z2.d, z2.d, #0x8000000000000000
+; CHECK-NEXT:    orr z0.d, z0.d, z3.d
+; CHECK-NEXT:    orr z1.d, z1.d, z2.d
+; CHECK-NEXT:    stp q0, q1, [x0]
 ; CHECK-NEXT:    ret
   %a = load <4 x double>, ptr %ap
   %b = load <4 x float>, ptr %bp

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
index 7abde39f9e8e..926cb24e5842 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
@@ -10,17 +10,10 @@ target triple = "aarch64-unknown-linux-gnu"
 define void @fcvt_v2f16_v2f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v2f16_v2f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    ldr h0, [x0, #2]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #12]
-; CHECK-NEXT:    ldr h0, [x0]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #8]
-; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    fcvt z0.s, p0/m, z0.h
 ; CHECK-NEXT:    str d0, [x1]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %op1 = load <2 x half>, ptr %a
   %res = fpext <2 x half> %op1 to <2 x float>
@@ -31,23 +24,10 @@ define void @fcvt_v2f16_v2f32(ptr %a, ptr %b) #0 {
 define void @fcvt_v4f16_v4f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v4f16_v4f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    ldr h0, [x0, #6]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #12]
-; CHECK-NEXT:    ldr h0, [x0, #4]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #8]
-; CHECK-NEXT:    ldr h0, [x0, #2]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #4]
-; CHECK-NEXT:    ldr h0, [x0]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp]
-; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
+; CHECK-NEXT:    fcvt z0.s, p0/m, z0.h
 ; CHECK-NEXT:    str q0, [x1]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %op1 = load <4 x half>, ptr %a
   %res = fpext <4 x half> %op1 to <4 x float>
@@ -58,35 +38,13 @@ define void @fcvt_v4f16_v4f32(ptr %a, ptr %b) #0 {
 define void @fcvt_v8f16_v8f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v8f16_v8f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
-; CHECK-NEXT:    ldr h0, [x0, #14]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #28]
-; CHECK-NEXT:    ldr h0, [x0, #12]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #24]
-; CHECK-NEXT:    ldr h0, [x0, #10]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #20]
-; CHECK-NEXT:    ldr h0, [x0, #8]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #16]
-; CHECK-NEXT:    ldr h0, [x0, #6]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #12]
-; CHECK-NEXT:    ldr h0, [x0, #4]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #8]
-; CHECK-NEXT:    ldr h0, [x0, #2]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #4]
-; CHECK-NEXT:    ldr h0, [x0]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp]
-; CHECK-NEXT:    ldp q0, q1, [sp]
-; CHECK-NEXT:    stp q0, q1, [x1]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    mov x8, #4
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, x8, lsl #1]
+; CHECK-NEXT:    ld1h { z1.s }, p0/z, [x0]
+; CHECK-NEXT:    fcvt z0.s, p0/m, z0.h
+; CHECK-NEXT:    fcvt z1.s, p0/m, z1.h
+; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
   %op1 = load <8 x half>, ptr %a
   %res = fpext <8 x half> %op1 to <8 x float>
@@ -97,61 +55,22 @@ define void @fcvt_v8f16_v8f32(ptr %a, ptr %b) #0 {
 define void @fcvt_v16f16_v16f32(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v16f16_v16f32:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
-; CHECK-NEXT:    ldr h0, [x0, #22]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #60]
-; CHECK-NEXT:    ldr h0, [x0, #20]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #56]
-; CHECK-NEXT:    ldr h0, [x0, #18]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #52]
-; CHECK-NEXT:    ldr h0, [x0, #16]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #48]
-; CHECK-NEXT:    ldr h0, [x0, #14]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #44]
-; CHECK-NEXT:    ldr h0, [x0, #12]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #40]
-; CHECK-NEXT:    ldr h0, [x0, #10]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #36]
-; CHECK-NEXT:    ldr h0, [x0, #8]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #32]
-; CHECK-NEXT:    ldr h0, [x0, #6]
-; CHECK-NEXT:    ldp q1, q3, [sp, #32]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #12]
-; CHECK-NEXT:    ldr h0, [x0, #4]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #8]
-; CHECK-NEXT:    ldr h0, [x0, #2]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #4]
-; CHECK-NEXT:    ldr h0, [x0]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp]
-; CHECK-NEXT:    ldr h0, [x0, #30]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #28]
-; CHECK-NEXT:    ldr h0, [x0, #28]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #24]
-; CHECK-NEXT:    ldr h0, [x0, #26]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #20]
-; CHECK-NEXT:    ldr h0, [x0, #24]
-; CHECK-NEXT:    fcvt s0, h0
-; CHECK-NEXT:    str s0, [sp, #16]
-; CHECK-NEXT:    ldp q0, q2, [sp]
+; CHECK-NEXT:    mov x8, #8
+; CHECK-NEXT:    mov x9, #12
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    mov x10, #4
+; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0, x8, lsl #1]
+; CHECK-NEXT:    ld1h { z1.s }, p0/z, [x0, x9, lsl #1]
+; CHECK-NEXT:    ld1h { z2.s }, p0/z, [x0, x10, lsl #1]
+; CHECK-NEXT:    ld1h { z3.s }, p0/z, [x0]
+; CHECK-NEXT:    fcvt z0.s, p0/m, z0.h
+; CHECK-NEXT:    fcvt z1.s, p0/m, z1.h
+; CHECK-NEXT:    stp q0, q1, [x1, #32]
+; CHECK-NEXT:    movprfx z0, z3
+; CHECK-NEXT:    fcvt z0.s, p0/m, z3.h
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    fcvt z1.s, p0/m, z2.h
 ; CHECK-NEXT:    stp q0, q1, [x1]
-; CHECK-NEXT:    stp q3, q2, [x1, #32]
-; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
   %op1 = load <16 x half>, ptr %a
   %res = fpext <16 x half> %op1 to <16 x float>
@@ -179,17 +98,10 @@ define void @fcvt_v1f16_v1f64(ptr %a, ptr %b) #0 {
 define void @fcvt_v2f16_v2f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v2f16_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    ldr h0, [x0, #2]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #8]
-; CHECK-NEXT:    ldr h0, [x0]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp]
-; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    fcvt z0.d, p0/m, z0.h
 ; CHECK-NEXT:    str q0, [x1]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %op1 = load <2 x half>, ptr %a
   %res = fpext <2 x half> %op1 to <2 x double>
@@ -200,23 +112,13 @@ define void @fcvt_v2f16_v2f64(ptr %a, ptr %b) #0 {
 define void @fcvt_v4f16_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v4f16_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
-; CHECK-NEXT:    ldr h0, [x0, #6]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #24]
-; CHECK-NEXT:    ldr h0, [x0, #4]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #16]
-; CHECK-NEXT:    ldr h0, [x0, #2]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #8]
-; CHECK-NEXT:    ldr h0, [x0]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp]
-; CHECK-NEXT:    ldp q0, q1, [sp]
-; CHECK-NEXT:    stp q0, q1, [x1]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    mov x8, #2
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x8, lsl #1]
+; CHECK-NEXT:    ld1h { z1.d }, p0/z, [x0]
+; CHECK-NEXT:    fcvt z0.d, p0/m, z0.h
+; CHECK-NEXT:    fcvt z1.d, p0/m, z1.h
+; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x half>, ptr %a
   %res = fpext <4 x half> %op1 to <4 x double>
@@ -227,37 +129,22 @@ define void @fcvt_v4f16_v4f64(ptr %a, ptr %b) #0 {
 define void @fcvt_v8f16_v8f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v8f16_v8f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
-; CHECK-NEXT:    ldr h0, [x0, #10]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #56]
-; CHECK-NEXT:    ldr h0, [x0, #8]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #48]
-; CHECK-NEXT:    ldr h0, [x0, #6]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #40]
-; CHECK-NEXT:    ldr h0, [x0, #4]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #32]
-; CHECK-NEXT:    ldr h0, [x0, #2]
-; CHECK-NEXT:    ldp q1, q3, [sp, #32]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #8]
-; CHECK-NEXT:    ldr h0, [x0]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp]
-; CHECK-NEXT:    ldr h0, [x0, #14]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #24]
-; CHECK-NEXT:    ldr h0, [x0, #12]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #16]
-; CHECK-NEXT:    ldp q0, q2, [sp]
+; CHECK-NEXT:    mov x8, #4
+; CHECK-NEXT:    mov x9, #6
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    mov x10, #2
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x8, lsl #1]
+; CHECK-NEXT:    ld1h { z1.d }, p0/z, [x0, x9, lsl #1]
+; CHECK-NEXT:    ld1h { z2.d }, p0/z, [x0, x10, lsl #1]
+; CHECK-NEXT:    ld1h { z3.d }, p0/z, [x0]
+; CHECK-NEXT:    fcvt z0.d, p0/m, z0.h
+; CHECK-NEXT:    fcvt z1.d, p0/m, z1.h
+; CHECK-NEXT:    stp q0, q1, [x1, #32]
+; CHECK-NEXT:    movprfx z0, z3
+; CHECK-NEXT:    fcvt z0.d, p0/m, z3.h
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    fcvt z1.d, p0/m, z2.h
 ; CHECK-NEXT:    stp q0, q1, [x1]
-; CHECK-NEXT:    stp q3, q2, [x1, #32]
-; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
   %op1 = load <8 x half>, ptr %a
   %res = fpext <8 x half> %op1 to <8 x double>
@@ -268,65 +155,40 @@ define void @fcvt_v8f16_v8f64(ptr %a, ptr %b) #0 {
 define void @fcvt_v16f16_v16f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v16f16_v16f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #128
-; CHECK-NEXT:    .cfi_def_cfa_offset 128
-; CHECK-NEXT:    ldr h0, [x0, #26]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #24]
-; CHECK-NEXT:    ldr h0, [x0, #24]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #16]
-; CHECK-NEXT:    ldr h0, [x0, #6]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #88]
-; CHECK-NEXT:    ldr h0, [x0, #4]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #80]
-; CHECK-NEXT:    ldr h0, [x0, #2]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #8]
-; CHECK-NEXT:    ldr h0, [x0]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp]
-; CHECK-NEXT:    ldr h0, [x0, #14]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #72]
-; CHECK-NEXT:    ldr h0, [x0, #12]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #64]
-; CHECK-NEXT:    ldr h0, [x0, #10]
-; CHECK-NEXT:    ldp q3, q1, [sp, #64]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #104]
-; CHECK-NEXT:    ldr h0, [x0, #8]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #96]
-; CHECK-NEXT:    ldr h0, [x0, #22]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #56]
-; CHECK-NEXT:    ldr h0, [x0, #20]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #48]
-; CHECK-NEXT:    ldr h0, [x0, #18]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #120]
-; CHECK-NEXT:    ldr h0, [x0, #16]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #112]
-; CHECK-NEXT:    ldr h0, [x0, #30]
-; CHECK-NEXT:    ldp q6, q4, [sp, #96]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #40]
-; CHECK-NEXT:    ldr h0, [x0, #28]
-; CHECK-NEXT:    fcvt d0, h0
-; CHECK-NEXT:    str d0, [sp, #32]
-; CHECK-NEXT:    ldp q2, q0, [sp]
-; CHECK-NEXT:    ldp q7, q5, [sp, #32]
-; CHECK-NEXT:    stp q2, q1, [x1]
-; CHECK-NEXT:    stp q6, q3, [x1, #32]
-; CHECK-NEXT:    stp q0, q7, [x1, #96]
-; CHECK-NEXT:    stp q4, q5, [x1, #64]
-; CHECK-NEXT:    add sp, sp, #128
+; CHECK-NEXT:    mov x9, #14
+; CHECK-NEXT:    mov x10, #12
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    mov x8, #2
+; CHECK-NEXT:    mov x11, #6
+; CHECK-NEXT:    mov x12, #4
+; CHECK-NEXT:    ld1h { z0.d }, p0/z, [x0, x9, lsl #1]
+; CHECK-NEXT:    ld1h { z1.d }, p0/z, [x0, x10, lsl #1]
+; CHECK-NEXT:    mov x9, #8
+; CHECK-NEXT:    mov x10, #10
+; CHECK-NEXT:    ld1h { z2.d }, p0/z, [x0, x8, lsl #1]
+; CHECK-NEXT:    ld1h { z3.d }, p0/z, [x0, x11, lsl #1]
+; CHECK-NEXT:    ld1h { z5.d }, p0/z, [x0, x12, lsl #1]
+; CHECK-NEXT:    fcvt z0.d, p0/m, z0.h
+; CHECK-NEXT:    fcvt z1.d, p0/m, z1.h
+; CHECK-NEXT:    ld1h { z4.d }, p0/z, [x0, x9, lsl #1]
+; CHECK-NEXT:    ld1h { z6.d }, p0/z, [x0, x10, lsl #1]
+; CHECK-NEXT:    ld1h { z7.d }, p0/z, [x0]
+; CHECK-NEXT:    stp q1, q0, [x1, #96]
+; CHECK-NEXT:    movprfx z1, z4
+; CHECK-NEXT:    fcvt z1.d, p0/m, z4.h
+; CHECK-NEXT:    movprfx z0, z6
+; CHECK-NEXT:    fcvt z0.d, p0/m, z6.h
+; CHECK-NEXT:    stp q1, q0, [x1, #64]
+; CHECK-NEXT:    movprfx z1, z5
+; CHECK-NEXT:    fcvt z1.d, p0/m, z5.h
+; CHECK-NEXT:    movprfx z0, z3
+; CHECK-NEXT:    fcvt z0.d, p0/m, z3.h
+; CHECK-NEXT:    stp q1, q0, [x1, #32]
+; CHECK-NEXT:    movprfx z1, z7
+; CHECK-NEXT:    fcvt z1.d, p0/m, z7.h
+; CHECK-NEXT:    movprfx z0, z2
+; CHECK-NEXT:    fcvt z0.d, p0/m, z2.h
+; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
   %op1 = load <16 x half>, ptr %a
   %res = fpext <16 x half> %op1 to <16 x double>
@@ -354,17 +216,10 @@ define void @fcvt_v1f32_v1f64(ptr %a, ptr %b) #0 {
 define void @fcvt_v2f32_v2f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v2f32_v2f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    ldr s0, [x0, #4]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp, #8]
-; CHECK-NEXT:    ldr s0, [x0]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp]
-; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
+; CHECK-NEXT:    fcvt z0.d, p0/m, z0.s
 ; CHECK-NEXT:    str q0, [x1]
-; CHECK-NEXT:    add sp, sp, #16
 ; CHECK-NEXT:    ret
   %op1 = load <2 x float>, ptr %a
   %res = fpext <2 x float> %op1 to <2 x double>
@@ -375,23 +230,13 @@ define void @fcvt_v2f32_v2f64(ptr %a, ptr %b) #0 {
 define void @fcvt_v4f32_v4f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v4f32_v4f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #32
-; CHECK-NEXT:    .cfi_def_cfa_offset 32
-; CHECK-NEXT:    ldr s0, [x0, #12]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp, #24]
-; CHECK-NEXT:    ldr s0, [x0, #8]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp, #16]
-; CHECK-NEXT:    ldr s0, [x0, #4]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp, #8]
-; CHECK-NEXT:    ldr s0, [x0]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp]
-; CHECK-NEXT:    ldp q0, q1, [sp]
-; CHECK-NEXT:    stp q0, q1, [x1]
-; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    mov x8, #2
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0]
+; CHECK-NEXT:    fcvt z0.d, p0/m, z0.s
+; CHECK-NEXT:    fcvt z1.d, p0/m, z1.s
+; CHECK-NEXT:    stp q1, q0, [x1]
 ; CHECK-NEXT:    ret
   %op1 = load <4 x float>, ptr %a
   %res = fpext <4 x float> %op1 to <4 x double>
@@ -402,37 +247,22 @@ define void @fcvt_v4f32_v4f64(ptr %a, ptr %b) #0 {
 define void @fcvt_v8f32_v8f64(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: fcvt_v8f32_v8f64:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    sub sp, sp, #64
-; CHECK-NEXT:    .cfi_def_cfa_offset 64
-; CHECK-NEXT:    ldr s0, [x0, #20]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp, #56]
-; CHECK-NEXT:    ldr s0, [x0, #16]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp, #48]
-; CHECK-NEXT:    ldr s0, [x0, #12]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp, #40]
-; CHECK-NEXT:    ldr s0, [x0, #8]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp, #32]
-; CHECK-NEXT:    ldr s0, [x0, #4]
-; CHECK-NEXT:    ldp q1, q3, [sp, #32]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp, #8]
-; CHECK-NEXT:    ldr s0, [x0]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp]
-; CHECK-NEXT:    ldr s0, [x0, #28]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp, #24]
-; CHECK-NEXT:    ldr s0, [x0, #24]
-; CHECK-NEXT:    fcvt d0, s0
-; CHECK-NEXT:    str d0, [sp, #16]
-; CHECK-NEXT:    ldp q0, q2, [sp]
+; CHECK-NEXT:    mov x8, #4
+; CHECK-NEXT:    mov x9, #6
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    mov x10, #2
+; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT:    ld1w { z1.d }, p0/z, [x0, x9, lsl #2]
+; CHECK-NEXT:    ld1w { z2.d }, p0/z, [x0, x10, lsl #2]
+; CHECK-NEXT:    ld1w { z3.d }, p0/z, [x0]
+; CHECK-NEXT:    fcvt z0.d, p0/m, z0.s
+; CHECK-NEXT:    fcvt z1.d, p0/m, z1.s
+; CHECK-NEXT:    stp q0, q1, [x1, #32]
+; CHECK-NEXT:    movprfx z0, z3
+; CHECK-NEXT:    fcvt z0.d, p0/m, z3.s
+; CHECK-NEXT:    movprfx z1, z2
+; CHECK-NEXT:    fcvt z1.d, p0/m, z2.s
 ; CHECK-NEXT:    stp q0, q1, [x1]
-; CHECK-NEXT:    stp q3, q2, [x1, #32]
-; CHECK-NEXT:    add sp, sp, #64
 ; CHECK-NEXT:    ret
   %op1 = load <8 x float>, ptr %a
   %res = fpext <8 x float> %op1 to <8 x double>

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
index 96abfcda50b8..251f6ea48920 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-loads.ll
@@ -6,8 +6,8 @@ target triple = "aarch64-unknown-linux-gnu"
 define <4 x i8> @load_v4i8(ptr %a) #0 {
 ; CHECK-LABEL: load_v4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr s0, [x0]
-; CHECK-NEXT:    uunpklo z0.h, z0.b
+; CHECK-NEXT:    ptrue p0.h, vl4
+; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $z0
 ; CHECK-NEXT:    ret
   %load = load <4 x i8>, ptr %a

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
index 020d44b19de0..904fac734ca1 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-optimize-ptrue.ll
@@ -6,11 +6,9 @@ target triple = "aarch64-unknown-linux-gnu"
 define void @add_v4i8(ptr %a, ptr %b) #0 {
 ; CHECK-LABEL: add_v4i8:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    ptrue p0.h, vl4
-; CHECK-NEXT:    ldr s1, [x1]
-; CHECK-NEXT:    uunpklo z0.h, z0.b
-; CHECK-NEXT:    uunpklo z1.h, z1.b
+; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
+; CHECK-NEXT:    ld1b { z1.h }, p0/z, [x1]
 ; CHECK-NEXT:    add z0.h, z0.h, z1.h
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x0]
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll
index 1c43023aabd2..6a283a62792f 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-subvector.ll
@@ -17,9 +17,8 @@ target triple = "aarch64-unknown-linux-gnu"
 define void @subvector_v4i8(ptr %in, ptr %out) #0 {
 ; CHECK-LABEL: subvector_v4i8:
 ; CHECK:       // %bb.0: // %bb1
-; CHECK-NEXT:    ldr s0, [x0]
 ; CHECK-NEXT:    ptrue p0.h, vl4
-; CHECK-NEXT:    uunpklo z0.h, z0.b
+; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
 ; CHECK-NEXT:    st1b { z0.h }, p0, [x1]
 ; CHECK-NEXT:    ret
   %a = load <4 x i8>, ptr %in

diff --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll
index 8aae6a70181d..f084d01f99d7 100644
--- a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-trunc-stores.ll
@@ -7,8 +7,8 @@ define void @store_trunc_v8i16i8(ptr %ap, ptr %dest) #0 {
 ; CHECK-LABEL: store_trunc_v8i16i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
-; CHECK-NEXT:    str d0, [x1]
+; CHECK-NEXT:    ptrue p0.h, vl8
+; CHECK-NEXT:    st1b { z0.h }, p0, [x1]
 ; CHECK-NEXT:    ret
   %a = load <8 x i16>, ptr %ap
   %val = trunc <8 x i16> %a to <8 x i8>
@@ -20,9 +20,8 @@ define void @store_trunc_v4i32i8(ptr %ap, ptr %dest) #0 {
 ; CHECK-LABEL: store_trunc_v4i32i8:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    ptrue p0.h, vl4
-; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
-; CHECK-NEXT:    st1b { z0.h }, p0, [x1]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    st1b { z0.s }, p0, [x1]
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %ap
   %val = trunc <4 x i32> %a to <4 x i8>
@@ -34,8 +33,8 @@ define void @store_trunc_v4i32i16(ptr %ap, ptr %dest) #0 {
 ; CHECK-LABEL: store_trunc_v4i32i16:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    ldr q0, [x0]
-; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
-; CHECK-NEXT:    str d0, [x1]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    st1h { z0.s }, p0, [x1]
 ; CHECK-NEXT:    ret
   %a = load <4 x i32>, ptr %ap
   %val = trunc <4 x i32> %a to <4 x i16>


        


More information about the llvm-commits mailing list