[llvm] 02db560 - [AArch64][SME]: Generate streaming-compatible code for fp-extend-trunc

Hassnaa Hamdi via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 29 04:46:02 PST 2022


Author: Hassnaa Hamdi
Date: 2022-11-29T12:45:53Z
New Revision: 02db5603ba71ab6cc38b5243d2f3ffe14dc66e9c

URL: https://github.com/llvm/llvm-project/commit/02db5603ba71ab6cc38b5243d2f3ffe14dc66e9c
DIFF: https://github.com/llvm/llvm-project/commit/02db5603ba71ab6cc38b5243d2f3ffe14dc66e9c.diff

LOG: [AArch64][SME]: Generate streaming-compatible code for fp-extend-trunc

To generate code compatible with streaming mode:
 - enable custom lowering for TruncStore to avoid crashing
   when legalizing a TruncStore of a non-integer vector type
   (see the sketch below).
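
For context, marking a (value type, memory type) pair as Custom with setTruncStoreAction routes that truncating store to the target's custom lowering code instead of the generic legalizer, which is what avoids the crash. The snippet below is a minimal, hypothetical sketch of that mechanism, not the actual AArch64 lowering added here (which reuses the streaming-compatible SVE path, visible as the fcvt/st1h sequences in the new test): it simply expands a truncating FP store into an explicit FP_ROUND followed by a plain store. The helper name LowerTruncStoreSketch and its placement are illustrative assumptions.

    // Hypothetical sketch: one way a target can custom-lower a truncating FP
    // store once setTruncStoreAction(VT, MemVT, Custom) has been requested.
    // This is NOT the AArch64 implementation; it only illustrates the mechanism.
    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h"

    using namespace llvm;

    static SDValue LowerTruncStoreSketch(SDValue Op, SelectionDAG &DAG) {
      StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
      SDLoc DL(ST);
      EVT MemVT = ST->getMemoryVT();

      // Only handle truncating stores of floating-point vectors here;
      // everything else is left to the default handling.
      if (!ST->isTruncatingStore() || !MemVT.isFloatingPoint())
        return SDValue();

      // Narrow the stored value explicitly (e.g. v4f32 -> v4f16), then emit an
      // ordinary, non-truncating store of the rounded value.
      SDValue Rounded = DAG.getNode(ISD::FP_ROUND, DL, MemVT, ST->getValue(),
                                    DAG.getIntPtrConstant(0, DL));
      return DAG.getStore(ST->getChain(), DL, Rounded, ST->getBasePtr(),
                          ST->getMemOperand());
    }

The registration side is exactly what the diff below does: each float-to-float (value type, memory type) pair that can appear in streaming-compatible code is marked Custom so it reaches the target's lowering instead of the generic TruncStore legalization.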

Reviewed By: sdesmalen

Differential Revision: https://reviews.llvm.org/D138720

Added: 
    llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index c89a794610f4..ac5f8960095c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1398,6 +1398,15 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
 
     if (Subtarget->forceStreamingCompatibleSVE()) {
+      setTruncStoreAction(MVT::v2f32, MVT::v2f16, Custom);
+      setTruncStoreAction(MVT::v4f32, MVT::v4f16, Custom);
+      setTruncStoreAction(MVT::v8f32, MVT::v8f16, Custom);
+      setTruncStoreAction(MVT::v1f64, MVT::v1f16, Custom);
+      setTruncStoreAction(MVT::v2f64, MVT::v2f16, Custom);
+      setTruncStoreAction(MVT::v4f64, MVT::v4f16, Custom);
+      setTruncStoreAction(MVT::v1f64, MVT::v1f32, Custom);
+      setTruncStoreAction(MVT::v2f64, MVT::v2f32, Custom);
+      setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
       for (MVT VT : {MVT::v8i8, MVT::v16i8, MVT::v4i16, MVT::v8i16, MVT::v2i32,
                      MVT::v4i32, MVT::v1i64, MVT::v2i64})
         addTypeForStreamingSVE(VT);

diff  --git a/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
new file mode 100644
index 000000000000..130beca2f2c1
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-streaming-mode-fixed-length-fp-extend-trunc.ll
@@ -0,0 +1,588 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -force-streaming-compatible-sve < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;
+; FCVT H -> S
+;
+
+define void @fcvt_v2f16_v2f32(<2 x half>* %a, <2 x float>* %b) #0 {
+; CHECK-LABEL: fcvt_v2f16_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldr h0, [x0, #2]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #12]
+; CHECK-NEXT:    ldr h0, [x0]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #8]
+; CHECK-NEXT:    ldr d0, [sp, #8]
+; CHECK-NEXT:    str d0, [x1]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %op1 = load <2 x half>, <2 x half>* %a
+  %res = fpext <2 x half> %op1 to <2 x float>
+  store <2 x float> %res, <2 x float>* %b
+  ret void
+}
+
+define void @fcvt_v4f16_v4f32(<4 x half>* %a, <4 x float>* %b) #0 {
+; CHECK-LABEL: fcvt_v4f16_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldr h0, [x0, #6]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #12]
+; CHECK-NEXT:    ldr h0, [x0, #4]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #8]
+; CHECK-NEXT:    ldr h0, [x0, #2]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #4]
+; CHECK-NEXT:    ldr h0, [x0]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp]
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %op1 = load <4 x half>, <4 x half>* %a
+  %res = fpext <4 x half> %op1 to <4 x float>
+  store <4 x float> %res, <4 x float>* %b
+  ret void
+}
+
+define void @fcvt_v8f16_v8f32(<8 x half>* %a, <8 x float>* %b) #0 {
+; CHECK-LABEL: fcvt_v8f16_v8f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    ldr h0, [x0, #14]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #28]
+; CHECK-NEXT:    ldr h0, [x0, #12]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #24]
+; CHECK-NEXT:    ldr h0, [x0, #10]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #20]
+; CHECK-NEXT:    ldr h0, [x0, #8]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #16]
+; CHECK-NEXT:    ldr h0, [x0, #6]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #12]
+; CHECK-NEXT:    ldr h0, [x0, #4]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #8]
+; CHECK-NEXT:    ldr h0, [x0, #2]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #4]
+; CHECK-NEXT:    ldr h0, [x0]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp]
+; CHECK-NEXT:    ldp q0, q1, [sp]
+; CHECK-NEXT:    stp q0, q1, [x1]
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    ret
+  %op1 = load <8 x half>, <8 x half>* %a
+  %res = fpext <8 x half> %op1 to <8 x float>
+  store <8 x float> %res, <8 x float>* %b
+  ret void
+}
+
+define void @fcvt_v16f16_v16f32(<16 x half>* %a, <16 x float>* %b) #0 {
+; CHECK-LABEL: fcvt_v16f16_v16f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    ldr h0, [x0, #22]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #60]
+; CHECK-NEXT:    ldr h0, [x0, #20]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #56]
+; CHECK-NEXT:    ldr h0, [x0, #18]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #52]
+; CHECK-NEXT:    ldr h0, [x0, #16]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #48]
+; CHECK-NEXT:    ldr h0, [x0, #14]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #44]
+; CHECK-NEXT:    ldr h0, [x0, #12]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #40]
+; CHECK-NEXT:    ldr h0, [x0, #10]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #36]
+; CHECK-NEXT:    ldr h0, [x0, #8]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #32]
+; CHECK-NEXT:    ldr h0, [x0, #6]
+; CHECK-NEXT:    ldp q1, q3, [sp, #32]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #12]
+; CHECK-NEXT:    ldr h0, [x0, #4]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #8]
+; CHECK-NEXT:    ldr h0, [x0, #2]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #4]
+; CHECK-NEXT:    ldr h0, [x0]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp]
+; CHECK-NEXT:    ldr h0, [x0, #30]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #28]
+; CHECK-NEXT:    ldr h0, [x0, #28]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #24]
+; CHECK-NEXT:    ldr h0, [x0, #26]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #20]
+; CHECK-NEXT:    ldr h0, [x0, #24]
+; CHECK-NEXT:    fcvt s0, h0
+; CHECK-NEXT:    str s0, [sp, #16]
+; CHECK-NEXT:    ldp q0, q2, [sp]
+; CHECK-NEXT:    stp q0, q1, [x1]
+; CHECK-NEXT:    stp q3, q2, [x1, #32]
+; CHECK-NEXT:    add sp, sp, #64
+; CHECK-NEXT:    ret
+  %op1 = load <16 x half>, <16 x half>* %a
+  %res = fpext <16 x half> %op1 to <16 x float>
+  store <16 x float> %res, <16 x float>* %b
+  ret void
+}
+
+;
+; FCVT H -> D
+;
+
+define void @fcvt_v1f16_v1f64(<1 x half>* %a, <1 x double>* %b) #0 {
+; CHECK-LABEL: fcvt_v1f16_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr h0, [x0]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [x1]
+; CHECK-NEXT:    ret
+  %op1 = load <1 x half>, <1 x half>* %a
+  %res = fpext <1 x half> %op1 to <1 x double>
+  store <1 x double> %res, <1 x double>* %b
+  ret void
+}
+
+define void @fcvt_v2f16_v2f64(<2 x half>* %a, <2 x double>* %b) #0 {
+; CHECK-LABEL: fcvt_v2f16_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldr h0, [x0, #2]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #8]
+; CHECK-NEXT:    ldr h0, [x0]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp]
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %op1 = load <2 x half>, <2 x half>* %a
+  %res = fpext <2 x half> %op1 to <2 x double>
+  store <2 x double> %res, <2 x double>* %b
+  ret void
+}
+
+define void @fcvt_v4f16_v4f64(<4 x half>* %a, <4 x double>* %b) #0 {
+; CHECK-LABEL: fcvt_v4f16_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    ldr h0, [x0, #6]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #24]
+; CHECK-NEXT:    ldr h0, [x0, #4]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #16]
+; CHECK-NEXT:    ldr h0, [x0, #2]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #8]
+; CHECK-NEXT:    ldr h0, [x0]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp]
+; CHECK-NEXT:    ldp q0, q1, [sp]
+; CHECK-NEXT:    stp q0, q1, [x1]
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    ret
+  %op1 = load <4 x half>, <4 x half>* %a
+  %res = fpext <4 x half> %op1 to <4 x double>
+  store <4 x double> %res, <4 x double>* %b
+  ret void
+}
+
+define void @fcvt_v8f16_v8f64(<8 x half>* %a, <8 x double>* %b) #0 {
+; CHECK-LABEL: fcvt_v8f16_v8f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    ldr h0, [x0, #10]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #56]
+; CHECK-NEXT:    ldr h0, [x0, #8]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #48]
+; CHECK-NEXT:    ldr h0, [x0, #6]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #40]
+; CHECK-NEXT:    ldr h0, [x0, #4]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #32]
+; CHECK-NEXT:    ldr h0, [x0, #2]
+; CHECK-NEXT:    ldp q1, q3, [sp, #32]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #8]
+; CHECK-NEXT:    ldr h0, [x0]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp]
+; CHECK-NEXT:    ldr h0, [x0, #14]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #24]
+; CHECK-NEXT:    ldr h0, [x0, #12]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #16]
+; CHECK-NEXT:    ldp q0, q2, [sp]
+; CHECK-NEXT:    stp q0, q1, [x1]
+; CHECK-NEXT:    stp q3, q2, [x1, #32]
+; CHECK-NEXT:    add sp, sp, #64
+; CHECK-NEXT:    ret
+  %op1 = load <8 x half>, <8 x half>* %a
+  %res = fpext <8 x half> %op1 to <8 x double>
+  store <8 x double> %res, <8 x double>* %b
+  ret void
+}
+
+define void @fcvt_v16f16_v16f64(<16 x half>* %a, <16 x double>* %b) #0 {
+; CHECK-LABEL: fcvt_v16f16_v16f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #128
+; CHECK-NEXT:    .cfi_def_cfa_offset 128
+; CHECK-NEXT:    ldr h0, [x0, #26]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #24]
+; CHECK-NEXT:    ldr h0, [x0, #24]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #16]
+; CHECK-NEXT:    ldr h0, [x0, #6]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #88]
+; CHECK-NEXT:    ldr h0, [x0, #4]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #80]
+; CHECK-NEXT:    ldr h0, [x0, #2]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #8]
+; CHECK-NEXT:    ldr h0, [x0]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp]
+; CHECK-NEXT:    ldr h0, [x0, #14]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #72]
+; CHECK-NEXT:    ldr h0, [x0, #12]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #64]
+; CHECK-NEXT:    ldr h0, [x0, #10]
+; CHECK-NEXT:    ldp q3, q1, [sp, #64]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #104]
+; CHECK-NEXT:    ldr h0, [x0, #8]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #96]
+; CHECK-NEXT:    ldr h0, [x0, #22]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #56]
+; CHECK-NEXT:    ldr h0, [x0, #20]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #48]
+; CHECK-NEXT:    ldr h0, [x0, #18]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #120]
+; CHECK-NEXT:    ldr h0, [x0, #16]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #112]
+; CHECK-NEXT:    ldr h0, [x0, #30]
+; CHECK-NEXT:    ldp q6, q4, [sp, #96]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #40]
+; CHECK-NEXT:    ldr h0, [x0, #28]
+; CHECK-NEXT:    fcvt d0, h0
+; CHECK-NEXT:    str d0, [sp, #32]
+; CHECK-NEXT:    ldp q2, q0, [sp]
+; CHECK-NEXT:    ldp q7, q5, [sp, #32]
+; CHECK-NEXT:    stp q2, q1, [x1]
+; CHECK-NEXT:    stp q6, q3, [x1, #32]
+; CHECK-NEXT:    stp q0, q7, [x1, #96]
+; CHECK-NEXT:    stp q4, q5, [x1, #64]
+; CHECK-NEXT:    add sp, sp, #128
+; CHECK-NEXT:    ret
+  %op1 = load <16 x half>, <16 x half>* %a
+  %res = fpext <16 x half> %op1 to <16 x double>
+  store <16 x double> %res, <16 x double>* %b
+  ret void
+}
+
+;
+; FCVT S -> D
+;
+
+define void @fcvt_v1f32_v1f64(<1 x float>* %a, <1 x double>* %b) #0 {
+; CHECK-LABEL: fcvt_v1f32_v1f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [x1]
+; CHECK-NEXT:    ret
+  %op1 = load <1 x float>, <1 x float>* %a
+  %res = fpext <1 x float> %op1 to <1 x double>
+  store <1 x double> %res, <1 x double>* %b
+  ret void
+}
+
+define void @fcvt_v2f32_v2f64(<2 x float>* %a, <2 x double>* %b) #0 {
+; CHECK-LABEL: fcvt_v2f32_v2f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    ldr s0, [x0, #4]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #8]
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp]
+; CHECK-NEXT:    ldr q0, [sp]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    add sp, sp, #16
+; CHECK-NEXT:    ret
+  %op1 = load <2 x float>, <2 x float>* %a
+  %res = fpext <2 x float> %op1 to <2 x double>
+  store <2 x double> %res, <2 x double>* %b
+  ret void
+}
+
+define void @fcvt_v4f32_v4f64(<4 x float>* %a, <4 x double>* %b) #0 {
+; CHECK-LABEL: fcvt_v4f32_v4f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #32
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    ldr s0, [x0, #12]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #24]
+; CHECK-NEXT:    ldr s0, [x0, #8]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #16]
+; CHECK-NEXT:    ldr s0, [x0, #4]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #8]
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp]
+; CHECK-NEXT:    ldp q0, q1, [sp]
+; CHECK-NEXT:    stp q0, q1, [x1]
+; CHECK-NEXT:    add sp, sp, #32
+; CHECK-NEXT:    ret
+  %op1 = load <4 x float>, <4 x float>* %a
+  %res = fpext <4 x float> %op1 to <4 x double>
+  store <4 x double> %res, <4 x double>* %b
+  ret void
+}
+
+define void @fcvt_v8f32_v8f64(<8 x float>* %a, <8 x double>* %b) #0 {
+; CHECK-LABEL: fcvt_v8f32_v8f64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sub sp, sp, #64
+; CHECK-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-NEXT:    ldr s0, [x0, #20]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #56]
+; CHECK-NEXT:    ldr s0, [x0, #16]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #48]
+; CHECK-NEXT:    ldr s0, [x0, #12]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #40]
+; CHECK-NEXT:    ldr s0, [x0, #8]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #32]
+; CHECK-NEXT:    ldr s0, [x0, #4]
+; CHECK-NEXT:    ldp q1, q3, [sp, #32]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #8]
+; CHECK-NEXT:    ldr s0, [x0]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp]
+; CHECK-NEXT:    ldr s0, [x0, #28]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #24]
+; CHECK-NEXT:    ldr s0, [x0, #24]
+; CHECK-NEXT:    fcvt d0, s0
+; CHECK-NEXT:    str d0, [sp, #16]
+; CHECK-NEXT:    ldp q0, q2, [sp]
+; CHECK-NEXT:    stp q0, q1, [x1]
+; CHECK-NEXT:    stp q3, q2, [x1, #32]
+; CHECK-NEXT:    add sp, sp, #64
+; CHECK-NEXT:    ret
+  %op1 = load <8 x float>, <8 x float>* %a
+  %res = fpext <8 x float> %op1 to <8 x double>
+  store <8 x double> %res, <8 x double>* %b
+  ret void
+}
+
+;
+; FCVT S -> H
+;
+
+define void @fcvt_v2f32_v2f16(<2 x float>* %a, <2 x half>* %b) #0 {
+; CHECK-LABEL: fcvt_v2f32_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl2
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
+; CHECK-NEXT:    st1h { z0.s }, p0, [x1]
+; CHECK-NEXT:    ret
+  %op1 = load <2 x float>, <2 x float>* %a
+  %res = fptrunc <2 x float> %op1 to <2 x half>
+  store <2 x half> %res, <2 x half>* %b
+  ret void
+}
+
+define void @fcvt_v4f32_v4f16(<4 x float>* %a, <4 x half>* %b) #0 {
+; CHECK-LABEL: fcvt_v4f32_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
+; CHECK-NEXT:    st1h { z0.s }, p0, [x1]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x float>, <4 x float>* %a
+  %res = fptrunc <4 x float> %op1 to <4 x half>
+  store <4 x half> %res, <4 x half>* %b
+  ret void
+}
+
+define void @fcvt_v8f32_v8f16(<8 x float>* %a, <8 x half>* %b) #0 {
+; CHECK-LABEL: fcvt_v8f32_v8f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    mov x8, #4
+; CHECK-NEXT:    ptrue p0.s, vl4
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.s
+; CHECK-NEXT:    st1h { z0.s }, p0, [x1]
+; CHECK-NEXT:    fcvt z1.h, p0/m, z1.s
+; CHECK-NEXT:    st1h { z1.s }, p0, [x1, x8, lsl #1]
+; CHECK-NEXT:    ret
+  %op1 = load <8 x float>, <8 x float>* %a
+  %res = fptrunc <8 x float> %op1 to <8 x half>
+  store <8 x half> %res, <8 x half>* %b
+  ret void
+}
+
+;
+; FCVT D -> H
+;
+
+define void @fcvt_v1f64_v1f16(<1 x double>* %a, <1 x half>* %b) #0 {
+; CHECK-LABEL: fcvt_v1f64_v1f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr d0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.d
+; CHECK-NEXT:    st1h { z0.d }, p0, [x1]
+; CHECK-NEXT:    ret
+  %op1 = load <1 x double>, <1 x double>* %a
+  %res = fptrunc <1 x double> %op1 to <1 x half>
+  store <1 x half> %res, <1 x half>* %b
+  ret void
+}
+
+define void @fcvt_v2f64_v2f16(<2 x double>* %a, <2 x half>* %b) #0 {
+; CHECK-LABEL: fcvt_v2f64_v2f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.d
+; CHECK-NEXT:    st1h { z0.d }, p0, [x1]
+; CHECK-NEXT:    ret
+  %op1 = load <2 x double>, <2 x double>* %a
+  %res = fptrunc <2 x double> %op1 to <2 x half>
+  store <2 x half> %res, <2 x half>* %b
+  ret void
+}
+
+define void @fcvt_v4f64_v4f16(<4 x double>* %a, <4 x half>* %b) #0 {
+; CHECK-LABEL: fcvt_v4f64_v4f16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    mov x8, #2
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcvt z0.h, p0/m, z0.d
+; CHECK-NEXT:    st1h { z0.d }, p0, [x1]
+; CHECK-NEXT:    fcvt z1.h, p0/m, z1.d
+; CHECK-NEXT:    st1h { z1.d }, p0, [x1, x8, lsl #1]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x double>, <4 x double>* %a
+  %res = fptrunc <4 x double> %op1 to <4 x half>
+  store <4 x half> %res, <4 x half>* %b
+  ret void
+}
+
+;
+; FCVT D -> S
+;
+
+define void @fcvt_v1f64_v1f32(<1 x double> %op1, <1 x float>* %b) #0 {
+; CHECK-LABEL: fcvt_v1f64_v1f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl1
+; CHECK-NEXT:    fcvt z0.s, p0/m, z0.d
+; CHECK-NEXT:    st1w { z0.d }, p0, [x0]
+; CHECK-NEXT:    ret
+  %res = fptrunc <1 x double> %op1 to <1 x float>
+  store <1 x float> %res, <1 x float>* %b
+  ret void
+}
+
+define void @fcvt_v2f64_v2f32(<2 x double> %op1, <2 x float>* %b) #0 {
+; CHECK-LABEL: fcvt_v2f64_v2f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    // kill: def $q0 killed $q0 def $z0
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcvt z0.s, p0/m, z0.d
+; CHECK-NEXT:    st1w { z0.d }, p0, [x0]
+; CHECK-NEXT:    ret
+  %res = fptrunc <2 x double> %op1 to <2 x float>
+  store <2 x float> %res, <2 x float>* %b
+  ret void
+}
+
+define void @fcvt_v4f64_v4f32(<4 x double>* %a, <4 x float>* %b) #0 {
+; CHECK-LABEL: fcvt_v4f64_v4f32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldp q0, q1, [x0]
+; CHECK-NEXT:    mov x8, #2
+; CHECK-NEXT:    ptrue p0.d, vl2
+; CHECK-NEXT:    fcvt z0.s, p0/m, z0.d
+; CHECK-NEXT:    st1w { z0.d }, p0, [x1]
+; CHECK-NEXT:    fcvt z1.s, p0/m, z1.d
+; CHECK-NEXT:    st1w { z1.d }, p0, [x1, x8, lsl #2]
+; CHECK-NEXT:    ret
+  %op1 = load <4 x double>, <4 x double>* %a
+  %res = fptrunc <4 x double> %op1 to <4 x float>
+  store <4 x float> %res, <4 x float>* %b
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }


        


More information about the llvm-commits mailing list