[llvm] 173514d - [AArch64] Add fp128 tests in `fcmp.ll`. NFC

Tianyi Guan via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 4 00:59:35 PDT 2024


Author: Tianyi Guan
Date: 2024-07-04T08:58:53+01:00
New Revision: 173514d58ec4e6166670f1e37a038df3865c8b96

URL: https://github.com/llvm/llvm-project/commit/173514d58ec4e6166670f1e37a038df3865c8b96
DIFF: https://github.com/llvm/llvm-project/commit/173514d58ec4e6166670f1e37a038df3865c8b96.diff

LOG: [AArch64] Add fp128 tests in `fcmp.ll`. NFC
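
For context, every test added here has the same shape: an `fcmp olt` on fp128
operands feeding a `select`. AArch64 has no native fp128 compare, so the
compare is lowered to a `__lttf2` libcall whose i32 result is compared against
zero; GlobalISel does not handle these cases yet and falls back to
SelectionDAG, which is what the CHECK-GI warnings below record. A minimal
sketch of the pattern (illustrative only; the function name is made up, but it
has the same shape as the functions in the diff):

define i32 @f128_i32_sketch(fp128 %a, fp128 %b, i32 %d, i32 %e) {
entry:
  %c = fcmp olt fp128 %a, %b        ; lowers to: bl __lttf2; cmp w0, #0
  %s = select i1 %c, i32 %d, i32 %e ; lowers to: csel ..., lt
  ret i32 %s
}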

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/fcmp.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/fcmp.ll b/llvm/test/CodeGen/AArch64/fcmp.ll
index b1ca88975a621..a76d0b36fa1aa 100644
--- a/llvm/test/CodeGen/AArch64/fcmp.ll
+++ b/llvm/test/CodeGen/AArch64/fcmp.ll
@@ -4,6 +4,226 @@
 ; RUN: llc -mtriple=aarch64 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-NOFP16
 ; RUN: llc -mtriple=aarch64 -mattr=+fullfp16 -global-isel -global-isel-abort=2 -verify-machineinstrs %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI,CHECK-GI-FP16
 
+
+; CHECK-GI:      warning: Instruction selection used fallback path for f128_fp128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for f128_i128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for f128_double
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for f128_float
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for f128_i32
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for f128_half
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for v2f128_fp128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for v3f128_fp128
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for v2f128_double
+; CHECK-GI-NEXT: warning: Instruction selection used fallback path for v3f128_double
+
+
+define fp128 @f128_fp128(fp128 %a, fp128 %b, fp128 %d, fp128 %e) {
+; CHECK-LABEL: f128_fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #48
+; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    stp q2, q3, [sp] // 32-byte Folded Spill
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    b.ge .LBB0_2
+; CHECK-NEXT:  // %bb.1: // %entry
+; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:  .LBB0_2: // %entry
+; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    ret
+entry:
+  %c = fcmp olt fp128 %a, %b
+  %s = select i1 %c, fp128 %d, fp128 %e
+  ret fp128 %s
+}
+
+define i128 @f128_i128(fp128 %a, fp128 %b, i128 %d, i128 %e) {
+; CHECK-LABEL: f128_i128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #80
+; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    stp x22, x21, [sp, #48] // 16-byte Folded Spill
+; CHECK-NEXT:    stp x20, x19, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 80
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w20, -16
+; CHECK-NEXT:    .cfi_offset w21, -24
+; CHECK-NEXT:    .cfi_offset w22, -32
+; CHECK-NEXT:    .cfi_offset w30, -48
+; CHECK-NEXT:    mov x19, x3
+; CHECK-NEXT:    mov x20, x2
+; CHECK-NEXT:    mov x21, x1
+; CHECK-NEXT:    mov x22, x0
+; CHECK-NEXT:    stp q0, q1, [sp] // 32-byte Folded Spill
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    ldp q0, q1, [sp] // 32-byte Folded Reload
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    csel x20, x22, x20, lt
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    mov w8, w0
+; CHECK-NEXT:    mov x0, x20
+; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT:    cmp w8, #0
+; CHECK-NEXT:    csel x1, x21, x19, lt
+; CHECK-NEXT:    ldp x20, x19, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT:    ldp x22, x21, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #80
+; CHECK-NEXT:    ret
+entry:
+  %c = fcmp olt fp128 %a, %b
+  %s = select i1 %c, i128 %d, i128 %e
+  ret i128 %s
+}
+
+define double @f128_double(fp128 %a, fp128 %b, double %d, double %e) {
+; CHECK-LABEL: f128_double:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    stp d9, d8, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    .cfi_offset b8, -24
+; CHECK-NEXT:    .cfi_offset b9, -32
+; CHECK-NEXT:    fmov d8, d3
+; CHECK-NEXT:    fmov d9, d2
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT:    fcsel d0, d9, d8, lt
+; CHECK-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %c = fcmp olt fp128 %a, %b
+  %s = select i1 %c, double %d, double %e
+  ret double %s
+}
+
+define float @f128_float(fp128 %a, fp128 %b, float %d, float %e) {
+; CHECK-LABEL: f128_float:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    stp d9, d8, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    .cfi_offset b8, -24
+; CHECK-NEXT:    .cfi_offset b9, -32
+; CHECK-NEXT:    fmov s8, s3
+; CHECK-NEXT:    fmov s9, s2
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT:    fcsel s0, s9, s8, lt
+; CHECK-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %c = fcmp olt fp128 %a, %b
+  %s = select i1 %c, float %d, float %e
+  ret float %s
+}
+
+define i32 @f128_i32(fp128 %a, fp128 %b, i32 %d, i32 %e) {
+; CHECK-LABEL: f128_i32:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x30, [sp, #-32]! // 8-byte Folded Spill
+; CHECK-NEXT:    stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-NEXT:    .cfi_offset w19, -8
+; CHECK-NEXT:    .cfi_offset w20, -16
+; CHECK-NEXT:    .cfi_offset w30, -32
+; CHECK-NEXT:    mov w19, w1
+; CHECK-NEXT:    mov w20, w0
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    csel w0, w20, w19, lt
+; CHECK-NEXT:    ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp], #32 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %c = fcmp olt fp128 %a, %b
+  %s = select i1 %c, i32 %d, i32 %e
+  ret i32 %s
+}
+
+define half @f128_half(fp128 %a, fp128 %b, half %d, half %e) {
+; CHECK-SD-NOFP16-LABEL: f128_half:
+; CHECK-SD-NOFP16:       // %bb.0: // %entry
+; CHECK-SD-NOFP16-NEXT:    stp d9, d8, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-SD-NOFP16-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-SD-NOFP16-NEXT:    .cfi_offset w30, -16
+; CHECK-SD-NOFP16-NEXT:    .cfi_offset b8, -24
+; CHECK-SD-NOFP16-NEXT:    .cfi_offset b9, -32
+; CHECK-SD-NOFP16-NEXT:    fmov s8, s3
+; CHECK-SD-NOFP16-NEXT:    fmov s9, s2
+; CHECK-SD-NOFP16-NEXT:    bl __lttf2
+; CHECK-SD-NOFP16-NEXT:    cmp w0, #0
+; CHECK-SD-NOFP16-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT:    fcsel s0, s9, s8, lt
+; CHECK-SD-NOFP16-NEXT:    // kill: def $h0 killed $h0 killed $s0
+; CHECK-SD-NOFP16-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
+; CHECK-SD-NOFP16-NEXT:    ret
+;
+; CHECK-SD-FP16-LABEL: f128_half:
+; CHECK-SD-FP16:       // %bb.0: // %entry
+; CHECK-SD-FP16-NEXT:    stp d9, d8, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SD-FP16-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-SD-FP16-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-SD-FP16-NEXT:    .cfi_offset w30, -16
+; CHECK-SD-FP16-NEXT:    .cfi_offset b8, -24
+; CHECK-SD-FP16-NEXT:    .cfi_offset b9, -32
+; CHECK-SD-FP16-NEXT:    fmov s8, s3
+; CHECK-SD-FP16-NEXT:    fmov s9, s2
+; CHECK-SD-FP16-NEXT:    bl __lttf2
+; CHECK-SD-FP16-NEXT:    cmp w0, #0
+; CHECK-SD-FP16-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-SD-FP16-NEXT:    fcsel h0, h9, h8, lt
+; CHECK-SD-FP16-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
+; CHECK-SD-FP16-NEXT:    ret
+;
+; CHECK-GI-NOFP16-LABEL: f128_half:
+; CHECK-GI-NOFP16:       // %bb.0: // %entry
+; CHECK-GI-NOFP16-NEXT:    stp d9, d8, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-GI-NOFP16-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-GI-NOFP16-NEXT:    .cfi_offset w30, -16
+; CHECK-GI-NOFP16-NEXT:    .cfi_offset b8, -24
+; CHECK-GI-NOFP16-NEXT:    .cfi_offset b9, -32
+; CHECK-GI-NOFP16-NEXT:    fmov s8, s3
+; CHECK-GI-NOFP16-NEXT:    fmov s9, s2
+; CHECK-GI-NOFP16-NEXT:    bl __lttf2
+; CHECK-GI-NOFP16-NEXT:    cmp w0, #0
+; CHECK-GI-NOFP16-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT:    fcsel s0, s9, s8, lt
+; CHECK-GI-NOFP16-NEXT:    // kill: def $h0 killed $h0 killed $s0
+; CHECK-GI-NOFP16-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
+; CHECK-GI-NOFP16-NEXT:    ret
+;
+; CHECK-GI-FP16-LABEL: f128_half:
+; CHECK-GI-FP16:       // %bb.0: // %entry
+; CHECK-GI-FP16-NEXT:    stp d9, d8, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-GI-FP16-NEXT:    str x30, [sp, #16] // 8-byte Folded Spill
+; CHECK-GI-FP16-NEXT:    .cfi_def_cfa_offset 32
+; CHECK-GI-FP16-NEXT:    .cfi_offset w30, -16
+; CHECK-GI-FP16-NEXT:    .cfi_offset b8, -24
+; CHECK-GI-FP16-NEXT:    .cfi_offset b9, -32
+; CHECK-GI-FP16-NEXT:    fmov s8, s3
+; CHECK-GI-FP16-NEXT:    fmov s9, s2
+; CHECK-GI-FP16-NEXT:    bl __lttf2
+; CHECK-GI-FP16-NEXT:    cmp w0, #0
+; CHECK-GI-FP16-NEXT:    ldr x30, [sp, #16] // 8-byte Folded Reload
+; CHECK-GI-FP16-NEXT:    fcsel h0, h9, h8, lt
+; CHECK-GI-FP16-NEXT:    ldp d9, d8, [sp], #32 // 16-byte Folded Reload
+; CHECK-GI-FP16-NEXT:    ret
+entry:
+  %c = fcmp olt fp128 %a, %b
+  %s = select i1 %c, half %d, half %e
+  ret half %s
+}
+
 define double @f64_double(double %a, double %b, double %d, double %e) {
 ; CHECK-LABEL: f64_double:
 ; CHECK:       // %bb.0: // %entry
@@ -135,6 +355,186 @@ entry:
   ret i32 %s
 }
 
+define <2 x fp128> @v2f128_fp128(<2 x fp128> %a, <2 x fp128> %b, <2 x fp128> %d, <2 x fp128> %e) {
+; CHECK-LABEL: v2f128_fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #112
+; CHECK-NEXT:    str x30, [sp, #96] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    stp q4, q5, [sp] // 32-byte Folded Spill
+; CHECK-NEXT:    stp q1, q3, [sp, #32] // 32-byte Folded Spill
+; CHECK-NEXT:    mov v1.16b, v2.16b
+; CHECK-NEXT:    stp q7, q6, [sp, #64] // 32-byte Folded Spill
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    b.ge .LBB12_2
+; CHECK-NEXT:  // %bb.1: // %entry
+; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    str q0, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT:  .LBB12_2: // %entry
+; CHECK-NEXT:    ldp q0, q1, [sp, #32] // 32-byte Folded Reload
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    ldr q1, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    b.ge .LBB12_4
+; CHECK-NEXT:  // %bb.3: // %entry
+; CHECK-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:  .LBB12_4: // %entry
+; CHECK-NEXT:    ldr q0, [sp, #80] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp, #96] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #112
+; CHECK-NEXT:    ret
+entry:
+  %c = fcmp olt <2 x fp128> %a, %b
+  %s = select <2 x i1> %c, <2 x fp128> %d, <2 x fp128> %e
+  ret <2 x fp128> %s
+}
+
+define <3 x fp128> @v3f128_fp128(<3 x fp128> %a, <3 x fp128> %b, <3 x fp128> %d, <3 x fp128> %e) {
+; CHECK-LABEL: v3f128_fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #112
+; CHECK-NEXT:    str x30, [sp, #96] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 112
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    stp q1, q4, [sp] // 32-byte Folded Spill
+; CHECK-NEXT:    mov v1.16b, v3.16b
+; CHECK-NEXT:    stp q2, q5, [sp, #32] // 32-byte Folded Spill
+; CHECK-NEXT:    stp q6, q7, [sp, #64] // 32-byte Folded Spill
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    b.lt .LBB13_2
+; CHECK-NEXT:  // %bb.1:
+; CHECK-NEXT:    ldr q0, [sp, #128]
+; CHECK-NEXT:    str q0, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT:  .LBB13_2: // %entry
+; CHECK-NEXT:    ldp q0, q1, [sp] // 32-byte Folded Reload
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    b.lt .LBB13_4
+; CHECK-NEXT:  // %bb.3:
+; CHECK-NEXT:    ldr q0, [sp, #144]
+; CHECK-NEXT:    str q0, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT:  .LBB13_4: // %entry
+; CHECK-NEXT:    ldp q0, q1, [sp, #32] // 32-byte Folded Reload
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    add x8, sp, #160
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    add x9, sp, #112
+; CHECK-NEXT:    csel x8, x9, x8, lt
+; CHECK-NEXT:    ldp q0, q1, [sp, #64] // 32-byte Folded Reload
+; CHECK-NEXT:    ldr q2, [x8]
+; CHECK-NEXT:    ldr x30, [sp, #96] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #112
+; CHECK-NEXT:    ret
+entry:
+  %c = fcmp olt <3 x fp128> %a, %b
+  %s = select <3 x i1> %c, <3 x fp128> %d, <3 x fp128> %e
+  ret <3 x fp128> %s
+}
+
+
+define <2 x double> @v2f128_double(<2 x fp128> %a, <2 x fp128> %b, <2 x double> %d, <2 x double> %e) {
+; CHECK-LABEL: v2f128_double:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #96
+; CHECK-NEXT:    str x30, [sp, #80] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 96
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    mov v1.16b, v3.16b
+; CHECK-NEXT:    stp q4, q5, [sp, #48] // 32-byte Folded Spill
+; CHECK-NEXT:    str q2, [sp, #32] // 16-byte Folded Spill
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    ldr q1, [sp, #32] // 16-byte Folded Reload
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    ldr q1, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp, #80] // 8-byte Folded Reload
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    mov v0.d[1], v1.d[0]
+; CHECK-NEXT:    ldp q2, q1, [sp, #48] // 32-byte Folded Reload
+; CHECK-NEXT:    bsl v0.16b, v2.16b, v1.16b
+; CHECK-NEXT:    add sp, sp, #96
+; CHECK-NEXT:    ret
+entry:
+  %c = fcmp olt <2 x fp128> %a, %b
+  %s = select <2 x i1> %c, <2 x double> %d, <2 x double> %e
+  ret <2 x double> %s
+}
+
+define <3 x double> @v3f128_double(<3 x fp128> %a, <3 x fp128> %b, <3 x double> %d, <3 x double> %e) {
+; CHECK-LABEL: v3f128_double:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #160
+; CHECK-NEXT:    str x30, [sp, #144] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 160
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    stp q2, q5, [sp, #112] // 32-byte Folded Spill
+; CHECK-NEXT:    // kill: def $d6 killed $d6 def $q6
+; CHECK-NEXT:    // kill: def $d7 killed $d7 def $q7
+; CHECK-NEXT:    ldr d5, [sp, #184]
+; CHECK-NEXT:    str q3, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT:    ldp d3, d2, [sp, #168]
+; CHECK-NEXT:    mov v6.d[1], v7.d[0]
+; CHECK-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    mov v0.16b, v1.16b
+; CHECK-NEXT:    mov v1.16b, v4.16b
+; CHECK-NEXT:    str q5, [sp, #96] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr d5, [sp, #160]
+; CHECK-NEXT:    mov v3.d[1], v2.d[0]
+; CHECK-NEXT:    str q5, [sp, #80] // 16-byte Folded Spill
+; CHECK-NEXT:    stp q6, q3, [sp, #32] // 32-byte Folded Spill
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    ldr q1, [sp, #64] // 16-byte Folded Reload
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    fmov d0, x8
+; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    fmov d1, x8
+; CHECK-NEXT:    mov v1.d[1], v0.d[0]
+; CHECK-NEXT:    str q1, [sp, #64] // 16-byte Folded Spill
+; CHECK-NEXT:    ldp q0, q1, [sp, #112] // 32-byte Folded Reload
+; CHECK-NEXT:    bl __lttf2
+; CHECK-NEXT:    ldp q1, q0, [sp, #32] // 32-byte Folded Reload
+; CHECK-NEXT:    cmp w0, #0
+; CHECK-NEXT:    ldp q2, q4, [sp, #64] // 32-byte Folded Reload
+; CHECK-NEXT:    cset w8, lt
+; CHECK-NEXT:    sbfx x8, x8, #0, #1
+; CHECK-NEXT:    ldr q3, [sp, #96] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp, #144] // 8-byte Folded Reload
+; CHECK-NEXT:    bit v0.16b, v1.16b, v2.16b
+; CHECK-NEXT:    fmov d2, x8
+; CHECK-NEXT:    bsl v2.16b, v4.16b, v3.16b
+; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT:    // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT:    // kill: def $d2 killed $d2 killed $q2
+; CHECK-NEXT:    // kill: def $d1 killed $d1 killed $q1
+; CHECK-NEXT:    add sp, sp, #160
+; CHECK-NEXT:    ret
+entry:
+  %c = fcmp olt <3 x fp128> %a, %b
+  %s = select <3 x i1> %c, <3 x double> %d, <3 x double> %e
+  ret <3 x double> %s
+}
+
 define <2 x double> @v2f64_double(<2 x double> %a, <2 x double> %b, <2 x double> %d, <2 x double> %e) {
 ; CHECK-LABEL: v2f64_double:
 ; CHECK:       // %bb.0: // %entry
