[llvm] b0b1a11 - [AArch64] Add libcall for fp128 pow/log/sin/cos/etc.

David Green via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 3 00:03:51 PDT 2024


Author: David Green
Date: 2024-10-03T08:03:47+01:00
New Revision: b0b1a113053deae5315a57f2e9a1a689473525f7

URL: https://github.com/llvm/llvm-project/commit/b0b1a113053deae5315a57f2e9a1a689473525f7
DIFF: https://github.com/llvm/llvm-project/commit/b0b1a113053deae5315a57f2e9a1a689473525f7.diff

LOG: [AArch64] Add libcall for fp128 pow/log/sin/cos/etc.

As with other operations, this adds fp128 libcall support for the G_FPOW,
G_FLOG/G_FEXP, and G_FSIN/G_FCOS families of operations.
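
For example (mirroring the updated tests below), a scalar fp128 intrinsic
such as llvm.exp.fp128 is now legalized into a libcall to the corresponding
long-double libm routine, which can be emitted as a tail call:

  define fp128 @exp_fp128(fp128 %a) {
  entry:
    %c = call fp128 @llvm.exp.fp128(fp128 %a) ; lowered to a libcall
    ret fp128 %c
  }

  ; resulting AArch64 assembly:
  ; exp_fp128:
  ;         b expl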

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
    llvm/test/CodeGen/AArch64/fexplog.ll
    llvm/test/CodeGen/AArch64/fpow.ll
    llvm/test/CodeGen/AArch64/fsincos.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
index 51aeee023f2e34..c00470b4d5c763 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64LegalizerInfo.cpp
@@ -291,7 +291,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST)
       .scalarize(0)
       // Regardless of FP16 support, widen 16-bit elements to 32-bits.
       .minScalar(0, s32)
-      .libcallFor({s32, s64});
+      .libcallFor({s32, s64, s128});
   getActionDefinitionsBuilder(G_FPOWI)
       .scalarize(0)
       .minScalar(0, s32)

diff --git a/llvm/test/CodeGen/AArch64/fexplog.ll b/llvm/test/CodeGen/AArch64/fexplog.ll
index 30ce389f231281..f13e2fcd1c4483 100644
--- a/llvm/test/CodeGen/AArch64/fexplog.ll
+++ b/llvm/test/CodeGen/AArch64/fexplog.ll
@@ -36,16 +36,26 @@ entry:
   ret half %c
 }
 
-define <1 x double> @exp_v1f64(<1 x double> %x) {
+define fp128 @exp_fp128(fp128 %a) {
+; CHECK-LABEL: exp_fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    b expl
+entry:
+  %c = call fp128 @llvm.exp.fp128(fp128 %a)
+  ret fp128 %c
+}
+
+define <1 x double> @exp_v1f64(<1 x double> %a) {
 ; CHECK-LABEL: exp_v1f64:
-; CHECK:       // %bb.0:
+; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl exp
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %c = call <1 x double> @llvm.exp.v1f64(<1 x double> %x)
+entry:
+  %c = call <1 x double> @llvm.exp.v1f64(<1 x double> %a)
   ret <1 x double> %c
 }
 
@@ -1273,6 +1283,28 @@ entry:
   ret <16 x half> %c
 }
 
+define <2 x fp128> @exp_v2fp128(<2 x fp128> %a) {
+; CHECK-LABEL: exp_v2fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #48
+; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    str q1, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    bl expl
+; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    bl expl
+; CHECK-NEXT:    mov v1.16b, v0.16b
+; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    ret
+entry:
+  %c = call <2 x fp128> @llvm.exp.v2fp128(<2 x fp128> %a)
+  ret <2 x fp128> %c
+}
+
 define double @exp2_f64(double %a) {
 ; CHECK-LABEL: exp2_f64:
 ; CHECK:       // %bb.0: // %entry
@@ -1307,16 +1339,26 @@ entry:
   ret half %c
 }
 
-define <1 x double> @exp2_v1f64(<1 x double> %x) {
+define fp128 @exp2_fp128(fp128 %a) {
+; CHECK-LABEL: exp2_fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    b exp2l
+entry:
+  %c = call fp128 @llvm.exp2.fp128(fp128 %a)
+  ret fp128 %c
+}
+
+define <1 x double> @exp2_v1f64(<1 x double> %a) {
 ; CHECK-LABEL: exp2_v1f64:
-; CHECK:       // %bb.0:
+; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl exp2
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %c = call <1 x double> @llvm.exp2.v1f64(<1 x double> %x)
+entry:
+  %c = call <1 x double> @llvm.exp2.v1f64(<1 x double> %a)
   ret <1 x double> %c
 }
 
@@ -2544,6 +2586,28 @@ entry:
   ret <16 x half> %c
 }
 
+define <2 x fp128> @exp2_v2fp128(<2 x fp128> %a) {
+; CHECK-LABEL: exp2_v2fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #48
+; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    str q1, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    bl exp2l
+; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    bl exp2l
+; CHECK-NEXT:    mov v1.16b, v0.16b
+; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    ret
+entry:
+  %c = call <2 x fp128> @llvm.exp2.v2fp128(<2 x fp128> %a)
+  ret <2 x fp128> %c
+}
+
 define double @log_f64(double %a) {
 ; CHECK-LABEL: log_f64:
 ; CHECK:       // %bb.0: // %entry
@@ -2578,16 +2642,26 @@ entry:
   ret half %c
 }
 
-define <1 x double> @log_v1f64(<1 x double> %x) {
+define fp128 @log_fp128(fp128 %a) {
+; CHECK-LABEL: log_fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    b logl
+entry:
+  %c = call fp128 @llvm.log.fp128(fp128 %a)
+  ret fp128 %c
+}
+
+define <1 x double> @log_v1f64(<1 x double> %a) {
 ; CHECK-LABEL: log_v1f64:
-; CHECK:       // %bb.0:
+; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl log
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %c = call <1 x double> @llvm.log.v1f64(<1 x double> %x)
+entry:
+  %c = call <1 x double> @llvm.log.v1f64(<1 x double> %a)
   ret <1 x double> %c
 }
 
@@ -3815,6 +3889,28 @@ entry:
   ret <16 x half> %c
 }
 
+define <2 x fp128> @log_v2fp128(<2 x fp128> %a) {
+; CHECK-LABEL: log_v2fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #48
+; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    str q1, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    bl logl
+; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    bl logl
+; CHECK-NEXT:    mov v1.16b, v0.16b
+; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    ret
+entry:
+  %c = call <2 x fp128> @llvm.log.v2fp128(<2 x fp128> %a)
+  ret <2 x fp128> %c
+}
+
 define double @log2_f64(double %a) {
 ; CHECK-LABEL: log2_f64:
 ; CHECK:       // %bb.0: // %entry
@@ -3849,16 +3945,26 @@ entry:
   ret half %c
 }
 
-define <1 x double> @log2_v1f64(<1 x double> %x) {
+define fp128 @log2_fp128(fp128 %a) {
+; CHECK-LABEL: log2_fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    b log2l
+entry:
+  %c = call fp128 @llvm.log2.fp128(fp128 %a)
+  ret fp128 %c
+}
+
+define <1 x double> @log2_v1f64(<1 x double> %a) {
 ; CHECK-LABEL: log2_v1f64:
-; CHECK:       // %bb.0:
+; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl log2
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %c = call <1 x double> @llvm.log2.v1f64(<1 x double> %x)
+entry:
+  %c = call <1 x double> @llvm.log2.v1f64(<1 x double> %a)
   ret <1 x double> %c
 }
 
@@ -5086,6 +5192,28 @@ entry:
   ret <16 x half> %c
 }
 
+define <2 x fp128> @log2_v2fp128(<2 x fp128> %a) {
+; CHECK-LABEL: log2_v2fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #48
+; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    str q1, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    bl log2l
+; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    bl log2l
+; CHECK-NEXT:    mov v1.16b, v0.16b
+; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    ret
+entry:
+  %c = call <2 x fp128> @llvm.log2.v2fp128(<2 x fp128> %a)
+  ret <2 x fp128> %c
+}
+
 define double @log10_f64(double %a) {
 ; CHECK-LABEL: log10_f64:
 ; CHECK:       // %bb.0: // %entry
@@ -5120,16 +5248,26 @@ entry:
   ret half %c
 }
 
-define <1 x double> @log10_v1f64(<1 x double> %x) {
+define fp128 @log10_fp128(fp128 %a) {
+; CHECK-LABEL: log10_fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    b log10l
+entry:
+  %c = call fp128 @llvm.log10.fp128(fp128 %a)
+  ret fp128 %c
+}
+
+define <1 x double> @log10_v1f64(<1 x double> %a) {
 ; CHECK-LABEL: log10_v1f64:
-; CHECK:       // %bb.0:
+; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
 ; CHECK-NEXT:    bl log10
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  %c = call <1 x double> @llvm.log10.v1f64(<1 x double> %x)
+entry:
+  %c = call <1 x double> @llvm.log10.v1f64(<1 x double> %a)
   ret <1 x double> %c
 }
 
@@ -6357,6 +6495,33 @@ entry:
   ret <16 x half> %c
 }
 
+define <2 x fp128> @log10_v2fp128(<2 x fp128> %a) {
+; CHECK-LABEL: log10_v2fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #48
+; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    str q1, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    bl log10l
+; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    bl log10l
+; CHECK-NEXT:    mov v1.16b, v0.16b
+; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    ret
+entry:
+  %c = call <2 x fp128> @llvm.log10.v2fp128(<2 x fp128> %a)
+  ret <2 x fp128> %c
+}
+
+declare <1 x double> @llvm.exp.v1f64(<1 x double>)
+declare <1 x double> @llvm.exp2.v1f64(<1 x double>)
+declare <1 x double> @llvm.log.v1f64(<1 x double>)
+declare <1 x double> @llvm.log10.v1f64(<1 x double>)
+declare <1 x double> @llvm.log2.v1f64(<1 x double>)
 declare <16 x half> @llvm.exp.v16f16(<16 x half>)
 declare <16 x half> @llvm.exp2.v16f16(<16 x half>)
 declare <16 x half> @llvm.log.v16f16(<16 x half>)
@@ -6372,6 +6537,11 @@ declare <2 x float> @llvm.exp2.v2f32(<2 x float>)
 declare <2 x float> @llvm.log.v2f32(<2 x float>)
 declare <2 x float> @llvm.log10.v2f32(<2 x float>)
 declare <2 x float> @llvm.log2.v2f32(<2 x float>)
+declare <2 x fp128> @llvm.exp.v2fp128(<2 x fp128>)
+declare <2 x fp128> @llvm.exp2.v2fp128(<2 x fp128>)
+declare <2 x fp128> @llvm.log.v2fp128(<2 x fp128>)
+declare <2 x fp128> @llvm.log10.v2fp128(<2 x fp128>)
+declare <2 x fp128> @llvm.log2.v2fp128(<2 x fp128>)
 declare <3 x double> @llvm.exp.v3f64(<3 x double>)
 declare <3 x double> @llvm.exp2.v3f64(<3 x double>)
 declare <3 x double> @llvm.log.v3f64(<3 x double>)
@@ -6422,6 +6592,11 @@ declare float @llvm.exp2.f32(float)
 declare float @llvm.log.f32(float)
 declare float @llvm.log10.f32(float)
 declare float @llvm.log2.f32(float)
+declare fp128 @llvm.exp.fp128(fp128)
+declare fp128 @llvm.exp2.fp128(fp128)
+declare fp128 @llvm.log.fp128(fp128)
+declare fp128 @llvm.log10.fp128(fp128)
+declare fp128 @llvm.log2.fp128(fp128)
 declare half @llvm.exp.f16(half)
 declare half @llvm.exp2.f16(half)
 declare half @llvm.log.f16(half)

diff --git a/llvm/test/CodeGen/AArch64/fpow.ll b/llvm/test/CodeGen/AArch64/fpow.ll
index 6e8cd0c8c00b41..dc93d5be9b3f38 100644
--- a/llvm/test/CodeGen/AArch64/fpow.ll
+++ b/llvm/test/CodeGen/AArch64/fpow.ll
@@ -37,14 +37,23 @@ entry:
   ret half %c
 }
 
+define fp128 @pow_fp128(fp128 %a, fp128 %b) {
+; CHECK-LABEL: pow_fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    b powl
+entry:
+  %c = call fp128 @llvm.pow.fp128(fp128 %a, fp128 %b)
+  ret fp128 %c
+}
+
 define <1 x double> @pow_v1f64(<1 x double> %x) {
 ; CHECK-LABEL: pow_v1f64:
 ; CHECK:       // %bb.0:
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    adrp x8, .LCPI3_0
-; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI3_0]
+; CHECK-NEXT:    adrp x8, .LCPI4_0
+; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI4_0]
 ; CHECK-NEXT:    bl pow
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
@@ -1591,9 +1600,51 @@ entry:
   ret <16 x half> %c
 }
 
+define <2 x fp128> @pow_v2fp128(<2 x fp128> %a, <2 x fp128> %b) {
+; CHECK-SD-LABEL: pow_v2fp128:
+; CHECK-SD:       // %bb.0: // %entry
+; CHECK-SD-NEXT:    sub sp, sp, #64
+; CHECK-SD-NEXT:    str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-SD-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-SD-NEXT:    .cfi_offset w30, -16
+; CHECK-SD-NEXT:    stp q1, q3, [sp, #16] // 32-byte Folded Spill
+; CHECK-SD-NEXT:    mov v1.16b, v2.16b
+; CHECK-SD-NEXT:    bl powl
+; CHECK-SD-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-SD-NEXT:    ldp q0, q1, [sp, #16] // 32-byte Folded Reload
+; CHECK-SD-NEXT:    bl powl
+; CHECK-SD-NEXT:    mov v1.16b, v0.16b
+; CHECK-SD-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-SD-NEXT:    ldr x30, [sp, #48] // 8-byte Folded Reload
+; CHECK-SD-NEXT:    add sp, sp, #64
+; CHECK-SD-NEXT:    ret
+;
+; CHECK-GI-LABEL: pow_v2fp128:
+; CHECK-GI:       // %bb.0: // %entry
+; CHECK-GI-NEXT:    sub sp, sp, #64
+; CHECK-GI-NEXT:    str x30, [sp, #48] // 8-byte Folded Spill
+; CHECK-GI-NEXT:    .cfi_def_cfa_offset 64
+; CHECK-GI-NEXT:    .cfi_offset w30, -16
+; CHECK-GI-NEXT:    stp q3, q1, [sp, #16] // 32-byte Folded Spill
+; CHECK-GI-NEXT:    mov v1.16b, v2.16b
+; CHECK-GI-NEXT:    bl powl
+; CHECK-GI-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-GI-NEXT:    ldp q1, q0, [sp, #16] // 32-byte Folded Reload
+; CHECK-GI-NEXT:    bl powl
+; CHECK-GI-NEXT:    mov v1.16b, v0.16b
+; CHECK-GI-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-GI-NEXT:    ldr x30, [sp, #48] // 8-byte Folded Reload
+; CHECK-GI-NEXT:    add sp, sp, #64
+; CHECK-GI-NEXT:    ret
+entry:
+  %c = call <2 x fp128> @llvm.pow.v2fp128(<2 x fp128> %a, <2 x fp128> %b)
+  ret <2 x fp128> %c
+}
+
 declare <16 x half> @llvm.pow.v16f16(<16 x half>, <16 x half>)
 declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>)
 declare <2 x float> @llvm.pow.v2f32(<2 x float>, <2 x float>)
+declare <2 x fp128> @llvm.pow.v2fp128(<2 x fp128>, <2 x fp128>)
 declare <3 x double> @llvm.pow.v3f64(<3 x double>, <3 x double>)
 declare <3 x float> @llvm.pow.v3f32(<3 x float>, <3 x float>)
 declare <4 x double> @llvm.pow.v4f64(<4 x double>, <4 x double>)
@@ -1604,4 +1655,5 @@ declare <8 x float> @llvm.pow.v8f32(<8 x float>, <8 x float>)
 declare <8 x half> @llvm.pow.v8f16(<8 x half>, <8 x half>)
 declare double @llvm.pow.f64(double, double)
 declare float @llvm.pow.f32(float, float)
+declare fp128 @llvm.pow.fp128(fp128, fp128)
 declare half @llvm.pow.f16(half, half)

diff --git a/llvm/test/CodeGen/AArch64/fsincos.ll b/llvm/test/CodeGen/AArch64/fsincos.ll
index 557add3a4eaeb2..2afc56a7139fbf 100644
--- a/llvm/test/CodeGen/AArch64/fsincos.ll
+++ b/llvm/test/CodeGen/AArch64/fsincos.ll
@@ -36,6 +36,15 @@ entry:
   ret half %c
 }
 
+define fp128 @sin_fp128(fp128 %a) {
+; CHECK-LABEL: sin_fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    b sinl
+entry:
+  %c = call fp128 @llvm.sin.fp128(fp128 %a)
+  ret fp128 %c
+}
+
 define <1 x double> @sin_v1f64(<1 x double> %x) {
 ; CHECK-LABEL: sin_v1f64:
 ; CHECK:       // %bb.0:
@@ -1273,6 +1282,28 @@ entry:
   ret <16 x half> %c
 }
 
+define <2 x fp128> @sin_v2fp128(<2 x fp128> %a) {
+; CHECK-LABEL: sin_v2fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #48
+; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    str q1, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    bl sinl
+; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    bl sinl
+; CHECK-NEXT:    mov v1.16b, v0.16b
+; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    ret
+entry:
+  %c = call <2 x fp128> @llvm.sin.v2fp128(<2 x fp128> %a)
+  ret <2 x fp128> %c
+}
+
 define double @cos_f64(double %a) {
 ; CHECK-LABEL: cos_f64:
 ; CHECK:       // %bb.0: // %entry
@@ -1307,6 +1338,15 @@ entry:
   ret half %c
 }
 
+define fp128 @cos_fp128(fp128 %a) {
+; CHECK-LABEL: cos_fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    b cosl
+entry:
+  %c = call fp128 @llvm.cos.fp128(fp128 %a)
+  ret fp128 %c
+}
+
 define <1 x double> @cos_v1f64(<1 x double> %x) {
 ; CHECK-LABEL: cos_v1f64:
 ; CHECK:       // %bb.0:
@@ -2544,6 +2584,28 @@ entry:
   ret <16 x half> %c
 }
 
+define <2 x fp128> @cos_v2fp128(<2 x fp128> %a) {
+; CHECK-LABEL: cos_v2fp128:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    sub sp, sp, #48
+; CHECK-NEXT:    str x30, [sp, #32] // 8-byte Folded Spill
+; CHECK-NEXT:    .cfi_def_cfa_offset 48
+; CHECK-NEXT:    .cfi_offset w30, -16
+; CHECK-NEXT:    str q1, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT:    bl cosl
+; CHECK-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT:    bl cosl
+; CHECK-NEXT:    mov v1.16b, v0.16b
+; CHECK-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; CHECK-NEXT:    ldr x30, [sp, #32] // 8-byte Folded Reload
+; CHECK-NEXT:    add sp, sp, #48
+; CHECK-NEXT:    ret
+entry:
+  %c = call <2 x fp128> @llvm.cos.v2fp128(<2 x fp128> %a)
+  ret <2 x fp128> %c
+}
+
 ; This is testing that we do not produce incorrect tailcall lowerings
 define i64 @donttailcall(double noundef %x, double noundef %y) {
 ; CHECK-LABEL: donttailcall:
@@ -2568,6 +2630,8 @@ declare <2 x double> @llvm.cos.v2f64(<2 x double>)
 declare <2 x double> @llvm.sin.v2f64(<2 x double>)
 declare <2 x float> @llvm.cos.v2f32(<2 x float>)
 declare <2 x float> @llvm.sin.v2f32(<2 x float>)
+declare <2 x fp128> @llvm.cos.v2fp128(<2 x fp128>)
+declare <2 x fp128> @llvm.sin.v2fp128(<2 x fp128>)
 declare <3 x double> @llvm.cos.v3f64(<3 x double>)
 declare <3 x double> @llvm.sin.v3f64(<3 x double>)
 declare <3 x float> @llvm.cos.v3f32(<3 x float>)
@@ -2588,5 +2652,7 @@ declare double @llvm.cos.f64(double)
 declare double @llvm.sin.f64(double)
 declare float @llvm.cos.f32(float)
 declare float @llvm.sin.f32(float)
+declare fp128 @llvm.cos.fp128(fp128)
+declare fp128 @llvm.sin.fp128(fp128)
 declare half @llvm.cos.f16(half)
 declare half @llvm.sin.f16(half)
