[llvm] LoongArch: Add test for llvm.exp10 intrinsic (PR #148606)
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 14 05:52:57 PDT 2025
https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/148606
From fd3e1667bfd5cd8a359770e348133d25099a594c Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Mon, 14 Jul 2025 19:24:15 +0900
Subject: [PATCH] LoongArch: Add test for llvm.exp10 intrinsic
---
llvm/test/CodeGen/LoongArch/llvm.exp10.ll | 362 ++++++++++++++++++++++
1 file changed, 362 insertions(+)
create mode 100644 llvm/test/CodeGen/LoongArch/llvm.exp10.ll
diff --git a/llvm/test/CodeGen/LoongArch/llvm.exp10.ll b/llvm/test/CodeGen/LoongArch/llvm.exp10.ll
new file mode 100644
index 0000000000000..7a52531daa802
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/llvm.exp10.ll
@@ -0,0 +1,362 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=loongarch32 -mattr=+d < %s | FileCheck -check-prefix=LA32 %s
+; RUN: llc -mtriple=loongarch64 -mattr=+d < %s | FileCheck -check-prefix=LA64 %s
+
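+; LoongArch has no native exp10 instruction, so every form of the
+; intrinsic is expected to lower to a libcall (exp10f, exp10, or exp10l),
+; with vector cases scalarized element by element.
+
+; f16 is promoted through float: __extendhfsf2 -> exp10f -> __truncsfhf2.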
+define half @exp10_f16(half %x) #0 {
+; LA32-LABEL: exp10_f16:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: bl __extendhfsf2
+; LA32-NEXT: bl exp10f
+; LA32-NEXT: bl __truncsfhf2
+; LA32-NEXT: movfr2gr.s $a0, $fa0
+; LA32-NEXT: lu12i.w $a1, -16
+; LA32-NEXT: or $a0, $a0, $a1
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: exp10_f16:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -16
+; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: pcaddu18i $ra, %call36(exp10f)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: movfr2gr.s $a0, $fa0
+; LA64-NEXT: lu12i.w $a1, -16
+; LA64-NEXT: or $a0, $a0, $a1
+; LA64-NEXT: movgr2fr.w $fa0, $a0
+; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 16
+; LA64-NEXT: ret
+ %r = call half @llvm.exp10.f16(half %x)
+ ret half %r
+}
+
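+; <2 x half> is scalarized: each lane runs through the same
+; extend/exp10f/truncate sequence as the scalar case.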
+define <2 x half> @exp10_v2f16(<2 x half> %x) #0 {
+; LA32-LABEL: exp10_v2f16:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT: fst.d $fs0, $sp, 0 # 8-byte Folded Spill
+; LA32-NEXT: movgr2fr.w $fs0, $a1
+; LA32-NEXT: movgr2fr.w $fa0, $a0
+; LA32-NEXT: bl __extendhfsf2
+; LA32-NEXT: bl exp10f
+; LA32-NEXT: bl __truncsfhf2
+; LA32-NEXT: movfr2gr.s $fp, $fa0
+; LA32-NEXT: fmov.s $fa0, $fs0
+; LA32-NEXT: bl __extendhfsf2
+; LA32-NEXT: bl exp10f
+; LA32-NEXT: bl __truncsfhf2
+; LA32-NEXT: movfr2gr.s $a1, $fa0
+; LA32-NEXT: move $a0, $fp
+; LA32-NEXT: fld.d $fs0, $sp, 0 # 8-byte Folded Reload
+; LA32-NEXT: ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: ret
+;
+; LA64-LABEL: exp10_v2f16:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -32
+; LA64-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT: move $fp, $a0
+; LA64-NEXT: movgr2fr.w $fa0, $a1
+; LA64-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: pcaddu18i $ra, %call36(exp10f)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: movfr2gr.s $a0, $fa0
+; LA64-NEXT: st.h $a0, $sp, 2
+; LA64-NEXT: movgr2fr.w $fa0, $fp
+; LA64-NEXT: pcaddu18i $ra, %call36(__extendhfsf2)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: pcaddu18i $ra, %call36(exp10f)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: pcaddu18i $ra, %call36(__truncsfhf2)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: movfr2gr.s $a0, $fa0
+; LA64-NEXT: st.h $a0, $sp, 0
+; LA64-NEXT: vld $vr0, $sp, 0
+; LA64-NEXT: vpickve2gr.h $a0, $vr0, 0
+; LA64-NEXT: vpickve2gr.h $a1, $vr0, 1
+; LA64-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 32
+; LA64-NEXT: ret
+ %r = call <2 x half> @llvm.exp10.v2f16(<2 x half> %x)
+ ret <2 x half> %r
+}
+
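+; f32 maps directly onto exp10f; with no other code in the function this
+; becomes a tail call on both targets.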
+define float @exp10_f32(float %x) #0 {
+; LA32-LABEL: exp10_f32:
+; LA32: # %bb.0:
+; LA32-NEXT: b exp10f
+;
+; LA64-LABEL: exp10_f32:
+; LA64: # %bb.0:
+; LA64-NEXT: pcaddu18i $t8, %call36(exp10f)
+; LA64-NEXT: jr $t8
+ %r = call float @llvm.exp10.f32(float %x)
+ ret float %r
+}
+
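+; <2 x float> is scalarized into two exp10f calls; on LA64 the lanes are
+; moved in and out of LSX registers with vreplvei.w / vpackev.w.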
+define <2 x float> @exp10_v2f32(<2 x float> %x) #0 {
+; LA32-LABEL: exp10_v2f32:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -32
+; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT: fmov.s $fs0, $fa1
+; LA32-NEXT: bl exp10f
+; LA32-NEXT: fmov.s $fs1, $fa0
+; LA32-NEXT: fmov.s $fa0, $fs0
+; LA32-NEXT: bl exp10f
+; LA32-NEXT: fmov.s $fa1, $fa0
+; LA32-NEXT: fmov.s $fa0, $fs1
+; LA32-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 32
+; LA32-NEXT: ret
+;
+; LA64-LABEL: exp10_v2f32:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -48
+; LA64-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64-NEXT: vst $vr0, $sp, 0 # 16-byte Folded Spill
+; LA64-NEXT: vreplvei.w $vr0, $vr0, 0
+; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; LA64-NEXT: pcaddu18i $ra, %call36(exp10f)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
+; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
+; LA64-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload
+; LA64-NEXT: vreplvei.w $vr0, $vr0, 1
+; LA64-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; LA64-NEXT: pcaddu18i $ra, %call36(exp10f)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: # kill: def $f0 killed $f0 def $vr0
+; LA64-NEXT: vld $vr1, $sp, 16 # 16-byte Folded Reload
+; LA64-NEXT: vpackev.w $vr0, $vr0, $vr1
+; LA64-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 48
+; LA64-NEXT: ret
+ %r = call <2 x float> @llvm.exp10.v2f32(<2 x float> %x)
+ ret <2 x float> %r
+}
+
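+; f64 maps directly onto exp10, again as a tail call.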
+define double @exp10_f64(double %x) #0 {
+; LA32-LABEL: exp10_f64:
+; LA32: # %bb.0:
+; LA32-NEXT: b exp10
+;
+; LA64-LABEL: exp10_f64:
+; LA64: # %bb.0:
+; LA64-NEXT: pcaddu18i $t8, %call36(exp10)
+; LA64-NEXT: jr $t8
+ %r = call double @llvm.exp10.f64(double %x)
+ ret double %r
+}
+
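+; <2 x double> is scalarized into two exp10 calls; LA64 rebuilds the
+; result vector with vinsgr2vr.d.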
+define <2 x double> @exp10_v2f64(<2 x double> %x) #0 {
+; LA32-LABEL: exp10_v2f64:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -32
+; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill
+; LA32-NEXT: fst.d $fs0, $sp, 16 # 8-byte Folded Spill
+; LA32-NEXT: fst.d $fs1, $sp, 8 # 8-byte Folded Spill
+; LA32-NEXT: fmov.d $fs0, $fa1
+; LA32-NEXT: bl exp10
+; LA32-NEXT: fmov.d $fs1, $fa0
+; LA32-NEXT: fmov.d $fa0, $fs0
+; LA32-NEXT: bl exp10
+; LA32-NEXT: fmov.d $fa1, $fa0
+; LA32-NEXT: fmov.d $fa0, $fs1
+; LA32-NEXT: fld.d $fs1, $sp, 8 # 8-byte Folded Reload
+; LA32-NEXT: fld.d $fs0, $sp, 16 # 8-byte Folded Reload
+; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 32
+; LA32-NEXT: ret
+;
+; LA64-LABEL: exp10_v2f64:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -48
+; LA64-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64-NEXT: vst $vr0, $sp, 0 # 16-byte Folded Spill
+; LA64-NEXT: vreplvei.d $vr0, $vr0, 0
+; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; LA64-NEXT: pcaddu18i $ra, %call36(exp10)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: movfr2gr.d $a0, $fa0
+; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 0
+; LA64-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
+; LA64-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload
+; LA64-NEXT: vreplvei.d $vr0, $vr0, 1
+; LA64-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; LA64-NEXT: pcaddu18i $ra, %call36(exp10)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: movfr2gr.d $a0, $fa0
+; LA64-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
+; LA64-NEXT: vinsgr2vr.d $vr0, $a0, 1
+; LA64-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 48
+; LA64-NEXT: ret
+ %r = call <2 x double> @llvm.exp10.v2f64(<2 x double> %x)
+ ret <2 x double> %r
+}
+
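+; fp128 lowers to an exp10l libcall. On LA32 the argument and result are
+; passed indirectly on the stack; on LA64 they travel in GPR pairs.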
+define fp128 @exp10_f128(fp128 %x) #0 {
+; LA32-LABEL: exp10_f128:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -48
+; LA32-NEXT: st.w $ra, $sp, 44 # 4-byte Folded Spill
+; LA32-NEXT: st.w $fp, $sp, 40 # 4-byte Folded Spill
+; LA32-NEXT: ld.w $a2, $a1, 0
+; LA32-NEXT: ld.w $a3, $a1, 4
+; LA32-NEXT: ld.w $a4, $a1, 8
+; LA32-NEXT: ld.w $a1, $a1, 12
+; LA32-NEXT: move $fp, $a0
+; LA32-NEXT: st.w $a1, $sp, 20
+; LA32-NEXT: st.w $a4, $sp, 16
+; LA32-NEXT: st.w $a3, $sp, 12
+; LA32-NEXT: addi.w $a0, $sp, 24
+; LA32-NEXT: addi.w $a1, $sp, 8
+; LA32-NEXT: st.w $a2, $sp, 8
+; LA32-NEXT: bl exp10l
+; LA32-NEXT: ld.w $a0, $sp, 36
+; LA32-NEXT: ld.w $a1, $sp, 32
+; LA32-NEXT: ld.w $a2, $sp, 28
+; LA32-NEXT: ld.w $a3, $sp, 24
+; LA32-NEXT: st.w $a0, $fp, 12
+; LA32-NEXT: st.w $a1, $fp, 8
+; LA32-NEXT: st.w $a2, $fp, 4
+; LA32-NEXT: st.w $a3, $fp, 0
+; LA32-NEXT: ld.w $fp, $sp, 40 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $ra, $sp, 44 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 48
+; LA32-NEXT: ret
+;
+; LA64-LABEL: exp10_f128:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -16
+; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: pcaddu18i $ra, %call36(exp10l)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 16
+; LA64-NEXT: ret
+ %r = call fp128 @llvm.exp10.f128(fp128 %x)
+ ret fp128 %r
+}
+
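+; <2 x fp128> is scalarized into two exp10l calls; on both targets the
+; result vector is returned indirectly through the pointer in $a0.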
+define <2 x fp128> @exp10_v2f128(<2 x fp128> %x) #0 {
+; LA32-LABEL: exp10_v2f128:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -96
+; LA32-NEXT: st.w $ra, $sp, 92 # 4-byte Folded Spill
+; LA32-NEXT: st.w $fp, $sp, 88 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s0, $sp, 84 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s1, $sp, 80 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s2, $sp, 76 # 4-byte Folded Spill
+; LA32-NEXT: st.w $s3, $sp, 72 # 4-byte Folded Spill
+; LA32-NEXT: ld.w $s0, $a1, 16
+; LA32-NEXT: ld.w $s1, $a1, 20
+; LA32-NEXT: ld.w $s2, $a1, 24
+; LA32-NEXT: ld.w $s3, $a1, 28
+; LA32-NEXT: ld.w $a2, $a1, 0
+; LA32-NEXT: ld.w $a3, $a1, 4
+; LA32-NEXT: ld.w $a4, $a1, 8
+; LA32-NEXT: ld.w $a1, $a1, 12
+; LA32-NEXT: move $fp, $a0
+; LA32-NEXT: st.w $a1, $sp, 20
+; LA32-NEXT: st.w $a4, $sp, 16
+; LA32-NEXT: st.w $a3, $sp, 12
+; LA32-NEXT: addi.w $a0, $sp, 24
+; LA32-NEXT: addi.w $a1, $sp, 8
+; LA32-NEXT: st.w $a2, $sp, 8
+; LA32-NEXT: bl exp10l
+; LA32-NEXT: st.w $s3, $sp, 52
+; LA32-NEXT: st.w $s2, $sp, 48
+; LA32-NEXT: st.w $s1, $sp, 44
+; LA32-NEXT: addi.w $a0, $sp, 56
+; LA32-NEXT: addi.w $a1, $sp, 40
+; LA32-NEXT: st.w $s0, $sp, 40
+; LA32-NEXT: bl exp10l
+; LA32-NEXT: ld.w $a0, $sp, 24
+; LA32-NEXT: ld.w $a1, $sp, 28
+; LA32-NEXT: ld.w $a2, $sp, 32
+; LA32-NEXT: ld.w $a3, $sp, 36
+; LA32-NEXT: ld.w $a4, $sp, 68
+; LA32-NEXT: ld.w $a5, $sp, 64
+; LA32-NEXT: ld.w $a6, $sp, 60
+; LA32-NEXT: ld.w $a7, $sp, 56
+; LA32-NEXT: st.w $a4, $fp, 28
+; LA32-NEXT: st.w $a5, $fp, 24
+; LA32-NEXT: st.w $a6, $fp, 20
+; LA32-NEXT: st.w $a7, $fp, 16
+; LA32-NEXT: st.w $a3, $fp, 12
+; LA32-NEXT: st.w $a2, $fp, 8
+; LA32-NEXT: st.w $a1, $fp, 4
+; LA32-NEXT: st.w $a0, $fp, 0
+; LA32-NEXT: ld.w $s3, $sp, 72 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $s2, $sp, 76 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $s1, $sp, 80 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $s0, $sp, 84 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $fp, $sp, 88 # 4-byte Folded Reload
+; LA32-NEXT: ld.w $ra, $sp, 92 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 96
+; LA32-NEXT: ret
+;
+; LA64-LABEL: exp10_v2f128:
+; LA64: # %bb.0:
+; LA64-NEXT: addi.d $sp, $sp, -48
+; LA64-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
+; LA64-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s2, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT: st.d $s3, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT: ld.d $fp, $a1, 16
+; LA64-NEXT: ld.d $s0, $a1, 24
+; LA64-NEXT: ld.d $a2, $a1, 0
+; LA64-NEXT: ld.d $a1, $a1, 8
+; LA64-NEXT: move $s1, $a0
+; LA64-NEXT: move $a0, $a2
+; LA64-NEXT: pcaddu18i $ra, %call36(exp10l)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: move $s2, $a0
+; LA64-NEXT: move $s3, $a1
+; LA64-NEXT: move $a0, $fp
+; LA64-NEXT: move $a1, $s0
+; LA64-NEXT: pcaddu18i $ra, %call36(exp10l)
+; LA64-NEXT: jirl $ra, $ra, 0
+; LA64-NEXT: st.d $a1, $s1, 24
+; LA64-NEXT: st.d $a0, $s1, 16
+; LA64-NEXT: st.d $s3, $s1, 8
+; LA64-NEXT: st.d $s2, $s1, 0
+; LA64-NEXT: ld.d $s3, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $s2, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; LA64-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; LA64-NEXT: addi.d $sp, $sp, 48
+; LA64-NEXT: ret
+ %r = call <2 x fp128> @llvm.exp10.v2f128(<2 x fp128> %x)
+ ret <2 x fp128> %r
+}
+
+attributes #0 = { nounwind }