[llvm] [llvm][CodeGen] Intrinsic `llvm.powi.*` code gen for vector arguments (PR #118242)
via llvm-commits
llvm-commits at lists.llvm.org
Sun Dec 1 18:07:21 PST 2024
llvmbot wrote:
@llvm/pr-subscribers-llvm-selectiondag
Author: Zhaoxin Yang (ylzsx)
In some backends, the i32 type is illegal and is promoted during type legalization. This causes the exponent type check to fail when an ISD::FPOWI node is converted to a libcall.
Fixes https://github.com/llvm/llvm-project/issues/118079
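A minimal reproducer, lifted from the LSX test added below (the other affected targets and vector types behave the same way): on an LP64 target the i32 exponent reaches the per-element FPOWI libcall lowering already promoted to i64, so the old sizeof(int) size check rejected it.

```llvm
; Before this patch, the sizeof(int) check in ConvertNodeToLibcall tripped on
; loongarch64 (and similarly on riscv64); with it, each element is lowered to
; a __powisf2 libcall.
; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s
declare <4 x float> @llvm.powi.v4f32.i32(<4 x float>, i32)

define <4 x float> @powi_v4f32(<4 x float> %va, i32 %b) nounwind {
entry:
  %res = call <4 x float> @llvm.powi.v4f32.i32(<4 x float> %va, i32 %b)
  ret <4 x float> %res
}
```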
---
Patch is 66.95 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/118242.diff
4 Files Affected:
- (modified) llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp (+18)
- (added) llvm/test/CodeGen/LoongArch/lasx/intrinsic-fpowi.ll (+142)
- (added) llvm/test/CodeGen/LoongArch/lsx/intrinsic-fpowi.ll (+88)
- (added) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpowi.ll (+1427)
``````````diff
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 63536336e96228..2829bbaef83100 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -4648,6 +4648,24 @@ void SelectionDAGLegalize::ConvertNodeToLibcall(SDNode *Node) {
     bool ExponentHasSizeOfInt =
         DAG.getLibInfo().getIntSize() ==
         Node->getOperand(1 + Offset).getValueType().getSizeInBits();
+    if (!ExponentHasSizeOfInt) {
+      // In some backends, such as RISCV64 and LoongArch64, the i32 type is
+      // illegal and has been promoted by an earlier legalization step. In
+      // such cases the exponent actually matches sizeof(int) and a libcall
+      // should still be generated.
+      SDNode *ExponentNode = Node->getOperand(1 + Offset).getNode();
+      unsigned LibIntSize = DAG.getLibInfo().getIntSize();
+      if (ExponentNode->getOpcode() == ISD::SIGN_EXTEND_INREG ||
+          ExponentNode->getOpcode() == ISD::AssertSext ||
+          ExponentNode->getOpcode() == ISD::AssertZext) {
+        EVT InnerType = cast<VTSDNode>(ExponentNode->getOperand(1))->getVT();
+        ExponentHasSizeOfInt = LibIntSize == InnerType.getSizeInBits();
+      } else if (ISD::isExtOpcode(ExponentNode->getOpcode())) {
+        ExponentHasSizeOfInt =
+            LibIntSize ==
+            ExponentNode->getOperand(0).getValueType().getSizeInBits();
+      }
+    }
     if (!ExponentHasSizeOfInt) {
       // If the exponent does not match with sizeof(int) a libcall to
       // RTLIB::POWI would use the wrong type for the argument.
diff --git a/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fpowi.ll b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fpowi.ll
new file mode 100644
index 00000000000000..f6b14a9bb000fd
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/intrinsic-fpowi.ll
@@ -0,0 +1,142 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+declare <8 x float> @llvm.powi.v8f32.i32(<8 x float>, i32)
+
+define <8 x float> @powi_v8f32(<8 x float> %va, i32 %b) nounwind {
+; CHECK-LABEL: powi_v8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi.d $sp, $sp, -80
+; CHECK-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
+; CHECK-NEXT: st.d $fp, $sp, 64 # 8-byte Folded Spill
+; CHECK-NEXT: xvst $xr0, $sp, 0 # 32-byte Folded Spill
+; CHECK-NEXT: addi.w $fp, $a0, 0
+; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 0
+; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powisf2)
+; CHECK-NEXT: movfr2gr.s $a0, $fa0
+; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 0
+; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 1
+; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powisf2)
+; CHECK-NEXT: movfr2gr.s $a0, $fa0
+; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
+; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 1
+; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 2
+; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powisf2)
+; CHECK-NEXT: movfr2gr.s $a0, $fa0
+; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
+; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 2
+; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 3
+; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powisf2)
+; CHECK-NEXT: movfr2gr.s $a0, $fa0
+; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
+; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 3
+; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 4
+; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powisf2)
+; CHECK-NEXT: movfr2gr.s $a0, $fa0
+; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
+; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 4
+; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 5
+; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powisf2)
+; CHECK-NEXT: movfr2gr.s $a0, $fa0
+; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
+; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 5
+; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 6
+; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powisf2)
+; CHECK-NEXT: movfr2gr.s $a0, $fa0
+; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
+; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 6
+; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve2gr.w $a0, $xr0, 7
+; CHECK-NEXT: movgr2fr.w $fa0, $a0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powisf2)
+; CHECK-NEXT: movfr2gr.s $a0, $fa0
+; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
+; CHECK-NEXT: xvinsgr2vr.w $xr0, $a0, 7
+; CHECK-NEXT: ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; CHECK-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 80
+; CHECK-NEXT: ret
+entry:
+ %res = call <8 x float> @llvm.powi.v8f32.i32(<8 x float> %va, i32 %b)
+ ret <8 x float> %res
+}
+
+declare <4 x double> @llvm.powi.v4f64.i32(<4 x double>, i32)
+
+define <4 x double> @powi_v4f64(<4 x double> %va, i32 %b) nounwind {
+; CHECK-LABEL: powi_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi.d $sp, $sp, -80
+; CHECK-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill
+; CHECK-NEXT: st.d $fp, $sp, 64 # 8-byte Folded Spill
+; CHECK-NEXT: xvst $xr0, $sp, 0 # 32-byte Folded Spill
+; CHECK-NEXT: addi.w $fp, $a0, 0
+; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 0
+; CHECK-NEXT: movgr2fr.d $fa0, $a0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powidf2)
+; CHECK-NEXT: movfr2gr.d $a0, $fa0
+; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 0
+; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 1
+; CHECK-NEXT: movgr2fr.d $fa0, $a0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powidf2)
+; CHECK-NEXT: movfr2gr.d $a0, $fa0
+; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
+; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 1
+; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 2
+; CHECK-NEXT: movgr2fr.d $fa0, $a0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powidf2)
+; CHECK-NEXT: movfr2gr.d $a0, $fa0
+; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
+; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 2
+; CHECK-NEXT: xvst $xr0, $sp, 32 # 32-byte Folded Spill
+; CHECK-NEXT: xvld $xr0, $sp, 0 # 32-byte Folded Reload
+; CHECK-NEXT: xvpickve2gr.d $a0, $xr0, 3
+; CHECK-NEXT: movgr2fr.d $fa0, $a0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powidf2)
+; CHECK-NEXT: movfr2gr.d $a0, $fa0
+; CHECK-NEXT: xvld $xr0, $sp, 32 # 32-byte Folded Reload
+; CHECK-NEXT: xvinsgr2vr.d $xr0, $a0, 3
+; CHECK-NEXT: ld.d $fp, $sp, 64 # 8-byte Folded Reload
+; CHECK-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 80
+; CHECK-NEXT: ret
+entry:
+ %res = call <4 x double> @llvm.powi.v4f64.i32(<4 x double> %va, i32 %b)
+ ret <4 x double> %res
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fpowi.ll b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fpowi.ll
new file mode 100644
index 00000000000000..b0f54e78c7a442
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/intrinsic-fpowi.ll
@@ -0,0 +1,88 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
+
+declare <4 x float> @llvm.powi.v4f32.i32(<4 x float>, i32)
+
+define <4 x float> @powi_v4f32(<4 x float> %va, i32 %b) nounwind {
+; CHECK-LABEL: powi_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi.d $sp, $sp, -48
+; CHECK-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
+; CHECK-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
+; CHECK-NEXT: vst $vr0, $sp, 0 # 16-byte Folded Spill
+; CHECK-NEXT: addi.w $fp, $a0, 0
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powisf2)
+; CHECK-NEXT: movfr2gr.s $a0, $fa0
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 0
+; CHECK-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
+; CHECK-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 1
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powisf2)
+; CHECK-NEXT: movfr2gr.s $a0, $fa0
+; CHECK-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 1
+; CHECK-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
+; CHECK-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 2
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powisf2)
+; CHECK-NEXT: movfr2gr.s $a0, $fa0
+; CHECK-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 2
+; CHECK-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
+; CHECK-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 3
+; CHECK-NEXT: # kill: def $f0 killed $f0 killed $vr0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powisf2)
+; CHECK-NEXT: movfr2gr.s $a0, $fa0
+; CHECK-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a0, 3
+; CHECK-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; CHECK-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 48
+; CHECK-NEXT: ret
+entry:
+ %res = call <4 x float> @llvm.powi.v4f32.i32(<4 x float> %va, i32 %b)
+ ret <4 x float> %res
+}
+
+declare <2 x double> @llvm.powi.v2f64.i32(<2 x double>, i32)
+
+define <2 x double> @powi_v2f64(<2 x double> %va, i32 %b) nounwind {
+; CHECK-LABEL: powi_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi.d $sp, $sp, -48
+; CHECK-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
+; CHECK-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
+; CHECK-NEXT: vst $vr0, $sp, 0 # 16-byte Folded Spill
+; CHECK-NEXT: addi.w $fp, $a0, 0
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powidf2)
+; CHECK-NEXT: movfr2gr.d $a0, $fa0
+; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 0
+; CHECK-NEXT: vst $vr0, $sp, 16 # 16-byte Folded Spill
+; CHECK-NEXT: vld $vr0, $sp, 0 # 16-byte Folded Reload
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 1
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 killed $vr0
+; CHECK-NEXT: move $a0, $fp
+; CHECK-NEXT: bl %plt(__powidf2)
+; CHECK-NEXT: movfr2gr.d $a0, $fa0
+; CHECK-NEXT: vld $vr0, $sp, 16 # 16-byte Folded Reload
+; CHECK-NEXT: vinsgr2vr.d $vr0, $a0, 1
+; CHECK-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
+; CHECK-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
+; CHECK-NEXT: addi.d $sp, $sp, 48
+; CHECK-NEXT: ret
+entry:
+ %res = call <2 x double> @llvm.powi.v2f64.i32(<2 x double> %va, i32 %b)
+ ret <2 x double> %res
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpowi.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpowi.ll
new file mode 100644
index 00000000000000..d99feb5fdd921c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpowi.ll
@@ -0,0 +1,1427 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -mattr=+v,+f,+d -target-abi=ilp32d -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefix=RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d -target-abi=lp64d -verify-machineinstrs < %s \
+; RUN: | FileCheck %s --check-prefix=RV64
+
+define <1 x float> @powi_v1f32(<1 x float> %x, i32 %y) {
+; RV32-LABEL: powi_v1f32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: call __powisf2
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vfmv.s.f v8, fa0
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: powi_v1f32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: sext.w a0, a0
+; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vfmv.f.s fa0, v8
+; RV64-NEXT: call __powisf2
+; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vfmv.s.f v8, fa0
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+ %a = call <1 x float> @llvm.powi.v1f32.i32(<1 x float> %x, i32 %y)
+ ret <1 x float> %a
+}
+declare <1 x float> @llvm.powi.v1f32.i32(<1 x float>, i32)
+
+define <2 x float> @powi_v2f32(<2 x float> %x, i32 %y) {
+; RV32-LABEL: powi_v2f32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset fs0, -16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 1 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v8, 1
+; RV32-NEXT: vfmv.f.s fa0, v9
+; RV32-NEXT: call __powisf2
+; RV32-NEXT: fmv.s fs0, fa0
+; RV32-NEXT: flw fa0, 16(sp) # 8-byte Folded Reload
+; RV32-NEXT: mv a0, s0
+; RV32-NEXT: call __powisf2
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT: vfmv.v.f v8, fa0
+; RV32-NEXT: vfslide1down.vf v8, v8, fs0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 32
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: .cfi_restore s0
+; RV32-NEXT: .cfi_restore fs0
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: powi_v2f32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: fsd fs0, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: .cfi_offset fs0, -24
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 1 * vlenb
+; RV64-NEXT: addi a1, sp, 32
+; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: sext.w s0, a0
+; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT: vslidedown.vi v9, v8, 1
+; RV64-NEXT: vfmv.f.s fa0, v9
+; RV64-NEXT: mv a0, s0
+; RV64-NEXT: call __powisf2
+; RV64-NEXT: fmv.s fs0, fa0
+; RV64-NEXT: flw fa0, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT: mv a0, s0
+; RV64-NEXT: call __powisf2
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; RV64-NEXT: vfmv.v.f v8, fa0
+; RV64-NEXT: vfslide1down.vf v8, v8, fs0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 64
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: fld fs0, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT: .cfi_restore ra
+; RV64-NEXT: .cfi_restore s0
+; RV64-NEXT: .cfi_restore fs0
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
+ %a = call <2 x float> @llvm.powi.v2f32.i32(<2 x float> %x, i32 %y)
+ ret <2 x float> %a
+}
+declare <2 x float> @llvm.powi.v2f32.i32(<2 x float>, i32)
+
+define <3 x float> @powi_v3f32(<3 x float> %x, i32 %y) {
+; RV32-LABEL: powi_v3f32:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -32
+; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: fsd fs0, 16(sp) # 8-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: .cfi_offset fs0, -16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 1
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
+; RV32-NEXT: mv s0, a0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vslidedown.vi v9, v8, 1
+; RV32-NEXT: vfmv.f.s fa0, v9
+; RV32-NEXT: call __powisf2
+; RV32-NEXT: fmv.s fs0, fa0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: flw fa0, 16(a0) # 8-byte Folded Reload
+; RV32-NEXT: mv a0, s0
+; RV32-NEXT: call __powisf2
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vfmv.v.f v8, fa0
+; RV32-NEXT: vfslide1down.vf v8, v8, fs0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vslidedown.vi v8, v8, 2
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: mv a0, s0
+; RV32-NEXT: call __powisf2
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vfslide1down.vf v8, v8, fa0
+; RV32-NEXT: vslidedown.vi v8, v8, 1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 32
+; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: fld fs0, 16(sp) # 8-byte Folded Reload
+; RV32-NEXT: .cfi_restore ra
+; RV32-NEXT: .cfi_restore s0
+; RV32-NEXT: .cfi_restore fs0
+; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: powi_v3f32:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; R...
[truncated]
``````````
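Outside the quoted patch, here is a minimal standalone sketch of the idea the LegalizeDAG.cpp hunk above inlines (the helper name is hypothetical, not part of the patch): when i32 has been promoted, the pre-promotion width of the exponent can still be recovered, either from the VT operand of a SIGN_EXTEND_INREG/AssertSext/AssertZext node or from the input type of a plain extend, and that recovered width is what should be compared against sizeof(int).

```cpp
// Hypothetical restatement of the check added above; not part of the patch.
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Returns the bit width the powi exponent had before type legalization
// promoted it, falling back to its current width if no promotion artifact
// (extend or assert node) is found.
static unsigned getPrePromotionExponentBits(SDValue Exp) {
  SDNode *N = Exp.getNode();
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
  case ISD::AssertSext:
  case ISD::AssertZext:
    // These nodes carry the original type as a VT operand.
    return cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
  default:
    if (ISD::isExtOpcode(N->getOpcode()))
      // For sign/zero/any-extend, the original type is the input's type.
      return N->getOperand(0).getValueType().getSizeInBits();
    return Exp.getValueType().getSizeInBits();
  }
}
```

With such a helper, the check would read `ExponentHasSizeOfInt = DAG.getLibInfo().getIntSize() == getPrePromotionExponentBits(Node->getOperand(1 + Offset))`, which is exactly what the hunk spells out inline.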
https://github.com/llvm/llvm-project/pull/118242
More information about the llvm-commits mailing list