[llvm] 48b6f4a - [RISCV] Rewrite spill-fpr-scalar.ll test to not use vsetvli. NFC (#91428)
via llvm-commits <llvm-commits at lists.llvm.org>
Tue May 7 20:33:09 PDT 2024
Author: Luke Lau
Date: 2024-05-08T11:33:05+08:00
New Revision: 48b6f4a18255816df51fcab7648c5a7f205dfe14
URL: https://github.com/llvm/llvm-project/commit/48b6f4a18255816df51fcab7648c5a7f205dfe14
DIFF: https://github.com/llvm/llvm-project/commit/48b6f4a18255816df51fcab7648c5a7f205dfe14.diff
LOG: [RISCV] Rewrite spill-fpr-scalar.ll test to not use vsetvli. NFC (#91428)
The test was relying on the fact that vsetvlis have side effects to prevent
reordering, but #91319 proposes to remove those side effects. This reworks
the test to use volatile loads and stores instead.
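For illustration (not part of the commit), a minimal sketch of the pattern the
reworked test relies on: the volatile load and store cannot be reordered across
the side-effecting inline asm call, and the asm clobbers every FPR, so the
loaded value has to be spilled to a stack slot and reloaded afterwards. The
function name here is hypothetical; the clobber list matches the one used in
the test below.

; Reduced sketch: volatile ordering pins the load before and the store after
; the inline asm; the all-FPR clobber forces the value through a stack slot.
define void @force_fpr_spill(ptr %p) nounwind {
  %v = load volatile float, ptr %p  ; materialized in an FPR, e.g. fa5
  call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
  store volatile float %v, ptr %p   ; reloaded from the spill slot first
  ret void
}

Note also that the new RUN line drops +v and +zvfh from -mattr, so no vsetvli
can be emitted at all and the test keeps covering scalar FPR spills even once
#91319 lands.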
Added:
Modified:
llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll b/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
index 48fb21dc5a8a0..6b9b88d90de61 100644
--- a/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
+++ b/llvm/test/CodeGen/RISCV/spill-fpr-scalar.ll
@@ -1,75 +1,58 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+zvfh -target-abi=lp64 \
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh -target-abi=lp64 \
; RUN: -verify-machineinstrs < %s \
; RUN: | FileCheck %s
-declare half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half>)
-declare float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float>)
-declare double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double>)
-
-declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(<vscale x 1 x half>, half, i64);
-declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(<vscale x 1 x float>, float, i64);
-declare <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(<vscale x 1 x double>, double, i64);
-
-define <vscale x 1 x half> @intrinsic_vfmv.f.s_s_nxv1f16(<vscale x 1 x half> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f16:
-; CHECK: # %bb.0: # %entry
+define void @spill_half(ptr) nounwind {
+; CHECK-LABEL: spill_half:
+; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vfmv.f.s fa5, v8
+; CHECK-NEXT: flh fa5, 0(a0)
; CHECK-NEXT: fsh fa5, 14(sp) # 2-byte Folded Spill
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: flh fa5, 14(sp) # 2-byte Folded Reload
-; CHECK-NEXT: vfmv.v.f v8, fa5
+; CHECK-NEXT: fsh fa5, 0(a0)
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
-entry:
- %a = call half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half> %0)
- tail call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
- %b = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16(<vscale x 1 x half> undef, half %a, i64 %1)
- ret <vscale x 1 x half> %b
+ %2 = load volatile half, ptr %0
+ call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
+ store volatile half %2, ptr %0
+ ret void
}
-define <vscale x 1 x float> @intrinsic_vfmv.f.s_s_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f32:
-; CHECK: # %bb.0: # %entry
+define void @spill_float(ptr) nounwind {
+; CHECK-LABEL: spill_float:
+; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vfmv.f.s fa5, v8
+; CHECK-NEXT: flw fa5, 0(a0)
; CHECK-NEXT: fsw fa5, 12(sp) # 4-byte Folded Spill
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: flw fa5, 12(sp) # 4-byte Folded Reload
-; CHECK-NEXT: vfmv.v.f v8, fa5
+; CHECK-NEXT: fsw fa5, 0(a0)
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
-entry:
- %a = call float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float> %0)
- tail call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
- %b = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32(<vscale x 1 x float> undef, float %a, i64 %1)
- ret <vscale x 1 x float> %b
+ %2 = load volatile float, ptr %0
+ call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
+ store volatile float %2, ptr %0
+ ret void
}
-define <vscale x 1 x double> @intrinsic_vfmv.f.s_s_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f64:
-; CHECK: # %bb.0: # %entry
+define void @spill_double(ptr) nounwind {
+; CHECK-LABEL: spill_double:
+; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vfmv.f.s fa5, v8
+; CHECK-NEXT: fld fa5, 0(a0)
; CHECK-NEXT: fsd fa5, 8(sp) # 8-byte Folded Spill
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: fld fa5, 8(sp) # 8-byte Folded Reload
-; CHECK-NEXT: vfmv.v.f v8, fa5
+; CHECK-NEXT: fsd fa5, 0(a0)
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
-entry:
- %a = call double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double> %0)
- tail call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
- %b = call <vscale x 1 x double> @llvm.riscv.vfmv.v.f.nxv1f64(<vscale x 1 x double> undef, double %a, i64 %1)
- ret <vscale x 1 x double> %b
+ %2 = load volatile double, ptr %0
+ call void asm sideeffect "", "~{f0_d},~{f1_d},~{f2_d},~{f3_d},~{f4_d},~{f5_d},~{f6_d},~{f7_d},~{f8_d},~{f9_d},~{f10_d},~{f11_d},~{f12_d},~{f13_d},~{f14_d},~{f15_d},~{f16_d},~{f17_d},~{f18_d},~{f19_d},~{f20_d},~{f21_d},~{f22_d},~{f23_d},~{f24_d},~{f25_d},~{f26_d},~{f27_d},~{f28_d},~{f29_d},~{f30_d},~{f31_d}"()
+ store volatile double %2, ptr %0
+ ret void
}