[llvm] [ARM] Allow spilling FPSCR for MVE adc/sbc intrinsics (PR #115174)

Simon Tatham via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 6 08:02:56 PST 2024


================
@@ -0,0 +1,106 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple thumbv8.1m.main-arm-none-eabihf -mattr=+mve | FileCheck %s
+
+declare void @use_int32x4_t(<4 x i32>)
+
+; A 256-bit addition, with the two halves of the result passed to function
+; calls to spill the carry bit out of FPSCR.
+define void @add_256(<4 x i32> %a_low, <4 x i32> %a_high, <4 x i32> %b_low, <4 x i32> %b_high) {
+; CHECK-LABEL: add_256:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    vadci.i32 q0, q0, q2
+; CHECK-NEXT:    vmov q4, q3
+; CHECK-NEXT:    vmov q5, q1
+; CHECK-NEXT:    vstr fpscr_nzcvqc, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    bl use_int32x4_t
+; CHECK-NEXT:    vldr fpscr_nzcvqc, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    vadc.i32 q0, q5, q4
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    pop.w {r7, lr}
+; CHECK-NEXT:    b use_int32x4_t
+entry:
+  %adc_low = tail call { <4 x i32>, i32 } @llvm.arm.mve.vadc.v4i32(<4 x i32> %a_low, <4 x i32> %b_low, i32 0)
+  %carry = extractvalue { <4 x i32>, i32 } %adc_low, 1
+  %result_low = extractvalue { <4 x i32>, i32 } %adc_low, 0
+  tail call void @use_int32x4_t(<4 x i32> %result_low)
+  %adc_high = tail call { <4 x i32>, i32 } @llvm.arm.mve.vadc.v4i32(<4 x i32> %a_high, <4 x i32> %b_high, i32 %carry)
+  %result_high = extractvalue { <4 x i32>, i32 } %adc_high, 0
+  tail call void @use_int32x4_t(<4 x i32> %result_high)
+  ret void
+}
+
+; A 256-bit subtraction, with the two halves of the result passed to function
+; calls to spill the carry bit out of FPSCR.
+define void @sub_256(<4 x i32> %a_low, <4 x i32> %a_high, <4 x i32> %b_low, <4 x i32> %b_high) {
+; CHECK-LABEL: sub_256:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r7, lr}
+; CHECK-NEXT:    push {r7, lr}
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    .pad #8
+; CHECK-NEXT:    sub sp, #8
+; CHECK-NEXT:    vsbci.i32 q0, q0, q2
+; CHECK-NEXT:    vmov q4, q3
+; CHECK-NEXT:    vmov q5, q1
+; CHECK-NEXT:    vstr fpscr_nzcvqc, [sp, #4] @ 4-byte Spill
+; CHECK-NEXT:    bl use_int32x4_t
+; CHECK-NEXT:    vldr fpscr_nzcvqc, [sp, #4] @ 4-byte Reload
+; CHECK-NEXT:    vsbc.i32 q0, q5, q4
+; CHECK-NEXT:    add sp, #8
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    pop.w {r7, lr}
+; CHECK-NEXT:    b use_int32x4_t
+entry:
+  %adc_low = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> %a_low, <4 x i32> %b_low, i32 0)
+  %carry = extractvalue { <4 x i32>, i32 } %adc_low, 1
+  %result_low = extractvalue { <4 x i32>, i32 } %adc_low, 0
+  tail call void @use_int32x4_t(<4 x i32> %result_low)
+  %adc_high = tail call { <4 x i32>, i32 } @llvm.arm.mve.vsbc.v4i32(<4 x i32> %a_high, <4 x i32> %b_high, i32 %carry)
+  %result_high = extractvalue { <4 x i32>, i32 } %adc_high, 0
+  tail call void @use_int32x4_t(<4 x i32> %result_high)
+  ret void
+}
+
+; The carry-out of the first VADC intrinsic call is used by two other VADCs,
+; both of which will modify FPSCR, so it must be spilled and reloaded.
+; Missed optimisation: the first VLDR isn't needed, because the carry bit is
+; already in FPSCR.
----------------
statham-arm wrote:

I agree that fixing this missed optimisation is a separate problem from this patch. In particular, it seems likely that it applies to other registers too: it looks more like a general problem of "redundant reload of a spilled value" than anything specific to FPSCR.

Another thing that strikes me about this output: if I were writing the same code by hand, I can't imagine that I'd deliberately write a store _to the stack_. There are plenty of integer registers free in this code, so surely I'd "spill" FPSCR into one of those via VMRS, and reload it via VMSR, eliminating the need to set up a stack frame at all.
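For illustration only, a rough sketch of what the add_256 body might look like with that strategy (this is not output the current patch produces; it assumes r4 is added to the pushed callee-saved registers so the flags survive the call):

    vadci.i32 q0, q0, q2
    vmrs    r4, fpscr_nzcvqc      @ "spill" the carry flag into a call-preserved GPR
    vmov    q4, q3
    vmov    q5, q1
    bl      use_int32x4_t
    vmsr    fpscr_nzcvqc, r4      @ restore the carry flag before the dependent vadc
    vadc.i32 q0, q5, q4

That would avoid both the `sub sp, #8` / `add sp, #8` pair and the stack traffic for the spill itself.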

But that is even further out of scope for this patch, which just teaches LLVM that the FPSCR flags are spillable at all.

https://github.com/llvm/llvm-project/pull/115174

