[llvm] 13f2a58 - [ARM] Fixup FP16 bitcasts
David Green via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 27 04:19:56 PST 2020
Author: David Green
Date: 2020-02-27T12:19:31Z
New Revision: 13f2a5883f2bf29b65b0a24592c2a38132e8946e
URL: https://github.com/llvm/llvm-project/commit/13f2a5883f2bf29b65b0a24592c2a38132e8946e
DIFF: https://github.com/llvm/llvm-project/commit/13f2a5883f2bf29b65b0a24592c2a38132e8946e.diff
LOG: [ARM] Fixup FP16 bitcasts
Under fp16 we optimise the bitcast between a VMOVhr and a CopyToReg via
custom lowering. This rewrites that to be a DAG combine instead, which
helps produce better code in the cases where the bitcast is actaully
helps produce better code in the cases where the bitcast is actually
legal.
Differential Revision: https://reviews.llvm.org/D72753
Added:
Modified:
llvm/lib/Target/ARM/ARMISelLowering.cpp
llvm/test/CodeGen/ARM/fp16-bitcast.ll
llvm/test/CodeGen/Thumb2/mve-div-expand.ll
llvm/test/CodeGen/Thumb2/mve-fmath.ll
llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
llvm/test/CodeGen/Thumb2/mve-masked-store.ll
llvm/test/CodeGen/Thumb2/mve-phireg.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 8fc80b897265..45ce55dbd2ca 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -717,7 +717,6 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
if (Subtarget->hasFullFP16()) {
addRegisterClass(MVT::f16, &ARM::HPRRegClass);
setOperationAction(ISD::BITCAST, MVT::i16, Custom);
- setOperationAction(ISD::BITCAST, MVT::i32, Custom);
setOperationAction(ISD::BITCAST, MVT::f16, Custom);
setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
@@ -5739,27 +5738,6 @@ static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
EVT DstVT = N->getValueType(0);
const bool HasFullFP16 = Subtarget->hasFullFP16();
- if (SrcVT == MVT::f32 && DstVT == MVT::i32) {
- // FullFP16: half values are passed in S-registers, and we don't
- // need any of the bitcast and moves:
- //
- // t2: f32,ch = CopyFromReg t0, Register:f32 %0
- // t5: i32 = bitcast t2
- // t18: f16 = ARMISD::VMOVhr t5
- if (Op.getOpcode() != ISD::CopyFromReg ||
- Op.getValueType() != MVT::f32)
- return SDValue();
-
- auto Move = N->use_begin();
- if (Move->getOpcode() != ARMISD::VMOVhr)
- return SDValue();
-
- SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
- SDValue Copy = DAG.getNode(ISD::CopyFromReg, SDLoc(Op), MVT::f16, Ops);
- DAG.ReplaceAllUsesWith(*Move, &Copy);
- return Copy;
- }
-
if (SrcVT == MVT::i16 && DstVT == MVT::f16) {
if (!HasFullFP16)
return SDValue();
@@ -12881,6 +12859,25 @@ static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
return SDValue();
}
+static SDValue PerformVMOVhrCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
+ // FullFP16: half values are passed in S-registers, and we don't
+ // need any of the bitcast and moves:
+ //
+ // t2: f32,ch = CopyFromReg t0, Register:f32 %0
+ // t5: i32 = bitcast t2
+ // t18: f16 = ARMISD::VMOVhr t5
+ SDValue BC = N->getOperand(0);
+ if (BC->getOpcode() != ISD::BITCAST)
+ return SDValue();
+ SDValue Copy = BC->getOperand(0);
+ if (Copy.getValueType() != MVT::f32 || Copy->getOpcode() != ISD::CopyFromReg)
+ return SDValue();
+
+ SDValue Ops[] = {Copy->getOperand(0), Copy->getOperand(1)};
+ SDValue NewCopy = DCI.DAG.getNode(ISD::CopyFromReg, SDLoc(N), MVT::f16, Ops);
+ return NewCopy;
+}
+
/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
/// are normal, non-volatile loads. If so, it is profitable to bitcast an
/// i64 vector to have f64 elements, since the value can then be loaded
@@ -14936,6 +14933,7 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
case ARMISD::BFI: return PerformBFICombine(N, DCI);
case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
+ case ARMISD::VMOVhr: return PerformVMOVhrCombine(N, DCI);
case ISD::STORE: return PerformSTORECombine(N, DCI, Subtarget);
case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget);
case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
diff --git a/llvm/test/CodeGen/ARM/fp16-bitcast.ll b/llvm/test/CodeGen/ARM/fp16-bitcast.ll
index 450e534d7a51..6d6b809faac8 100644
--- a/llvm/test/CodeGen/ARM/fp16-bitcast.ll
+++ b/llvm/test/CodeGen/ARM/fp16-bitcast.ll
@@ -5,24 +5,13 @@
target triple = "thumbv8.1m.main-arm-unknown-eabi"
define float @add(float %a, float %b) {
-; CHECK-VFPV4-LABEL: add:
-; CHECK-VFPV4: @ %bb.0: @ %entry
-; CHECK-VFPV4-NEXT: vmov s0, r1
-; CHECK-VFPV4-NEXT: vmov s2, r0
-; CHECK-VFPV4-NEXT: vadd.f32 s0, s2, s0
-; CHECK-VFPV4-NEXT: vmov r0, s0
-; CHECK-VFPV4-NEXT: bx lr
-;
-; CHECK-FP16-LABEL: add:
-; CHECK-FP16: @ %bb.0: @ %entry
-; CHECK-FP16-NEXT: .pad #4
-; CHECK-FP16-NEXT: sub sp, #4
-; CHECK-FP16-NEXT: vmov s0, r1
-; CHECK-FP16-NEXT: vmov s2, r0
-; CHECK-FP16-NEXT: vadd.f32 s0, s2, s0
-; CHECK-FP16-NEXT: vstr s0, [sp]
-; CHECK-FP16-NEXT: ldr r0, [sp], #4
-; CHECK-FP16-NEXT: bx lr
+; CHECK-LABEL: add:
+; CHECK: @ %bb.0: @ %entry
+; CHECK-NEXT: vmov s0, r1
+; CHECK-NEXT: vmov s2, r0
+; CHECK-NEXT: vadd.f32 s0, s2, s0
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: bx lr
entry:
%add = fadd float %a, %b
ret float %add
diff --git a/llvm/test/CodeGen/Thumb2/mve-div-expand.ll b/llvm/test/CodeGen/Thumb2/mve-div-expand.ll
index fbadfcdfc131..41e5a27d2657 100644
--- a/llvm/test/CodeGen/Thumb2/mve-div-expand.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-div-expand.ll
@@ -890,38 +890,32 @@ define arm_aapcs_vfpcc <4 x float> @frem_f32(<4 x float> %in1, <4 x float> %in2)
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r5, r6, lr}
; CHECK-NEXT: push {r4, r5, r6, lr}
-; CHECK-NEXT: .vsave {d8, d9}
-; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
-; CHECK-NEXT: vstr s0, [sp]
-; CHECK-NEXT: ldr r4, [sp]
-; CHECK-NEXT: vstr s2, [sp, #16]
-; CHECK-NEXT: vstr s6, [sp, #20]
-; CHECK-NEXT: vstr s3, [sp, #24]
-; CHECK-NEXT: vstr s7, [sp, #28]
-; CHECK-NEXT: vstr s4, [sp, #4]
-; CHECK-NEXT: vstr s1, [sp, #8]
-; CHECK-NEXT: vstr s5, [sp, #12]
-; CHECK-NEXT: ldrd r0, r1, [sp, #16]
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEXT: vmov q4, q1
+; CHECK-NEXT: vmov q5, q0
+; CHECK-NEXT: vmov r0, s22
+; CHECK-NEXT: vmov r1, s18
; CHECK-NEXT: bl fmodf
-; CHECK-NEXT: mov r5, r0
-; CHECK-NEXT: ldrd r0, r1, [sp, #24]
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: vmov r0, s23
+; CHECK-NEXT: vmov r1, s19
; CHECK-NEXT: bl fmodf
-; CHECK-NEXT: ldr r1, [sp, #12]
+; CHECK-NEXT: vmov r2, s21
+; CHECK-NEXT: vmov r1, s17
+; CHECK-NEXT: vmov r6, s16
; CHECK-NEXT: vmov s19, r0
-; CHECK-NEXT: ldrd r6, r2, [sp, #4]
-; CHECK-NEXT: vmov s18, r5
+; CHECK-NEXT: vmov r5, s20
+; CHECK-NEXT: vmov s18, r4
; CHECK-NEXT: mov r0, r2
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: vmov s17, r0
-; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: mov r1, r6
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: vmov s16, r0
; CHECK-NEXT: vmov q0, q4
-; CHECK-NEXT: add sp, #32
-; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: pop {r4, r5, r6, pc}
entry:
%out = frem <4 x float> %in1, %in2
@@ -978,15 +972,12 @@ define arm_aapcs_vfpcc <8 x half> @frem_f16(<8 x half> %in1, <8 x half> %in2) {
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
-; CHECK-NEXT: .pad #64
-; CHECK-NEXT: sub sp, #64
; CHECK-NEXT: vmov q4, q0
; CHECK-NEXT: vmov q5, q1
; CHECK-NEXT: vcvtb.f32.f16 s0, s16
-; CHECK-NEXT: vstr s0, [sp, #56]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s20
-; CHECK-NEXT: vstr s0, [sp, #60]
-; CHECK-NEXT: ldrd r0, r1, [sp, #56]
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmovx.f16 s2, s16
@@ -994,21 +985,19 @@ define arm_aapcs_vfpcc <8 x half> @frem_f16(<8 x half> %in1, <8 x half> %in2) {
; CHECK-NEXT: vcvtb.f32.f16 s2, s2
; CHECK-NEXT: vmov r4, s0
; CHECK-NEXT: vmovx.f16 s0, s20
-; CHECK-NEXT: vstr s2, [sp, #48]
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #52]
-; CHECK-NEXT: ldrd r0, r1, [sp, #48]
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmov.16 q6[0], r4
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s17
-; CHECK-NEXT: vstr s0, [sp, #40]
-; CHECK-NEXT: vcvtb.f32.f16 s0, s21
-; CHECK-NEXT: vstr s0, [sp, #44]
; CHECK-NEXT: vmov.16 q6[1], r0
-; CHECK-NEXT: ldrd r0, r1, [sp, #40]
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: vcvtb.f32.f16 s0, s21
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmovx.f16 s2, s17
@@ -1016,21 +1005,19 @@ define arm_aapcs_vfpcc <8 x half> @frem_f16(<8 x half> %in1, <8 x half> %in2) {
; CHECK-NEXT: vcvtb.f32.f16 s2, s2
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmovx.f16 s0, s21
-; CHECK-NEXT: vstr s2, [sp, #32]
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #36]
; CHECK-NEXT: vmov.16 q6[2], r0
-; CHECK-NEXT: ldrd r0, r1, [sp, #32]
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s18
-; CHECK-NEXT: vstr s0, [sp, #24]
-; CHECK-NEXT: vcvtb.f32.f16 s0, s22
-; CHECK-NEXT: vstr s0, [sp, #28]
; CHECK-NEXT: vmov.16 q6[3], r0
-; CHECK-NEXT: ldrd r0, r1, [sp, #24]
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: vcvtb.f32.f16 s0, s22
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmovx.f16 s2, s18
@@ -1038,21 +1025,19 @@ define arm_aapcs_vfpcc <8 x half> @frem_f16(<8 x half> %in1, <8 x half> %in2) {
; CHECK-NEXT: vcvtb.f32.f16 s2, s2
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmovx.f16 s0, s22
-; CHECK-NEXT: vstr s2, [sp, #16]
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #20]
; CHECK-NEXT: vmov.16 q6[4], r0
-; CHECK-NEXT: ldrd r0, r1, [sp, #16]
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s19
-; CHECK-NEXT: vstr s0, [sp, #8]
-; CHECK-NEXT: vcvtb.f32.f16 s0, s23
-; CHECK-NEXT: vstr s0, [sp, #12]
; CHECK-NEXT: vmov.16 q6[5], r0
-; CHECK-NEXT: ldrd r0, r1, [sp, #8]
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: vcvtb.f32.f16 s0, s23
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmovx.f16 s2, s19
@@ -1060,18 +1045,16 @@ define arm_aapcs_vfpcc <8 x half> @frem_f16(<8 x half> %in1, <8 x half> %in2) {
; CHECK-NEXT: vcvtb.f32.f16 s2, s2
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmovx.f16 s0, s23
-; CHECK-NEXT: vstr s2, [sp]
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #4]
; CHECK-NEXT: vmov.16 q6[6], r0
-; CHECK-NEXT: ldrd r0, r1, [sp]
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl fmodf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.16 q6[7], r0
; CHECK-NEXT: vmov q0, q6
-; CHECK-NEXT: add sp, #64
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: pop {r4, pc}
entry:
diff --git a/llvm/test/CodeGen/Thumb2/mve-fmath.ll b/llvm/test/CodeGen/Thumb2/mve-fmath.ll
index af9a3ed91b04..6fece1ac0c77 100644
--- a/llvm/test/CodeGen/Thumb2/mve-fmath.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-fmath.ll
@@ -85,25 +85,23 @@ define arm_aapcs_vfpcc <4 x float> @cos_float32_t(<4 x float> %src) {
; CHECK-NEXT: push {r4, r5, r7, lr}
; CHECK-NEXT: .vsave {d8, d9}
; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: .pad #16
-; CHECK-NEXT: sub sp, #16
-; CHECK-NEXT: vstmia sp, {s0, s1, s2, s3}
-; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: vmov q4, q0
+; CHECK-NEXT: vmov r0, s18
; CHECK-NEXT: bl cosf
; CHECK-NEXT: mov r4, r0
-; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: vmov r0, s19
; CHECK-NEXT: bl cosf
-; CHECK-NEXT: ldrd r5, r1, [sp]
+; CHECK-NEXT: vmov r1, s17
+; CHECK-NEXT: vmov r5, s16
; CHECK-NEXT: vmov s19, r0
-; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: vmov s18, r4
+; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: bl cosf
; CHECK-NEXT: vmov s17, r0
; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: bl cosf
; CHECK-NEXT: vmov s16, r0
; CHECK-NEXT: vmov q0, q4
-; CHECK-NEXT: add sp, #16
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: pop {r4, r5, r7, pc}
entry:
@@ -118,29 +116,24 @@ define arm_aapcs_vfpcc <8 x half> @cos_float16_t(<8 x half> %src) {
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11}
; CHECK-NEXT: vpush {d8, d9, d10, d11}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
; CHECK-NEXT: vmov q4, q0
; CHECK-NEXT: vcvtb.f32.f16 s0, s16
-; CHECK-NEXT: vstr s0, [sp, #28]
-; CHECK-NEXT: ldr r0, [sp, #28]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl cosf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r4, s0
; CHECK-NEXT: vmovx.f16 s0, s16
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #24]
-; CHECK-NEXT: ldr r0, [sp, #24]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl cosf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmov.16 q5[0], r4
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s17
-; CHECK-NEXT: vstr s0, [sp, #20]
; CHECK-NEXT: vmov.16 q5[1], r0
-; CHECK-NEXT: ldr r0, [sp, #20]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl cosf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -148,16 +141,14 @@ define arm_aapcs_vfpcc <8 x half> @cos_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s17
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[2], r0
-; CHECK-NEXT: vstr s0, [sp, #16]
-; CHECK-NEXT: ldr r0, [sp, #16]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl cosf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s18
-; CHECK-NEXT: vstr s0, [sp, #12]
; CHECK-NEXT: vmov.16 q5[3], r0
-; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl cosf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -165,16 +156,14 @@ define arm_aapcs_vfpcc <8 x half> @cos_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s18
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[4], r0
-; CHECK-NEXT: vstr s0, [sp, #8]
-; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl cosf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s19
-; CHECK-NEXT: vstr s0, [sp, #4]
; CHECK-NEXT: vmov.16 q5[5], r0
-; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl cosf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -182,15 +171,13 @@ define arm_aapcs_vfpcc <8 x half> @cos_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s19
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[6], r0
-; CHECK-NEXT: vstr s0, [sp]
-; CHECK-NEXT: ldr r0, [sp]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl cosf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.16 q5[7], r0
; CHECK-NEXT: vmov q0, q5
-; CHECK-NEXT: add sp, #32
; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: pop {r4, pc}
entry:
@@ -229,25 +216,23 @@ define arm_aapcs_vfpcc <4 x float> @sin_float32_t(<4 x float> %src) {
; CHECK-NEXT: push {r4, r5, r7, lr}
; CHECK-NEXT: .vsave {d8, d9}
; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: .pad #16
-; CHECK-NEXT: sub sp, #16
-; CHECK-NEXT: vstmia sp, {s0, s1, s2, s3}
-; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: vmov q4, q0
+; CHECK-NEXT: vmov r0, s18
; CHECK-NEXT: bl sinf
; CHECK-NEXT: mov r4, r0
-; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: vmov r0, s19
; CHECK-NEXT: bl sinf
-; CHECK-NEXT: ldrd r5, r1, [sp]
+; CHECK-NEXT: vmov r1, s17
+; CHECK-NEXT: vmov r5, s16
; CHECK-NEXT: vmov s19, r0
-; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: vmov s18, r4
+; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: bl sinf
; CHECK-NEXT: vmov s17, r0
; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: bl sinf
; CHECK-NEXT: vmov s16, r0
; CHECK-NEXT: vmov q0, q4
-; CHECK-NEXT: add sp, #16
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: pop {r4, r5, r7, pc}
entry:
@@ -262,29 +247,24 @@ define arm_aapcs_vfpcc <8 x half> @sin_float16_t(<8 x half> %src) {
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11}
; CHECK-NEXT: vpush {d8, d9, d10, d11}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
; CHECK-NEXT: vmov q4, q0
; CHECK-NEXT: vcvtb.f32.f16 s0, s16
-; CHECK-NEXT: vstr s0, [sp, #28]
-; CHECK-NEXT: ldr r0, [sp, #28]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl sinf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r4, s0
; CHECK-NEXT: vmovx.f16 s0, s16
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #24]
-; CHECK-NEXT: ldr r0, [sp, #24]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl sinf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmov.16 q5[0], r4
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s17
-; CHECK-NEXT: vstr s0, [sp, #20]
; CHECK-NEXT: vmov.16 q5[1], r0
-; CHECK-NEXT: ldr r0, [sp, #20]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl sinf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -292,16 +272,14 @@ define arm_aapcs_vfpcc <8 x half> @sin_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s17
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[2], r0
-; CHECK-NEXT: vstr s0, [sp, #16]
-; CHECK-NEXT: ldr r0, [sp, #16]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl sinf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s18
-; CHECK-NEXT: vstr s0, [sp, #12]
; CHECK-NEXT: vmov.16 q5[3], r0
-; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl sinf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -309,16 +287,14 @@ define arm_aapcs_vfpcc <8 x half> @sin_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s18
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[4], r0
-; CHECK-NEXT: vstr s0, [sp, #8]
-; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl sinf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s19
-; CHECK-NEXT: vstr s0, [sp, #4]
; CHECK-NEXT: vmov.16 q5[5], r0
-; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl sinf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -326,15 +302,13 @@ define arm_aapcs_vfpcc <8 x half> @sin_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s19
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[6], r0
-; CHECK-NEXT: vstr s0, [sp]
-; CHECK-NEXT: ldr r0, [sp]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl sinf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.16 q5[7], r0
; CHECK-NEXT: vmov q0, q5
-; CHECK-NEXT: add sp, #32
; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: pop {r4, pc}
entry:
@@ -373,25 +347,23 @@ define arm_aapcs_vfpcc <4 x float> @exp_float32_t(<4 x float> %src) {
; CHECK-NEXT: push {r4, r5, r7, lr}
; CHECK-NEXT: .vsave {d8, d9}
; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: .pad #16
-; CHECK-NEXT: sub sp, #16
-; CHECK-NEXT: vstmia sp, {s0, s1, s2, s3}
-; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: vmov q4, q0
+; CHECK-NEXT: vmov r0, s18
; CHECK-NEXT: bl expf
; CHECK-NEXT: mov r4, r0
-; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: vmov r0, s19
; CHECK-NEXT: bl expf
-; CHECK-NEXT: ldrd r5, r1, [sp]
+; CHECK-NEXT: vmov r1, s17
+; CHECK-NEXT: vmov r5, s16
; CHECK-NEXT: vmov s19, r0
-; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: vmov s18, r4
+; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: bl expf
; CHECK-NEXT: vmov s17, r0
; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: bl expf
; CHECK-NEXT: vmov s16, r0
; CHECK-NEXT: vmov q0, q4
-; CHECK-NEXT: add sp, #16
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: pop {r4, r5, r7, pc}
entry:
@@ -406,29 +378,24 @@ define arm_aapcs_vfpcc <8 x half> @exp_float16_t(<8 x half> %src) {
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11}
; CHECK-NEXT: vpush {d8, d9, d10, d11}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
; CHECK-NEXT: vmov q4, q0
; CHECK-NEXT: vcvtb.f32.f16 s0, s16
-; CHECK-NEXT: vstr s0, [sp, #28]
-; CHECK-NEXT: ldr r0, [sp, #28]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl expf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r4, s0
; CHECK-NEXT: vmovx.f16 s0, s16
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #24]
-; CHECK-NEXT: ldr r0, [sp, #24]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl expf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmov.16 q5[0], r4
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s17
-; CHECK-NEXT: vstr s0, [sp, #20]
; CHECK-NEXT: vmov.16 q5[1], r0
-; CHECK-NEXT: ldr r0, [sp, #20]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl expf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -436,16 +403,14 @@ define arm_aapcs_vfpcc <8 x half> @exp_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s17
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[2], r0
-; CHECK-NEXT: vstr s0, [sp, #16]
-; CHECK-NEXT: ldr r0, [sp, #16]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl expf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s18
-; CHECK-NEXT: vstr s0, [sp, #12]
; CHECK-NEXT: vmov.16 q5[3], r0
-; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl expf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -453,16 +418,14 @@ define arm_aapcs_vfpcc <8 x half> @exp_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s18
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[4], r0
-; CHECK-NEXT: vstr s0, [sp, #8]
-; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl expf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s19
-; CHECK-NEXT: vstr s0, [sp, #4]
; CHECK-NEXT: vmov.16 q5[5], r0
-; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl expf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -470,15 +433,13 @@ define arm_aapcs_vfpcc <8 x half> @exp_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s19
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[6], r0
-; CHECK-NEXT: vstr s0, [sp]
-; CHECK-NEXT: ldr r0, [sp]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl expf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.16 q5[7], r0
; CHECK-NEXT: vmov q0, q5
-; CHECK-NEXT: add sp, #32
; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: pop {r4, pc}
entry:
@@ -517,25 +478,23 @@ define arm_aapcs_vfpcc <4 x float> @exp2_float32_t(<4 x float> %src) {
; CHECK-NEXT: push {r4, r5, r7, lr}
; CHECK-NEXT: .vsave {d8, d9}
; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: .pad #16
-; CHECK-NEXT: sub sp, #16
-; CHECK-NEXT: vstmia sp, {s0, s1, s2, s3}
-; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: vmov q4, q0
+; CHECK-NEXT: vmov r0, s18
; CHECK-NEXT: bl exp2f
; CHECK-NEXT: mov r4, r0
-; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: vmov r0, s19
; CHECK-NEXT: bl exp2f
-; CHECK-NEXT: ldrd r5, r1, [sp]
+; CHECK-NEXT: vmov r1, s17
+; CHECK-NEXT: vmov r5, s16
; CHECK-NEXT: vmov s19, r0
-; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: vmov s18, r4
+; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: bl exp2f
; CHECK-NEXT: vmov s17, r0
; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: bl exp2f
; CHECK-NEXT: vmov s16, r0
; CHECK-NEXT: vmov q0, q4
-; CHECK-NEXT: add sp, #16
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: pop {r4, r5, r7, pc}
entry:
@@ -550,29 +509,24 @@ define arm_aapcs_vfpcc <8 x half> @exp2_float16_t(<8 x half> %src) {
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11}
; CHECK-NEXT: vpush {d8, d9, d10, d11}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
; CHECK-NEXT: vmov q4, q0
; CHECK-NEXT: vcvtb.f32.f16 s0, s16
-; CHECK-NEXT: vstr s0, [sp, #28]
-; CHECK-NEXT: ldr r0, [sp, #28]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl exp2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r4, s0
; CHECK-NEXT: vmovx.f16 s0, s16
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #24]
-; CHECK-NEXT: ldr r0, [sp, #24]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl exp2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmov.16 q5[0], r4
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s17
-; CHECK-NEXT: vstr s0, [sp, #20]
; CHECK-NEXT: vmov.16 q5[1], r0
-; CHECK-NEXT: ldr r0, [sp, #20]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl exp2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -580,16 +534,14 @@ define arm_aapcs_vfpcc <8 x half> @exp2_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s17
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[2], r0
-; CHECK-NEXT: vstr s0, [sp, #16]
-; CHECK-NEXT: ldr r0, [sp, #16]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl exp2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s18
-; CHECK-NEXT: vstr s0, [sp, #12]
; CHECK-NEXT: vmov.16 q5[3], r0
-; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl exp2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -597,16 +549,14 @@ define arm_aapcs_vfpcc <8 x half> @exp2_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s18
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[4], r0
-; CHECK-NEXT: vstr s0, [sp, #8]
-; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl exp2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s19
-; CHECK-NEXT: vstr s0, [sp, #4]
; CHECK-NEXT: vmov.16 q5[5], r0
-; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl exp2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -614,15 +564,13 @@ define arm_aapcs_vfpcc <8 x half> @exp2_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s19
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[6], r0
-; CHECK-NEXT: vstr s0, [sp]
-; CHECK-NEXT: ldr r0, [sp]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl exp2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.16 q5[7], r0
; CHECK-NEXT: vmov q0, q5
-; CHECK-NEXT: add sp, #32
; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: pop {r4, pc}
entry:
@@ -661,25 +609,23 @@ define arm_aapcs_vfpcc <4 x float> @log_float32_t(<4 x float> %src) {
; CHECK-NEXT: push {r4, r5, r7, lr}
; CHECK-NEXT: .vsave {d8, d9}
; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: .pad #16
-; CHECK-NEXT: sub sp, #16
-; CHECK-NEXT: vstmia sp, {s0, s1, s2, s3}
-; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: vmov q4, q0
+; CHECK-NEXT: vmov r0, s18
; CHECK-NEXT: bl logf
; CHECK-NEXT: mov r4, r0
-; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: vmov r0, s19
; CHECK-NEXT: bl logf
-; CHECK-NEXT: ldrd r5, r1, [sp]
+; CHECK-NEXT: vmov r1, s17
+; CHECK-NEXT: vmov r5, s16
; CHECK-NEXT: vmov s19, r0
-; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: vmov s18, r4
+; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: bl logf
; CHECK-NEXT: vmov s17, r0
; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: bl logf
; CHECK-NEXT: vmov s16, r0
; CHECK-NEXT: vmov q0, q4
-; CHECK-NEXT: add sp, #16
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: pop {r4, r5, r7, pc}
entry:
@@ -694,29 +640,24 @@ define arm_aapcs_vfpcc <8 x half> @log_float16_t(<8 x half> %src) {
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11}
; CHECK-NEXT: vpush {d8, d9, d10, d11}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
; CHECK-NEXT: vmov q4, q0
; CHECK-NEXT: vcvtb.f32.f16 s0, s16
-; CHECK-NEXT: vstr s0, [sp, #28]
-; CHECK-NEXT: ldr r0, [sp, #28]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl logf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r4, s0
; CHECK-NEXT: vmovx.f16 s0, s16
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #24]
-; CHECK-NEXT: ldr r0, [sp, #24]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl logf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmov.16 q5[0], r4
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s17
-; CHECK-NEXT: vstr s0, [sp, #20]
; CHECK-NEXT: vmov.16 q5[1], r0
-; CHECK-NEXT: ldr r0, [sp, #20]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl logf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -724,16 +665,14 @@ define arm_aapcs_vfpcc <8 x half> @log_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s17
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[2], r0
-; CHECK-NEXT: vstr s0, [sp, #16]
-; CHECK-NEXT: ldr r0, [sp, #16]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl logf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s18
-; CHECK-NEXT: vstr s0, [sp, #12]
; CHECK-NEXT: vmov.16 q5[3], r0
-; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl logf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -741,16 +680,14 @@ define arm_aapcs_vfpcc <8 x half> @log_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s18
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[4], r0
-; CHECK-NEXT: vstr s0, [sp, #8]
-; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl logf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s19
-; CHECK-NEXT: vstr s0, [sp, #4]
; CHECK-NEXT: vmov.16 q5[5], r0
-; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl logf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -758,15 +695,13 @@ define arm_aapcs_vfpcc <8 x half> @log_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s19
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[6], r0
-; CHECK-NEXT: vstr s0, [sp]
-; CHECK-NEXT: ldr r0, [sp]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl logf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.16 q5[7], r0
; CHECK-NEXT: vmov q0, q5
-; CHECK-NEXT: add sp, #32
; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: pop {r4, pc}
entry:
@@ -805,25 +740,23 @@ define arm_aapcs_vfpcc <4 x float> @log2_float32_t(<4 x float> %src) {
; CHECK-NEXT: push {r4, r5, r7, lr}
; CHECK-NEXT: .vsave {d8, d9}
; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: .pad #16
-; CHECK-NEXT: sub sp, #16
-; CHECK-NEXT: vstmia sp, {s0, s1, s2, s3}
-; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: vmov q4, q0
+; CHECK-NEXT: vmov r0, s18
; CHECK-NEXT: bl log2f
; CHECK-NEXT: mov r4, r0
-; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: vmov r0, s19
; CHECK-NEXT: bl log2f
-; CHECK-NEXT: ldrd r5, r1, [sp]
+; CHECK-NEXT: vmov r1, s17
+; CHECK-NEXT: vmov r5, s16
; CHECK-NEXT: vmov s19, r0
-; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: vmov s18, r4
+; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: bl log2f
; CHECK-NEXT: vmov s17, r0
; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: bl log2f
; CHECK-NEXT: vmov s16, r0
; CHECK-NEXT: vmov q0, q4
-; CHECK-NEXT: add sp, #16
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: pop {r4, r5, r7, pc}
entry:
@@ -838,29 +771,24 @@ define arm_aapcs_vfpcc <8 x half> @log2_float16_t(<8 x half> %src) {
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11}
; CHECK-NEXT: vpush {d8, d9, d10, d11}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
; CHECK-NEXT: vmov q4, q0
; CHECK-NEXT: vcvtb.f32.f16 s0, s16
-; CHECK-NEXT: vstr s0, [sp, #28]
-; CHECK-NEXT: ldr r0, [sp, #28]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r4, s0
; CHECK-NEXT: vmovx.f16 s0, s16
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #24]
-; CHECK-NEXT: ldr r0, [sp, #24]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmov.16 q5[0], r4
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s17
-; CHECK-NEXT: vstr s0, [sp, #20]
; CHECK-NEXT: vmov.16 q5[1], r0
-; CHECK-NEXT: ldr r0, [sp, #20]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -868,16 +796,14 @@ define arm_aapcs_vfpcc <8 x half> @log2_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s17
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[2], r0
-; CHECK-NEXT: vstr s0, [sp, #16]
-; CHECK-NEXT: ldr r0, [sp, #16]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s18
-; CHECK-NEXT: vstr s0, [sp, #12]
; CHECK-NEXT: vmov.16 q5[3], r0
-; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -885,16 +811,14 @@ define arm_aapcs_vfpcc <8 x half> @log2_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s18
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[4], r0
-; CHECK-NEXT: vstr s0, [sp, #8]
-; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s19
-; CHECK-NEXT: vstr s0, [sp, #4]
; CHECK-NEXT: vmov.16 q5[5], r0
-; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -902,15 +826,13 @@ define arm_aapcs_vfpcc <8 x half> @log2_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s19
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[6], r0
-; CHECK-NEXT: vstr s0, [sp]
-; CHECK-NEXT: ldr r0, [sp]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.16 q5[7], r0
; CHECK-NEXT: vmov q0, q5
-; CHECK-NEXT: add sp, #32
; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: pop {r4, pc}
entry:
@@ -949,25 +871,23 @@ define arm_aapcs_vfpcc <4 x float> @log10_float32_t(<4 x float> %src) {
; CHECK-NEXT: push {r4, r5, r7, lr}
; CHECK-NEXT: .vsave {d8, d9}
; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: .pad #16
-; CHECK-NEXT: sub sp, #16
-; CHECK-NEXT: vstmia sp, {s0, s1, s2, s3}
-; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: vmov q4, q0
+; CHECK-NEXT: vmov r0, s18
; CHECK-NEXT: bl log10f
; CHECK-NEXT: mov r4, r0
-; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: vmov r0, s19
; CHECK-NEXT: bl log10f
-; CHECK-NEXT: ldrd r5, r1, [sp]
+; CHECK-NEXT: vmov r1, s17
+; CHECK-NEXT: vmov r5, s16
; CHECK-NEXT: vmov s19, r0
-; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: vmov s18, r4
+; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: bl log10f
; CHECK-NEXT: vmov s17, r0
; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: bl log10f
; CHECK-NEXT: vmov s16, r0
; CHECK-NEXT: vmov q0, q4
-; CHECK-NEXT: add sp, #16
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: pop {r4, r5, r7, pc}
entry:
@@ -982,29 +902,24 @@ define arm_aapcs_vfpcc <8 x half> @log10_float16_t(<8 x half> %src) {
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11}
; CHECK-NEXT: vpush {d8, d9, d10, d11}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
; CHECK-NEXT: vmov q4, q0
; CHECK-NEXT: vcvtb.f32.f16 s0, s16
-; CHECK-NEXT: vstr s0, [sp, #28]
-; CHECK-NEXT: ldr r0, [sp, #28]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log10f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r4, s0
; CHECK-NEXT: vmovx.f16 s0, s16
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #24]
-; CHECK-NEXT: ldr r0, [sp, #24]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log10f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmov.16 q5[0], r4
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s17
-; CHECK-NEXT: vstr s0, [sp, #20]
; CHECK-NEXT: vmov.16 q5[1], r0
-; CHECK-NEXT: ldr r0, [sp, #20]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log10f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -1012,16 +927,14 @@ define arm_aapcs_vfpcc <8 x half> @log10_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s17
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[2], r0
-; CHECK-NEXT: vstr s0, [sp, #16]
-; CHECK-NEXT: ldr r0, [sp, #16]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log10f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s18
-; CHECK-NEXT: vstr s0, [sp, #12]
; CHECK-NEXT: vmov.16 q5[3], r0
-; CHECK-NEXT: ldr r0, [sp, #12]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log10f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -1029,16 +942,14 @@ define arm_aapcs_vfpcc <8 x half> @log10_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s18
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[4], r0
-; CHECK-NEXT: vstr s0, [sp, #8]
-; CHECK-NEXT: ldr r0, [sp, #8]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log10f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s19
-; CHECK-NEXT: vstr s0, [sp, #4]
; CHECK-NEXT: vmov.16 q5[5], r0
-; CHECK-NEXT: ldr r0, [sp, #4]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log10f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
@@ -1046,15 +957,13 @@ define arm_aapcs_vfpcc <8 x half> @log10_float16_t(<8 x half> %src) {
; CHECK-NEXT: vmovx.f16 s0, s19
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
; CHECK-NEXT: vmov.16 q5[6], r0
-; CHECK-NEXT: vstr s0, [sp]
-; CHECK-NEXT: ldr r0, [sp]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl log10f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.16 q5[7], r0
; CHECK-NEXT: vmov q0, q5
-; CHECK-NEXT: add sp, #32
; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: pop {r4, pc}
entry:
@@ -1091,38 +1000,32 @@ define arm_aapcs_vfpcc <4 x float> @pow_float32_t(<4 x float> %src1, <4 x float>
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r4, r5, r6, lr}
; CHECK-NEXT: push {r4, r5, r6, lr}
-; CHECK-NEXT: .vsave {d8, d9}
-; CHECK-NEXT: vpush {d8, d9}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
-; CHECK-NEXT: vstr s0, [sp]
-; CHECK-NEXT: ldr r4, [sp]
-; CHECK-NEXT: vstr s2, [sp, #16]
-; CHECK-NEXT: vstr s6, [sp, #20]
-; CHECK-NEXT: vstr s3, [sp, #24]
-; CHECK-NEXT: vstr s7, [sp, #28]
-; CHECK-NEXT: vstr s4, [sp, #4]
-; CHECK-NEXT: vstr s1, [sp, #8]
-; CHECK-NEXT: vstr s5, [sp, #12]
-; CHECK-NEXT: ldrd r0, r1, [sp, #16]
+; CHECK-NEXT: .vsave {d8, d9, d10, d11}
+; CHECK-NEXT: vpush {d8, d9, d10, d11}
+; CHECK-NEXT: vmov q4, q1
+; CHECK-NEXT: vmov q5, q0
+; CHECK-NEXT: vmov r0, s22
+; CHECK-NEXT: vmov r1, s18
; CHECK-NEXT: bl powf
-; CHECK-NEXT: mov r5, r0
-; CHECK-NEXT: ldrd r0, r1, [sp, #24]
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: vmov r0, s23
+; CHECK-NEXT: vmov r1, s19
; CHECK-NEXT: bl powf
-; CHECK-NEXT: ldr r1, [sp, #12]
+; CHECK-NEXT: vmov r2, s21
+; CHECK-NEXT: vmov r1, s17
+; CHECK-NEXT: vmov r6, s16
; CHECK-NEXT: vmov s19, r0
-; CHECK-NEXT: ldrd r6, r2, [sp, #4]
-; CHECK-NEXT: vmov s18, r5
+; CHECK-NEXT: vmov r5, s20
+; CHECK-NEXT: vmov s18, r4
; CHECK-NEXT: mov r0, r2
; CHECK-NEXT: bl powf
; CHECK-NEXT: vmov s17, r0
-; CHECK-NEXT: mov r0, r4
+; CHECK-NEXT: mov r0, r5
; CHECK-NEXT: mov r1, r6
; CHECK-NEXT: bl powf
; CHECK-NEXT: vmov s16, r0
; CHECK-NEXT: vmov q0, q4
-; CHECK-NEXT: add sp, #32
-; CHECK-NEXT: vpop {d8, d9}
+; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: pop {r4, r5, r6, pc}
entry:
%0 = call fast <4 x float> @llvm.pow.v4f32(<4 x float> %src1, <4 x float> %src2)
@@ -1136,15 +1039,12 @@ define arm_aapcs_vfpcc <8 x half> @pow_float16_t(<8 x half> %src1, <8 x half> %s
; CHECK-NEXT: push {r4, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
-; CHECK-NEXT: .pad #64
-; CHECK-NEXT: sub sp, #64
; CHECK-NEXT: vmov q4, q0
; CHECK-NEXT: vmov q5, q1
; CHECK-NEXT: vcvtb.f32.f16 s0, s16
-; CHECK-NEXT: vstr s0, [sp, #56]
+; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s20
-; CHECK-NEXT: vstr s0, [sp, #60]
-; CHECK-NEXT: ldrd r0, r1, [sp, #56]
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl powf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmovx.f16 s2, s16
@@ -1152,21 +1052,19 @@ define arm_aapcs_vfpcc <8 x half> @pow_float16_t(<8 x half> %src1, <8 x half> %s
; CHECK-NEXT: vcvtb.f32.f16 s2, s2
; CHECK-NEXT: vmov r4, s0
; CHECK-NEXT: vmovx.f16 s0, s20
-; CHECK-NEXT: vstr s2, [sp, #48]
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #52]
-; CHECK-NEXT: ldrd r0, r1, [sp, #48]
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl powf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmov.16 q6[0], r4
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s17
-; CHECK-NEXT: vstr s0, [sp, #40]
-; CHECK-NEXT: vcvtb.f32.f16 s0, s21
-; CHECK-NEXT: vstr s0, [sp, #44]
; CHECK-NEXT: vmov.16 q6[1], r0
-; CHECK-NEXT: ldrd r0, r1, [sp, #40]
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: vcvtb.f32.f16 s0, s21
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl powf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmovx.f16 s2, s17
@@ -1174,21 +1072,19 @@ define arm_aapcs_vfpcc <8 x half> @pow_float16_t(<8 x half> %src1, <8 x half> %s
; CHECK-NEXT: vcvtb.f32.f16 s2, s2
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmovx.f16 s0, s21
-; CHECK-NEXT: vstr s2, [sp, #32]
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #36]
; CHECK-NEXT: vmov.16 q6[2], r0
-; CHECK-NEXT: ldrd r0, r1, [sp, #32]
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl powf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s18
-; CHECK-NEXT: vstr s0, [sp, #24]
-; CHECK-NEXT: vcvtb.f32.f16 s0, s22
-; CHECK-NEXT: vstr s0, [sp, #28]
; CHECK-NEXT: vmov.16 q6[3], r0
-; CHECK-NEXT: ldrd r0, r1, [sp, #24]
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: vcvtb.f32.f16 s0, s22
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl powf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmovx.f16 s2, s18
@@ -1196,21 +1092,19 @@ define arm_aapcs_vfpcc <8 x half> @pow_float16_t(<8 x half> %src1, <8 x half> %s
; CHECK-NEXT: vcvtb.f32.f16 s2, s2
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmovx.f16 s0, s22
-; CHECK-NEXT: vstr s2, [sp, #16]
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #20]
; CHECK-NEXT: vmov.16 q6[4], r0
-; CHECK-NEXT: ldrd r0, r1, [sp, #16]
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl powf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vcvtb.f32.f16 s0, s19
-; CHECK-NEXT: vstr s0, [sp, #8]
-; CHECK-NEXT: vcvtb.f32.f16 s0, s23
-; CHECK-NEXT: vstr s0, [sp, #12]
; CHECK-NEXT: vmov.16 q6[5], r0
-; CHECK-NEXT: ldrd r0, r1, [sp, #8]
+; CHECK-NEXT: vmov r0, s0
+; CHECK-NEXT: vcvtb.f32.f16 s0, s23
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl powf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmovx.f16 s2, s19
@@ -1218,18 +1112,16 @@ define arm_aapcs_vfpcc <8 x half> @pow_float16_t(<8 x half> %src1, <8 x half> %s
; CHECK-NEXT: vcvtb.f32.f16 s2, s2
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmovx.f16 s0, s23
-; CHECK-NEXT: vstr s2, [sp]
; CHECK-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-NEXT: vstr s0, [sp, #4]
; CHECK-NEXT: vmov.16 q6[6], r0
-; CHECK-NEXT: ldrd r0, r1, [sp]
+; CHECK-NEXT: vmov r0, s2
+; CHECK-NEXT: vmov r1, s0
; CHECK-NEXT: bl powf
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vcvtb.f16.f32 s0, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: vmov.16 q6[7], r0
; CHECK-NEXT: vmov q0, q6
-; CHECK-NEXT: add sp, #64
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: pop {r4, pc}
entry:
@@ -1267,40 +1159,29 @@ entry:
define arm_aapcs_vfpcc <4 x float> @copysign_float32_t(<4 x float> %src1, <4 x float> %src2) {
; CHECK-LABEL: copysign_float32_t:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: .save {r4, r5, r6, lr}
-; CHECK-NEXT: push {r4, r5, r6, lr}
-; CHECK-NEXT: .pad #32
-; CHECK-NEXT: sub sp, #32
-; CHECK-NEXT: vstr s5, [sp, #8]
-; CHECK-NEXT: ldr.w r12, [sp, #8]
-; CHECK-NEXT: vstr s6, [sp, #16]
-; CHECK-NEXT: ldr.w lr, [sp, #16]
-; CHECK-NEXT: vstr s7, [sp, #24]
-; CHECK-NEXT: lsr.w r2, r12, #31
-; CHECK-NEXT: ldr r6, [sp, #24]
-; CHECK-NEXT: vstr s3, [sp, #28]
-; CHECK-NEXT: ldr r3, [sp, #28]
-; CHECK-NEXT: vstr s4, [sp]
-; CHECK-NEXT: ldr r0, [sp]
-; CHECK-NEXT: vstr s0, [sp, #4]
-; CHECK-NEXT: ldr r1, [sp, #4]
-; CHECK-NEXT: vstr s1, [sp, #12]
+; CHECK-NEXT: .save {r4, r5, r7, lr}
+; CHECK-NEXT: push {r4, r5, r7, lr}
+; CHECK-NEXT: vmov r0, s5
+; CHECK-NEXT: vmov lr, s6
+; CHECK-NEXT: vmov r12, s7
+; CHECK-NEXT: vmov r3, s1
+; CHECK-NEXT: vmov r2, s2
+; CHECK-NEXT: vmov r1, s3
+; CHECK-NEXT: vmov r4, s4
+; CHECK-NEXT: vmov r5, s0
; CHECK-NEXT: lsrs r0, r0, #31
-; CHECK-NEXT: vstr s2, [sp, #20]
+; CHECK-NEXT: bfi r3, r0, #31, #1
+; CHECK-NEXT: lsr.w r0, lr, #31
+; CHECK-NEXT: bfi r2, r0, #31, #1
+; CHECK-NEXT: lsr.w r0, r12, #31
; CHECK-NEXT: bfi r1, r0, #31, #1
-; CHECK-NEXT: ldr r4, [sp, #12]
-; CHECK-NEXT: ldr r5, [sp, #20]
-; CHECK-NEXT: bfi r4, r2, #31, #1
-; CHECK-NEXT: lsr.w r2, lr, #31
-; CHECK-NEXT: bfi r5, r2, #31, #1
-; CHECK-NEXT: lsrs r2, r6, #31
-; CHECK-NEXT: bfi r3, r2, #31, #1
-; CHECK-NEXT: vmov s3, r3
-; CHECK-NEXT: vmov s2, r5
-; CHECK-NEXT: vmov s1, r4
-; CHECK-NEXT: vmov s0, r1
-; CHECK-NEXT: add sp, #32
-; CHECK-NEXT: pop {r4, r5, r6, pc}
+; CHECK-NEXT: vmov s3, r1
+; CHECK-NEXT: lsrs r0, r4, #31
+; CHECK-NEXT: vmov s2, r2
+; CHECK-NEXT: bfi r5, r0, #31, #1
+; CHECK-NEXT: vmov s1, r3
+; CHECK-NEXT: vmov s0, r5
+; CHECK-NEXT: pop {r4, r5, r7, pc}
entry:
%0 = call fast <4 x float> @llvm.copysign.v4f32(<4 x float> %src1, <4 x float> %src2)
ret <4 x float> %0
diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll b/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
index b92163bcbd32..3b49649e8cdf 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
@@ -870,8 +870,8 @@ define void @foo_v4f32_v4f16(<4 x float> *%dest, <4 x i16> *%mask, <4 x half> *%
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
-; CHECK-NEXT: .pad #24
-; CHECK-NEXT: sub sp, #24
+; CHECK-NEXT: .pad #8
+; CHECK-NEXT: sub sp, #8
; CHECK-NEXT: vldrh.s32 q0, [r1]
; CHECK-NEXT: mov.w lr, #0
; CHECK-NEXT: @ implicit-def: $q1
@@ -967,26 +967,22 @@ define void @foo_v4f32_v4f16(<4 x float> *%dest, <4 x i16> *%mask, <4 x half> *%
; CHECK-NEXT: rsbs r2, r2, #0
; CHECK-NEXT: bfi r1, r2, #3, #1
; CHECK-NEXT: lsls r2, r1, #31
-; CHECK-NEXT: ittt ne
-; CHECK-NEXT: vstrne s0, [sp, #12]
-; CHECK-NEXT: ldrne r2, [sp, #12]
+; CHECK-NEXT: itt ne
+; CHECK-NEXT: vmovne r2, s0
; CHECK-NEXT: strne r2, [r0]
; CHECK-NEXT: lsls r2, r1, #30
-; CHECK-NEXT: ittt mi
-; CHECK-NEXT: vstrmi s1, [sp, #8]
-; CHECK-NEXT: ldrmi r2, [sp, #8]
+; CHECK-NEXT: itt mi
+; CHECK-NEXT: vmovmi r2, s1
; CHECK-NEXT: strmi r2, [r0, #4]
; CHECK-NEXT: lsls r2, r1, #29
-; CHECK-NEXT: ittt mi
-; CHECK-NEXT: vstrmi s2, [sp, #4]
-; CHECK-NEXT: ldrmi r2, [sp, #4]
+; CHECK-NEXT: itt mi
+; CHECK-NEXT: vmovmi r2, s2
; CHECK-NEXT: strmi r2, [r0, #8]
; CHECK-NEXT: lsls r1, r1, #28
-; CHECK-NEXT: ittt mi
-; CHECK-NEXT: vstrmi s3, [sp]
-; CHECK-NEXT: ldrmi r1, [sp]
+; CHECK-NEXT: itt mi
+; CHECK-NEXT: vmovmi r1, s3
; CHECK-NEXT: strmi r1, [r0, #12]
-; CHECK-NEXT: add sp, #24
+; CHECK-NEXT: add sp, #8
; CHECK-NEXT: pop {r7, pc}
entry:
%0 = load <4 x i16>, <4 x i16>* %mask, align 2
@@ -1002,8 +998,8 @@ define void @foo_v4f32_v4f16_unaligned(<4 x float> *%dest, <4 x i16> *%mask, <4
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .save {r7, lr}
; CHECK-NEXT: push {r7, lr}
-; CHECK-NEXT: .pad #24
-; CHECK-NEXT: sub sp, #24
+; CHECK-NEXT: .pad #8
+; CHECK-NEXT: sub sp, #8
; CHECK-NEXT: vldrh.s32 q0, [r1]
; CHECK-NEXT: mov.w lr, #0
; CHECK-NEXT: @ implicit-def: $q1
@@ -1099,26 +1095,22 @@ define void @foo_v4f32_v4f16_unaligned(<4 x float> *%dest, <4 x i16> *%mask, <4
; CHECK-NEXT: rsbs r2, r2, #0
; CHECK-NEXT: bfi r1, r2, #3, #1
; CHECK-NEXT: lsls r2, r1, #31
-; CHECK-NEXT: ittt ne
-; CHECK-NEXT: vstrne s0, [sp, #12]
-; CHECK-NEXT: ldrne r2, [sp, #12]
+; CHECK-NEXT: itt ne
+; CHECK-NEXT: vmovne r2, s0
; CHECK-NEXT: strne r2, [r0]
; CHECK-NEXT: lsls r2, r1, #30
-; CHECK-NEXT: ittt mi
-; CHECK-NEXT: vstrmi s1, [sp, #8]
-; CHECK-NEXT: ldrmi r2, [sp, #8]
+; CHECK-NEXT: itt mi
+; CHECK-NEXT: vmovmi r2, s1
; CHECK-NEXT: strmi r2, [r0, #4]
; CHECK-NEXT: lsls r2, r1, #29
-; CHECK-NEXT: ittt mi
-; CHECK-NEXT: vstrmi s2, [sp, #4]
-; CHECK-NEXT: ldrmi r2, [sp, #4]
+; CHECK-NEXT: itt mi
+; CHECK-NEXT: vmovmi r2, s2
; CHECK-NEXT: strmi r2, [r0, #8]
; CHECK-NEXT: lsls r1, r1, #28
-; CHECK-NEXT: ittt mi
-; CHECK-NEXT: vstrmi s3, [sp]
-; CHECK-NEXT: ldrmi r1, [sp]
+; CHECK-NEXT: itt mi
+; CHECK-NEXT: vmovmi r1, s3
; CHECK-NEXT: strmi r1, [r0, #12]
-; CHECK-NEXT: add sp, #24
+; CHECK-NEXT: add sp, #8
; CHECK-NEXT: pop {r7, pc}
entry:
%0 = load <4 x i16>, <4 x i16>* %mask, align 2
diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-store.ll b/llvm/test/CodeGen/Thumb2/mve-masked-store.ll
index 7e320626dd51..215fe12334e9 100644
--- a/llvm/test/CodeGen/Thumb2/mve-masked-store.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-store.ll
@@ -483,8 +483,8 @@ entry:
define arm_aapcs_vfpcc void @masked_v4f32_align1(<4 x float> *%dest, <4 x float> %a, <4 x i32> %b) {
; CHECK-LE-LABEL: masked_v4f32_align1:
; CHECK-LE: @ %bb.0: @ %entry
-; CHECK-LE-NEXT: .pad #20
-; CHECK-LE-NEXT: sub sp, #20
+; CHECK-LE-NEXT: .pad #4
+; CHECK-LE-NEXT: sub sp, #4
; CHECK-LE-NEXT: vcmp.i32 ne, q1, zr
; CHECK-LE-NEXT: movs r1, #0
; CHECK-LE-NEXT: vmrs r2, p0
@@ -501,32 +501,28 @@ define arm_aapcs_vfpcc void @masked_v4f32_align1(<4 x float> *%dest, <4 x float>
; CHECK-LE-NEXT: rsbs r2, r2, #0
; CHECK-LE-NEXT: bfi r1, r2, #3, #1
; CHECK-LE-NEXT: lsls r2, r1, #31
-; CHECK-LE-NEXT: ittt ne
-; CHECK-LE-NEXT: vstrne s0, [sp, #12]
-; CHECK-LE-NEXT: ldrne r2, [sp, #12]
+; CHECK-LE-NEXT: itt ne
+; CHECK-LE-NEXT: vmovne r2, s0
; CHECK-LE-NEXT: strne r2, [r0]
; CHECK-LE-NEXT: lsls r2, r1, #30
-; CHECK-LE-NEXT: ittt mi
-; CHECK-LE-NEXT: vstrmi s1, [sp, #8]
-; CHECK-LE-NEXT: ldrmi r2, [sp, #8]
+; CHECK-LE-NEXT: itt mi
+; CHECK-LE-NEXT: vmovmi r2, s1
; CHECK-LE-NEXT: strmi r2, [r0, #4]
; CHECK-LE-NEXT: lsls r2, r1, #29
-; CHECK-LE-NEXT: ittt mi
-; CHECK-LE-NEXT: vstrmi s2, [sp, #4]
-; CHECK-LE-NEXT: ldrmi r2, [sp, #4]
+; CHECK-LE-NEXT: itt mi
+; CHECK-LE-NEXT: vmovmi r2, s2
; CHECK-LE-NEXT: strmi r2, [r0, #8]
; CHECK-LE-NEXT: lsls r1, r1, #28
-; CHECK-LE-NEXT: ittt mi
-; CHECK-LE-NEXT: vstrmi s3, [sp]
-; CHECK-LE-NEXT: ldrmi r1, [sp]
+; CHECK-LE-NEXT: itt mi
+; CHECK-LE-NEXT: vmovmi r1, s3
; CHECK-LE-NEXT: strmi r1, [r0, #12]
-; CHECK-LE-NEXT: add sp, #20
+; CHECK-LE-NEXT: add sp, #4
; CHECK-LE-NEXT: bx lr
;
; CHECK-BE-LABEL: masked_v4f32_align1:
; CHECK-BE: @ %bb.0: @ %entry
-; CHECK-BE-NEXT: .pad #20
-; CHECK-BE-NEXT: sub sp, #20
+; CHECK-BE-NEXT: .pad #4
+; CHECK-BE-NEXT: sub sp, #4
; CHECK-BE-NEXT: vrev64.32 q2, q1
; CHECK-BE-NEXT: movs r1, #0
; CHECK-BE-NEXT: vcmp.i32 ne, q2, zr
@@ -545,26 +541,22 @@ define arm_aapcs_vfpcc void @masked_v4f32_align1(<4 x float> *%dest, <4 x float>
; CHECK-BE-NEXT: rsbs r2, r2, #0
; CHECK-BE-NEXT: bfi r1, r2, #3, #1
; CHECK-BE-NEXT: lsls r2, r1, #31
-; CHECK-BE-NEXT: ittt ne
-; CHECK-BE-NEXT: vstrne s4, [sp, #12]
-; CHECK-BE-NEXT: ldrne r2, [sp, #12]
+; CHECK-BE-NEXT: itt ne
+; CHECK-BE-NEXT: vmovne r2, s4
; CHECK-BE-NEXT: strne r2, [r0]
; CHECK-BE-NEXT: lsls r2, r1, #30
-; CHECK-BE-NEXT: ittt mi
-; CHECK-BE-NEXT: vstrmi s5, [sp, #8]
-; CHECK-BE-NEXT: ldrmi r2, [sp, #8]
+; CHECK-BE-NEXT: itt mi
+; CHECK-BE-NEXT: vmovmi r2, s5
; CHECK-BE-NEXT: strmi r2, [r0, #4]
; CHECK-BE-NEXT: lsls r2, r1, #29
-; CHECK-BE-NEXT: ittt mi
-; CHECK-BE-NEXT: vstrmi s6, [sp, #4]
-; CHECK-BE-NEXT: ldrmi r2, [sp, #4]
+; CHECK-BE-NEXT: itt mi
+; CHECK-BE-NEXT: vmovmi r2, s6
; CHECK-BE-NEXT: strmi r2, [r0, #8]
; CHECK-BE-NEXT: lsls r1, r1, #28
-; CHECK-BE-NEXT: ittt mi
-; CHECK-BE-NEXT: vstrmi s7, [sp]
-; CHECK-BE-NEXT: ldrmi r1, [sp]
+; CHECK-BE-NEXT: itt mi
+; CHECK-BE-NEXT: vmovmi r1, s7
; CHECK-BE-NEXT: strmi r1, [r0, #12]
-; CHECK-BE-NEXT: add sp, #20
+; CHECK-BE-NEXT: add sp, #4
; CHECK-BE-NEXT: bx lr
entry:
%c = icmp ugt <4 x i32> %b, zeroinitializer
diff --git a/llvm/test/CodeGen/Thumb2/mve-phireg.ll b/llvm/test/CodeGen/Thumb2/mve-phireg.ll
index 955e72c77a5c..bc2ef68a8eb5 100644
--- a/llvm/test/CodeGen/Thumb2/mve-phireg.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-phireg.ll
@@ -147,80 +147,70 @@ vector.body115: ; preds = %vector.body115, %ve
define dso_local i32 @e() #0 {
; CHECK-LABEL: e:
; CHECK: @ %bb.0: @ %entry
-; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
-; CHECK-NEXT: .pad #4
-; CHECK-NEXT: sub sp, #4
+; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr}
+; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, lr}
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
-; CHECK-NEXT: .pad #440
-; CHECK-NEXT: sub sp, #440
-; CHECK-NEXT: vldr s20, .LCPI1_0
+; CHECK-NEXT: .pad #392
+; CHECK-NEXT: sub sp, #392
; CHECK-NEXT: movw r9, :lower16:.L_MergedGlobals
-; CHECK-NEXT: vldr s23, .LCPI1_1
+; CHECK-NEXT: vldr s0, .LCPI1_0
; CHECK-NEXT: movt r9, :upper16:.L_MergedGlobals
-; CHECK-NEXT: mov.w r8, #4
+; CHECK-NEXT: vldr s3, .LCPI1_1
; CHECK-NEXT: mov r5, r9
-; CHECK-NEXT: strh.w r8, [sp, #438]
-; CHECK-NEXT: movs r6, #0
-; CHECK-NEXT: vstr s23, [sp, #48]
; CHECK-NEXT: mov r7, r9
-; CHECK-NEXT: vstr s23, [sp, #40]
+; CHECK-NEXT: ldr r1, [r5, #8]!
+; CHECK-NEXT: vmov r6, s3
+; CHECK-NEXT: ldr r0, [r7, #4]!
; CHECK-NEXT: movw r4, :lower16:e
-; CHECK-NEXT: ldr r1, [r5, #4]!
+; CHECK-NEXT: vmov.32 q4[0], r5
; CHECK-NEXT: movt r4, :upper16:e
-; CHECK-NEXT: str r6, [sp, #76]
-; CHECK-NEXT: vmov s5, r4
-; CHECK-NEXT: vmov.32 q7[0], r5
-; CHECK-NEXT: ldr r0, [r7, #8]!
-; CHECK-NEXT: vmov q0, q7
-; CHECK-NEXT: ldr r2, [sp, #48]
-; CHECK-NEXT: vmov.32 q0[1], r5
-; CHECK-NEXT: vmov s21, r5
-; CHECK-NEXT: vmov.32 q0[2], r2
-; CHECK-NEXT: ldr r2, [sp, #40]
-; CHECK-NEXT: vdup.32 q2, r5
-; CHECK-NEXT: vmov.32 q4[0], r7
-; CHECK-NEXT: vmov q6, q4
-; CHECK-NEXT: vstrw.32 q2, [sp] @ 16-byte Spill
-; CHECK-NEXT: vmov.32 q6[1], r2
-; CHECK-NEXT: vldrw.u32 q3, [sp] @ 16-byte Reload
-; CHECK-NEXT: vmov.f32 s22, s21
-; CHECK-NEXT: vmov.32 q6[2], r7
-; CHECK-NEXT: vmov.f32 s4, s20
-; CHECK-NEXT: vstrw.32 q2, [sp, #16] @ 16-byte Spill
-; CHECK-NEXT: vmov.f32 s6, s21
+; CHECK-NEXT: vmov q1, q4
+; CHECK-NEXT: vmov s1, r7
+; CHECK-NEXT: vmov.32 q1[1], r6
+; CHECK-NEXT: mov.w r10, #0
+; CHECK-NEXT: vmov.32 q1[2], r5
+; CHECK-NEXT: vmov.32 q5[0], r7
+; CHECK-NEXT: vmov.32 q1[3], r4
+; CHECK-NEXT: strd r0, r10, [sp, #24]
+; CHECK-NEXT: vstrw.32 q1, [sp, #76]
+; CHECK-NEXT: vmov q1, q5
+; CHECK-NEXT: vmov s9, r4
+; CHECK-NEXT: vmov.32 q1[1], r7
+; CHECK-NEXT: vdup.32 q6, r7
+; CHECK-NEXT: vmov.f32 s2, s1
+; CHECK-NEXT: vmov.f32 s8, s0
+; CHECK-NEXT: vmov.32 q1[2], r6
+; CHECK-NEXT: vmov q3, q6
+; CHECK-NEXT: vmov q7, q6
+; CHECK-NEXT: vmov.f32 s10, s1
+; CHECK-NEXT: mov.w r8, #4
+; CHECK-NEXT: vmov.32 q1[3], r4
; CHECK-NEXT: vmov.32 q3[0], r4
-; CHECK-NEXT: vmov.32 q2[1], r4
-; CHECK-NEXT: vmov.32 q6[3], r4
-; CHECK-NEXT: vmov.32 q0[3], r4
-; CHECK-NEXT: vmov.f32 s7, s23
-; CHECK-NEXT: str r1, [sp, #72]
-; CHECK-NEXT: vstrw.32 q6, [sp, #124]
+; CHECK-NEXT: vmov.32 q7[1], r4
; CHECK-NEXT: str r1, [r0]
+; CHECK-NEXT: vmov.f32 s11, s3
; CHECK-NEXT: movs r1, #64
+; CHECK-NEXT: strh.w r8, [sp, #390]
+; CHECK-NEXT: vstrw.32 q0, [sp, #44]
; CHECK-NEXT: str r0, [r0]
-; CHECK-NEXT: vstrw.32 q5, [sp, #92]
-; CHECK-NEXT: vstrw.32 q1, [r0]
; CHECK-NEXT: vstrw.32 q2, [r0]
+; CHECK-NEXT: vstrw.32 q7, [r0]
; CHECK-NEXT: vstrw.32 q3, [r0]
-; CHECK-NEXT: vstrw.32 q0, [r0]
+; CHECK-NEXT: vstrw.32 q1, [r0]
; CHECK-NEXT: bl __aeabi_memclr4
-; CHECK-NEXT: vstr s23, [sp, #44]
-; CHECK-NEXT: vmov.32 q7[1], r7
-; CHECK-NEXT: ldr r0, [sp, #44]
-; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload
+; CHECK-NEXT: vmov.32 q5[1], r5
; CHECK-NEXT: vmov.32 q4[1], r4
-; CHECK-NEXT: vmov.32 q7[2], r5
-; CHECK-NEXT: vmov.32 q4[2], r5
-; CHECK-NEXT: vmov.32 q0[0], r6
-; CHECK-NEXT: vmov.32 q4[3], r7
-; CHECK-NEXT: vmov.32 q7[3], r0
+; CHECK-NEXT: vmov.32 q5[2], r7
+; CHECK-NEXT: vmov.32 q4[2], r7
+; CHECK-NEXT: vmov.32 q5[3], r6
+; CHECK-NEXT: vmov.32 q6[0], r10
+; CHECK-NEXT: vmov.32 q4[3], r5
+; CHECK-NEXT: str.w r10, [r9]
; CHECK-NEXT: vstrw.32 q4, [r0]
-; CHECK-NEXT: str.w r6, [r9]
-; CHECK-NEXT: vstrw.32 q0, [r0]
-; CHECK-NEXT: vstrw.32 q7, [r0]
-; CHECK-NEXT: str.w r8, [sp, #356]
+; CHECK-NEXT: vstrw.32 q6, [r0]
+; CHECK-NEXT: vstrw.32 q5, [r0]
+; CHECK-NEXT: str.w r8, [sp, #308]
; CHECK-NEXT: .LBB1_1: @ %for.cond
; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT: b .LBB1_1
More information about the llvm-commits
mailing list