[llvm] 5144f73 - [AArch64] Fix windows vararg functions with floats in the fixed args

Martin Storsjö via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 15 01:02:45 PDT 2021


Author: Martin Storsjö
Date: 2021-04-15T11:02:14+03:00
New Revision: 5144f730a8a8a4c7c7984ff945187a9aa83c91ac

URL: https://github.com/llvm/llvm-project/commit/5144f730a8a8a4c7c7984ff945187a9aa83c91ac
DIFF: https://github.com/llvm/llvm-project/commit/5144f730a8a8a4c7c7984ff945187a9aa83c91ac.diff

LOG: [AArch64] Fix windows vararg functions with floats in the fixed args

On Windows, float arguments are normally passed in float registers
in the calling convention for regular functions. For variable
argument functions, floats are passed in integer registers. This
has already been handled correctly for many years.

However, the surprising bit was that floats among the fixed arguments
also are supposed to be passed in integer registers, contrary to regular
functions. (This also seems to be the behaviour on ARM, both
on Windows and on e.g. hardfloat Linux.)

In the calling convention, don't promote shorter floats to f64, but
convert them to integers of the same length. (Floats passed as part of
the actual variable arguments are promoted to double already on the
C/Clang level; the LLVM vararg calling convention doesn't do any
extra promotion of f32 to f64 - this matches how it works on X86 too.)

Technically, this is an ABI break compared to older LLVM versions,
but it fixes compatibility with the official platform ABI. (In practice,
passing floats among the fixed arguments of a variable argument function
is a pretty rare construct.)

Differential Revision: https://reviews.llvm.org/D100365

Added: 
    llvm/test/CodeGen/AArch64/win64_vararg_float.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64CallingConvention.td
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AArch64/AArch64CallingConvention.td b/llvm/lib/Target/AArch64/AArch64CallingConvention.td
index fdcc890bf5892..6277417401fe6 100644
--- a/llvm/lib/Target/AArch64/AArch64CallingConvention.td
+++ b/llvm/lib/Target/AArch64/AArch64CallingConvention.td
@@ -166,7 +166,8 @@ def RetCC_AArch64_AAPCS : CallingConv<[
 // Vararg functions on windows pass floats in integer registers
 let Entry = 1 in
 def CC_AArch64_Win64_VarArg : CallingConv<[
-  CCIfType<[f16, bf16, f32], CCPromoteToType<f64>>,
+  CCIfType<[f16, bf16], CCBitConvertToType<i16>>,
+  CCIfType<[f32], CCBitConvertToType<i32>>,
   CCIfType<[f64], CCBitConvertToType<i64>>,
   CCDelegateTo<CC_AArch64_AAPCS>
 ]>;

diff  --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 718fc8b7c1d0c..2152256e4cb02 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -4732,7 +4732,10 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
       else if (ActualMVT == MVT::i16)
         ValVT = MVT::i16;
     }
-    CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
+    bool UseVarArgCC = false;
+    if (IsWin64)
+      UseVarArgCC = isVarArg;
+    CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, UseVarArgCC);
     bool Res =
         AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo);
     assert(!Res && "Call operand has unhandled type");
@@ -5362,6 +5365,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
   bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
   bool IsSibCall = false;
+  bool IsWin64 =
+      Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
 
   // Check callee args/returns for SVE registers and set calling convention
   // accordingly.
@@ -5411,8 +5416,12 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
                            "currently not supported");
 
       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
-      CCAssignFn *AssignFn = CCAssignFnForCall(CallConv,
-                                               /*IsVarArg=*/ !Outs[i].IsFixed);
+      bool UseVarArgCC = !Outs[i].IsFixed;
+      // On Windows, the fixed arguments in a vararg call are passed in GPRs
+      // too, so use the vararg CC to force them to integer registers.
+      if (IsWin64)
+        UseVarArgCC = true;
+      CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, UseVarArgCC);
       bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
       assert(!Res && "Call operand has unhandled type");
       (void)Res;

diff  --git a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
index ef0d4c6ee93c9..17dca37be12b1 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64CallLowering.cpp
@@ -164,11 +164,17 @@ struct ReturnedArgCallReturnHandler : public CallReturnHandler {
 struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
   OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                      MachineInstrBuilder MIB, CCAssignFn *AssignFn,
-                     CCAssignFn *AssignFnVarArg, bool IsTailCall = false,
-                     int FPDiff = 0)
+                     CCAssignFn *AssignFnVarArg, bool IsVarArg,
+                     bool IsTailCall = false, int FPDiff = 0)
       : OutgoingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
         AssignFnVarArg(AssignFnVarArg), IsTailCall(IsTailCall), FPDiff(FPDiff),
-        StackSize(0), SPReg(0) {}
+        StackSize(0), SPReg(0) {
+    MachineFunction &MF = MIRBuilder.getMF();
+    const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
+    bool IsWin =
+        Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
+    UseVarArgsCCForFixed = IsVarArg && IsWin;
+  }
 
   Register getStackAddress(uint64_t Size, int64_t Offset,
                            MachinePointerInfo &MPO,
@@ -240,7 +246,7 @@ struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
                  ISD::ArgFlagsTy Flags,
                  CCState &State) override {
     bool Res;
-    if (Info.IsFixed)
+    if (Info.IsFixed && !UseVarArgsCCForFixed)
       Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
     else
       Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);
@@ -252,6 +258,7 @@ struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
   MachineInstrBuilder MIB;
   CCAssignFn *AssignFnVarArg;
   bool IsTailCall;
+  bool UseVarArgsCCForFixed;
 
   /// For tail calls, the byte offset of the call's argument area from the
   /// callee's. Unused elsewhere.
@@ -376,7 +383,8 @@ bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
       splitToValueTypes(CurArgInfo, SplitArgs, DL, CC);
     }
 
-    OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn);
+    OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn,
+                               F.isVarArg());
     Success =
         handleAssignments(MIRBuilder, SplitArgs, Handler, CC, F.isVarArg());
   }
@@ -879,7 +887,7 @@ bool AArch64CallLowering::lowerTailCall(
 
   // Do the actual argument marshalling.
   OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
-                             AssignFnVarArg, true, FPDiff);
+                             AssignFnVarArg, Info.IsVarArg, true, FPDiff);
   if (!handleAssignments(MIRBuilder, OutArgs, Handler, CalleeCC, Info.IsVarArg))
     return false;
 
@@ -991,7 +999,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
 
   // Do the actual argument marshalling.
   OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
-                             AssignFnVarArg, false);
+                             AssignFnVarArg, Info.IsVarArg, false);
   if (!handleAssignments(MIRBuilder, OutArgs, Handler, Info.CallConv,
                          Info.IsVarArg))
     return false;

diff  --git a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll
index 9e46f5076bce9..30ca231ba824b 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/call-translator-tail-call.ll
@@ -181,9 +181,9 @@ define void @test_varargs() {
   ; WINDOWS:   [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
   ; WINDOWS:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
   ; WINDOWS:   $w0 = COPY [[C]](s32)
-  ; WINDOWS:   $d0 = COPY [[C1]](s64)
-  ; WINDOWS:   $x1 = COPY [[C2]](s64)
-  ; WINDOWS:   TCRETURNdi @varargs, 0, csr_aarch64_aapcs, implicit $sp, implicit $w0, implicit $d0, implicit $x1
+  ; WINDOWS:   $x1 = COPY [[C1]](s64)
+  ; WINDOWS:   $x2 = COPY [[C2]](s64)
+  ; WINDOWS:   TCRETURNdi @varargs, 0, csr_aarch64_aapcs, implicit $sp, implicit $w0, implicit $x1, implicit $x2
   tail call void(i32, double, i64, ...) @varargs(i32 42, double 1.0, i64 12)
   ret void
 }
@@ -217,10 +217,10 @@ define void @test_varargs_2() {
   ; WINDOWS:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
   ; WINDOWS:   [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 314
   ; WINDOWS:   $w0 = COPY [[C]](s32)
-  ; WINDOWS:   $d0 = COPY [[C1]](s64)
-  ; WINDOWS:   $x1 = COPY [[C2]](s64)
-  ; WINDOWS:   $x2 = COPY [[C3]](s64)
-  ; WINDOWS:   TCRETURNdi @varargs, 0, csr_aarch64_aapcs, implicit $sp, implicit $w0, implicit $d0, implicit $x1, implicit $x2
+  ; WINDOWS:   $x1 = COPY [[C1]](s64)
+  ; WINDOWS:   $x2 = COPY [[C2]](s64)
+  ; WINDOWS:   $x3 = COPY [[C3]](s64)
+  ; WINDOWS:   TCRETURNdi @varargs, 0, csr_aarch64_aapcs, implicit $sp, implicit $w0, implicit $x1, implicit $x2, implicit $x3
   tail call void(i32, double, i64, ...) @varargs(i32 42, double 1.0, i64 12, i64 314)
   ret void
 }
@@ -276,10 +276,10 @@ define void @test_varargs_3([8 x <2 x double>], <4 x half> %arg) {
   ; WINDOWS:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
   ; WINDOWS:   [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 314
   ; WINDOWS:   $w0 = COPY [[C]](s32)
-  ; WINDOWS:   $d0 = COPY [[C1]](s64)
-  ; WINDOWS:   $x1 = COPY [[C2]](s64)
-  ; WINDOWS:   $x2 = COPY [[C3]](s64)
-  ; WINDOWS:   TCRETURNdi @varargs, 0, csr_aarch64_aapcs, implicit $sp, implicit $w0, implicit $d0, implicit $x1, implicit $x2
+  ; WINDOWS:   $x1 = COPY [[C1]](s64)
+  ; WINDOWS:   $x2 = COPY [[C2]](s64)
+  ; WINDOWS:   $x3 = COPY [[C3]](s64)
+  ; WINDOWS:   TCRETURNdi @varargs, 0, csr_aarch64_aapcs, implicit $sp, implicit $w0, implicit $x1, implicit $x2, implicit $x3
   tail call void(i32, double, i64, ...) @varargs(i32 42, double 1.0, i64 12, i64 314)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AArch64/win64_vararg_float.ll b/llvm/test/CodeGen/AArch64/win64_vararg_float.ll
new file mode 100644
index 0000000000000..bb866e4cf6c99
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/win64_vararg_float.ll
@@ -0,0 +1,108 @@
+; RUN: llc < %s -mtriple=aarch64-windows -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,DAGISEL
+; RUN: llc < %s -mtriple=aarch64-windows -verify-machineinstrs -O0 -fast-isel | FileCheck %s --check-prefixes=CHECK,O0
+; RUN: llc < %s -mtriple=aarch64-windows -verify-machineinstrs -O0 -global-isel | FileCheck %s --check-prefixes=CHECK,O0
+
+define void @float_va_fn(float %a, i32 %b, ...) nounwind {
+entry:
+; CHECK-LABEL: float_va_fn:
+; O0: str x7, [sp, #72]
+; O0: str x6, [sp, #64]
+; O0: str x5, [sp, #56]
+; O0: str x4, [sp, #48]
+; O0: str x3, [sp, #40]
+; O0: str x2, [sp, #32]
+; CHECK: fmov s0, w0
+; O0: add x8, sp, #32
+; O0: str x8, [sp, #8]
+; O0: ldr x0, [sp, #8]
+; DAGISEL: add x0, sp, #16
+; DAGISEL: stp x2, x3, [sp, #16]
+; DAGISEL: stp x4, x5, [sp, #32]
+; DAGISEL: stp x6, x7, [sp, #48]
+; CHECK: bl f_va_list
+  %ap = alloca i8*, align 8
+  %0 = bitcast i8** %ap to i8*
+  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
+  call void @llvm.va_start(i8* nonnull %0)
+  %1 = load i8*, i8** %ap, align 8
+  call void @f_va_list(float %a, i8* %1)
+  call void @llvm.va_end(i8* nonnull %0)
+  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
+  ret void
+}
+
+declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture)
+declare void @llvm.va_start(i8*)
+declare void @f_va_list(float, i8*)
+declare void @llvm.va_end(i8*)
+declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture)
+
+define void @double_va_fn(double %a, i32 %b, ...) nounwind {
+entry:
+; CHECK-LABEL: double_va_fn:
+; O0: str x7, [sp, #72]
+; O0: str x6, [sp, #64]
+; O0: str x5, [sp, #56]
+; O0: str x4, [sp, #48]
+; O0: str x3, [sp, #40]
+; O0: str x2, [sp, #32]
+; CHECK: fmov d0, x0
+; O0: add x8, sp, #32
+; O0: str x8, [sp, #8]
+; O0: ldr x0, [sp, #8]
+; DAGISEL: add x0, sp, #16
+; DAGISEL: stp x2, x3, [sp, #16]
+; DAGISEL: stp x4, x5, [sp, #32]
+; DAGISEL: stp x6, x7, [sp, #48]
+; CHECK: bl d_va_list
+  %ap = alloca i8*, align 8
+  %0 = bitcast i8** %ap to i8*
+  call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0)
+  call void @llvm.va_start(i8* nonnull %0)
+  %1 = load i8*, i8** %ap, align 8
+  call void @d_va_list(double %a, i8* %1)
+  call void @llvm.va_end(i8* nonnull %0)
+  call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0)
+  ret void
+}
+
+declare void @d_va_list(double, i8*)
+
+define void @call_f_va() nounwind {
+entry:
+; CHECK-LABEL: call_f_va:
+; DAGISEL: mov w0, #1065353216
+; FASTISEL: mov w0, #1065353216
+; GISEL: fmov s0, #1.00000000
+; GISEL: fmov w0, s0
+; CHECK: mov w1, #2
+; DAGISEL: mov x2, #4613937818241073152
+; FASTISEL: mov x2, #4613937818241073152
+; GISEL: fmov d0, #3.00000000
+; GISEL: fmov x2, d0
+; CHECK: mov w3, #4
+; CHECK: b other_f_va_fn
+  tail call void (float, i32, ...) @other_f_va_fn(float 1.000000e+00, i32 2, double 3.000000e+00, i32 4)
+  ret void
+}
+
+declare void @other_f_va_fn(float, i32, ...)
+
+define void @call_d_va() nounwind {
+entry:
+; CHECK-LABEL: call_d_va:
+; DAGISEL: mov x0, #4607182418800017408
+; FASTISEL: mov x0, #4607182418800017408
+; GISEL: fmov d0, #1.00000000
+; GISEL: fmov x0, d0
+; CHECK: mov w1, #2
+; DAGISEL: mov x2, #4613937818241073152
+; FASTISEL: mov x2, #4613937818241073152
+; GISEL: fmov d0, #3.00000000
+; CHECK: mov w3, #4
+; CHECK: b other_d_va_fn
+  tail call void (double, i32, ...) @other_d_va_fn(double 1.000000e+00, i32 2, double 3.000000e+00, i32 4) #4
+  ret void
+}
+
+declare dso_local void @other_d_va_fn(double, i32, ...)


        


More information about the llvm-commits mailing list