[llvm] r367859 - [AArch64] Implement initial SVE calling convention support

Cullen Rhodes via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 5 06:44:10 PDT 2019


Author: c-rhodes
Date: Mon Aug  5 06:44:10 2019
New Revision: 367859

URL: http://llvm.org/viewvc/llvm-project?rev=367859&view=rev
Log:
[AArch64] Implement initial SVE calling convention support

Summary:

This patch adds initial support for the SVE calling convention so that
SVE types can be passed as arguments and return values to and from
subroutines.

The SVE AAPCS states [1]:

    z0-z7 are used to pass scalable vector arguments to a subroutine,
    and to return scalable vector results from a function. If a
    subroutine takes arguments in scalable vector or predicate
    registers, or if it is a function that returns results in such
    registers, it must ensure that the entire contents of z8-z23 are
    preserved across the call. In other cases it need only preserve the
    low 64 bits of z8-z15, as described in §5.1.2.

    p0-p3 are used to pass scalable predicate arguments to a subroutine
    and to return scalable predicate results from a function. If a
    subroutine takes arguments in scalable vector or predicate
    registers, or if it is a function that returns results in these
    registers, it must ensure that p4-p15 are preserved across the call.
    In other cases it need not preserve any scalable predicate register
    contents.

SVE predicate and data arguments are passed indirectly (i.e. spilled to the
stack, with the address passed instead) once they exceed the argument
registers defined by the PCS referenced above. Until SVE stack support is
merged we can't spill SVE registers to the stack, so currently an
llvm_unreachable marks where this will eventually be handled.
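
As an illustration (a minimal sketch, not taken from the patch; the
function name is made up), a subroutine like the following has its
scalable vector argument passed in z0, its predicate argument in p0, and
its result returned in z0:

    define <vscale x 4 x i32> @example(<vscale x 4 x i32> %v,
                                       <vscale x 4 x i1> %p) {
      ret <vscale x 4 x i32> %v
    }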

[1] https://static.docs.arm.com/100986/0000/100986_0000.pdf

Reviewed By: ostannard

Differential Revision: https://reviews.llvm.org/D65448

Added:
    llvm/trunk/test/CodeGen/AArch64/sve-calling-convention.ll
Modified:
    llvm/trunk/include/llvm/IR/CallingConv.h
    llvm/trunk/lib/Target/AArch64/AArch64CallingConvention.td
    llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp

Modified: llvm/trunk/include/llvm/IR/CallingConv.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/CallingConv.h?rev=367859&r1=367858&r2=367859&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/CallingConv.h (original)
+++ llvm/trunk/include/llvm/IR/CallingConv.h Mon Aug  5 06:44:10 2019
@@ -222,6 +222,9 @@ namespace CallingConv {
     // Calling convention between AArch64 Advanced SIMD functions
     AArch64_VectorCall = 97,
 
+    /// Calling convention between AArch64 SVE functions
+    AArch64_SVE_VectorCall = 98,
+
     /// The highest possible calling convention ID. Must be some 2^k - 1.
     MaxID = 1023
   };

Modified: llvm/trunk/lib/Target/AArch64/AArch64CallingConvention.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64CallingConvention.td?rev=367859&r1=367858&r2=367859&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64CallingConvention.td (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64CallingConvention.td Mon Aug  5 06:44:10 2019
@@ -70,6 +70,18 @@ def CC_AArch64_AAPCS : CallingConv<[
 
   CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,
 
+  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
+            nxv1f32, nxv2f32, nxv4f32, nxv1f64, nxv2f64],
+           CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>,
+  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
+            nxv1f32, nxv2f32, nxv4f32, nxv1f64, nxv2f64],
+           CCPassIndirect<i64>>,
+
+  CCIfType<[nxv2i1, nxv4i1, nxv8i1, nxv16i1],
+           CCAssignToReg<[P0, P1, P2, P3]>>,
+  CCIfType<[nxv2i1, nxv4i1, nxv8i1, nxv16i1],
+           CCPassIndirect<i64>>,
+
   // Handle i1, i8, i16, i32, i64, f32, f64 and v2f64 by passing in registers,
   // up to eight each of GPR and FPR.
   CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
@@ -135,7 +147,14 @@ def RetCC_AArch64_AAPCS : CallingConv<[
       CCAssignToRegWithShadow<[D0, D1, D2, D3, D4, D5, D6, D7],
                               [Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
   CCIfType<[f128, v2i64, v4i32, v8i16, v16i8, v4f32, v2f64, v8f16],
-      CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>
+      CCAssignToReg<[Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7]>>,
+
+  CCIfType<[nxv16i8, nxv8i16, nxv4i32, nxv2i64, nxv2f16, nxv4f16, nxv8f16,
+            nxv1f32, nxv2f32, nxv4f32, nxv1f64, nxv2f64],
+           CCAssignToReg<[Z0, Z1, Z2, Z3, Z4, Z5, Z6, Z7]>>,
+
+  CCIfType<[nxv2i1, nxv4i1, nxv8i1, nxv16i1],
+           CCAssignToReg<[P0, P1, P2, P3]>>
 ]>;
 
 // Vararg functions on windows pass floats in integer registers
@@ -325,6 +344,13 @@ def CSR_AArch64_AAVPCS : CalleeSavedRegs
                                           X23, X24, X25, X26, X27, X28,
                                           (sequence "Q%u", 8, 23))>;
 
+// Functions taking SVE arguments or returning an SVE type
+// must (additionally) preserve full Z8-Z23 and predicate registers P4-P15
+def CSR_AArch64_SVE_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
+                                               X23, X24, X25, X26, X27, X28,
+                                               (sequence "Z%u", 8, 23),
+                                               (sequence "P%u", 4, 15))>;
+
 // Constructors and destructors return 'this' in the iOS 64-bit C++ ABI; since
 // 'this' and the pointer return value are both passed in X0 in these cases,
 // this can be partially modelled by treating X0 as a callee-saved register;

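As a hypothetical sketch of the CCPassIndirect fallback above (the
function name is made up): once z0-z7 are exhausted, a ninth scalable
vector argument is assigned indirectly, which at this revision hits the
llvm_unreachable added in AArch64ISelLowering.cpp below:

    ; %a8 has no data register left, so it matches CCPassIndirect<i64>.
    define void @nine(<vscale x 4 x i32> %a0, <vscale x 4 x i32> %a1,
                      <vscale x 4 x i32> %a2, <vscale x 4 x i32> %a3,
                      <vscale x 4 x i32> %a4, <vscale x 4 x i32> %a5,
                      <vscale x 4 x i32> %a6, <vscale x 4 x i32> %a7,
                      <vscale x 4 x i32> %a8) {
      ret void
    }
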
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=367859&r1=367858&r2=367859&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Mon Aug  5 06:44:10 2019
@@ -161,6 +161,29 @@ AArch64TargetLowering::AArch64TargetLowe
     addQRTypeForNEON(MVT::v8f16);
   }
 
+  if (Subtarget->hasSVE()) {
+    // Add legal sve predicate types
+    addRegisterClass(MVT::nxv2i1, &AArch64::PPRRegClass);
+    addRegisterClass(MVT::nxv4i1, &AArch64::PPRRegClass);
+    addRegisterClass(MVT::nxv8i1, &AArch64::PPRRegClass);
+    addRegisterClass(MVT::nxv16i1, &AArch64::PPRRegClass);
+
+    // Add legal sve data types
+    addRegisterClass(MVT::nxv16i8, &AArch64::ZPRRegClass);
+    addRegisterClass(MVT::nxv8i16, &AArch64::ZPRRegClass);
+    addRegisterClass(MVT::nxv4i32, &AArch64::ZPRRegClass);
+    addRegisterClass(MVT::nxv2i64, &AArch64::ZPRRegClass);
+
+    addRegisterClass(MVT::nxv2f16, &AArch64::ZPRRegClass);
+    addRegisterClass(MVT::nxv4f16, &AArch64::ZPRRegClass);
+    addRegisterClass(MVT::nxv8f16, &AArch64::ZPRRegClass);
+    addRegisterClass(MVT::nxv1f32, &AArch64::ZPRRegClass);
+    addRegisterClass(MVT::nxv2f32, &AArch64::ZPRRegClass);
+    addRegisterClass(MVT::nxv4f32, &AArch64::ZPRRegClass);
+    addRegisterClass(MVT::nxv1f64, &AArch64::ZPRRegClass);
+    addRegisterClass(MVT::nxv2f64, &AArch64::ZPRRegClass);
+  }
+
   // Compute derived properties from the register classes
   computeRegisterProperties(Subtarget->getRegisterInfo());
 
@@ -3139,6 +3162,11 @@ SDValue AArch64TargetLowering::LowerForm
         RC = &AArch64::FPR64RegClass;
       else if (RegVT == MVT::f128 || RegVT.is128BitVector())
         RC = &AArch64::FPR128RegClass;
+      else if (RegVT.isScalableVector() &&
+               RegVT.getVectorElementType() == MVT::i1)
+        RC = &AArch64::PPRRegClass;
+      else if (RegVT.isScalableVector())
+        RC = &AArch64::ZPRRegClass;
       else
         llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
 
@@ -3154,6 +3182,10 @@ SDValue AArch64TargetLowering::LowerForm
         llvm_unreachable("Unknown loc info!");
       case CCValAssign::Full:
         break;
+      case CCValAssign::Indirect:
+        assert(VA.getValVT().isScalableVector() &&
+               "Only scalable vectors can be passed indirectly");
+        llvm_unreachable("Spilling of SVE vectors not yet implemented");
       case CCValAssign::BCvt:
         ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), ArgValue);
         break;
@@ -3194,6 +3226,10 @@ SDValue AArch64TargetLowering::LowerForm
       case CCValAssign::BCvt:
         MemVT = VA.getLocVT();
         break;
+      case CCValAssign::Indirect:
+        assert(VA.getValVT().isScalableVector() &&
+               "Only scalable vectors can be passed indirectly");
+        llvm_unreachable("Spilling of SVE vectors not yet implemented");
       case CCValAssign::SExt:
         ExtType = ISD::SEXTLOAD;
         break;
@@ -3779,6 +3815,10 @@ AArch64TargetLowering::LowerCall(CallLow
     case CCValAssign::FPExt:
       Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
       break;
+    case CCValAssign::Indirect:
+      assert(VA.getValVT().isScalableVector() &&
+             "Only scalable vectors can be passed indirectly");
+      llvm_unreachable("Spilling of SVE vectors not yet implemented");
     }
 
     if (VA.isRegLoc()) {
@@ -3925,6 +3965,20 @@ AArch64TargetLowering::LowerCall(CallLow
     Ops.push_back(DAG.getRegister(RegToPass.first,
                                   RegToPass.second.getValueType()));
 
+  // Check callee args/returns for SVE registers and set calling convention
+  // accordingly.
+  if (CallConv == CallingConv::C) {
+    bool CalleeOutSVE = any_of(Outs, [](ISD::OutputArg &Out){
+      return Out.VT.isScalableVector();
+    });
+    bool CalleeInSVE = any_of(Ins, [](ISD::InputArg &In){
+      return In.VT.isScalableVector();
+    });
+
+    if (CalleeInSVE || CalleeOutSVE)
+      CallConv = CallingConv::AArch64_SVE_VectorCall;
+  }
+
   // Add a register mask operand representing the call-preserved registers.
   const uint32_t *Mask;
   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();

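The retagging in LowerCall above means front ends need not mark SVE calls
specially: a default C-convention call whose signature involves scalable
types picks up the SVE callee-saved mask automatically. An illustrative
sketch (not from the patch; the function names are made up):

    declare <vscale x 2 x i64> @callee(<vscale x 2 x i64>)

    ; The call below is lowered as AArch64_SVE_VectorCall, so the call
    ; site carries the csr_aarch64_sve_aapcs register mask (see the new
    ; test at the end of this patch).
    define <vscale x 2 x i64> @caller(<vscale x 2 x i64> %x) {
      %r = call <vscale x 2 x i64> @callee(<vscale x 2 x i64> %x)
      ret <vscale x 2 x i64> %r
    }
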
Modified: llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp?rev=367859&r1=367858&r2=367859&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64RegisterInfo.cpp Mon Aug  5 06:44:10 2019
@@ -120,6 +120,8 @@ AArch64RegisterInfo::getCallPreservedMas
                : CSR_AArch64_CXX_TLS_Darwin_RegMask;
   if (CC == CallingConv::AArch64_VectorCall)
     return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
+  if (CC == CallingConv::AArch64_SVE_VectorCall)
+    return CSR_AArch64_SVE_AAPCS_RegMask;
   if (MF.getSubtarget<AArch64Subtarget>().getTargetLowering()
           ->supportSwiftError() &&
       MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))

Added: llvm/trunk/test/CodeGen/AArch64/sve-calling-convention.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/sve-calling-convention.ll?rev=367859&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/sve-calling-convention.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/sve-calling-convention.ll Mon Aug  5 06:44:10 2019
@@ -0,0 +1,121 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -stop-after=finalize-isel < %s | FileCheck %s
+
+; CHECK-LABEL: name: nosve_signature
+define i32 @nosve_signature() nounwind {
+  ret i32 42
+}
+
+; CHECK-LABEL: name: sve_signature_ret_vec
+define <vscale x 4 x i32> @sve_signature_ret_vec() nounwind {
+  ret <vscale x 4 x i32> undef
+}
+
+; CHECK-LABEL: name: sve_signature_ret_pred
+define <vscale x 4 x i1> @sve_signature_ret_pred() nounwind {
+  ret <vscale x 4 x i1> undef
+}
+
+; CHECK-LABEL: name: sve_signature_arg_vec
+define void @sve_signature_arg_vec(<vscale x 4 x i32> %arg) nounwind {
+  ret void
+}
+
+; CHECK-LABEL: name: sve_signature_arg_pred
+define void @sve_signature_arg_pred(<vscale x 4 x i1> %arg) nounwind {
+  ret void
+}
+
+; CHECK-LABEL: name: caller_nosve_signature
+; CHECK: BL @nosve_signature, csr_aarch64_aapcs
+define i32 @caller_nosve_signature() nounwind {
+  %res = call i32 @nosve_signature()
+  ret i32 %res
+}
+
+; CHECK-LABEL: name: sve_signature_ret_vec_caller
+; CHECK: BL @sve_signature_ret_vec, csr_aarch64_sve_aapcs
+define <vscale x 4 x i32>  @sve_signature_ret_vec_caller() nounwind {
+  %res = call <vscale x 4 x i32> @sve_signature_ret_vec()
+  ret <vscale x 4 x i32> %res
+}
+
+; CHECK-LABEL: name: sve_signature_ret_pred_caller
+; CHECK: BL @sve_signature_ret_pred, csr_aarch64_sve_aapcs
+define <vscale x 4 x i1>  @sve_signature_ret_pred_caller() nounwind {
+  %res = call <vscale x 4 x i1> @sve_signature_ret_pred()
+  ret <vscale x 4 x i1> %res
+}
+
+; CHECK-LABEL: name: sve_signature_arg_vec_caller
+; CHECK: BL @sve_signature_arg_vec, csr_aarch64_sve_aapcs
+define void @sve_signature_arg_vec_caller(<vscale x 4 x i32> %arg) nounwind {
+  call void @sve_signature_arg_vec(<vscale x 4 x i32> %arg)
+  ret void
+}
+
+; CHECK-LABEL: name: sve_signature_arg_pred_caller
+; CHECK: BL @sve_signature_arg_pred, csr_aarch64_sve_aapcs
+define void @sve_signature_arg_pred_caller(<vscale x 4 x i1> %arg) nounwind {
+  call void @sve_signature_arg_pred(<vscale x 4 x i1> %arg)
+  ret void
+}
+
+; CHECK-LABEL: name: sve_signature_many_arg_vec
+; CHECK: [[RES:%[0-9]+]]:zpr = COPY $z7
+; CHECK: $z0 = COPY [[RES]]
+; CHECK: RET_ReallyLR implicit $z0
+define <vscale x 4 x i32> @sve_signature_many_arg_vec(<vscale x 4 x i32> %arg1, <vscale x 4 x i32> %arg2, <vscale x 4 x i32> %arg3, <vscale x 4 x i32> %arg4, <vscale x 4 x i32> %arg5, <vscale x 4 x i32> %arg6, <vscale x 4 x i32> %arg7, <vscale x 4 x i32> %arg8) nounwind {
+  ret <vscale x 4 x i32> %arg8
+}
+
+; CHECK-LABEL: name: sve_signature_many_arg_pred
+; CHECK: [[RES:%[0-9]+]]:ppr = COPY $p3
+; CHECK: $p0 = COPY [[RES]]
+; CHECK: RET_ReallyLR implicit $p0
+define <vscale x 4 x i1> @sve_signature_many_arg_pred(<vscale x 4 x i1> %arg1, <vscale x 4 x i1> %arg2, <vscale x 4 x i1> %arg3, <vscale x 4 x i1> %arg4) nounwind {
+  ret <vscale x 4 x i1> %arg4
+}
+
+; CHECK-LABEL: name: sve_signature_vec
+; CHECK: [[RES:%[0-9]+]]:zpr = COPY $z1
+; CHECK: $z0 = COPY [[RES]]
+; CHECK: RET_ReallyLR implicit $z0
+define <vscale x 4 x i32> @sve_signature_vec(<vscale x 4 x i32> %arg1, <vscale x 4 x i32> %arg2) nounwind {
+ ret <vscale x 4 x i32> %arg2
+}
+
+; CHECK-LABEL: name: sve_signature_pred
+; CHECK: [[RES:%[0-9]+]]:ppr = COPY $p1
+; CHECK: $p0 = COPY [[RES]]
+; CHECK: RET_ReallyLR implicit $p0
+define <vscale x 4 x i1> @sve_signature_pred(<vscale x 4 x i1> %arg1, <vscale x 4 x i1> %arg2) nounwind {
+  ret <vscale x 4 x i1> %arg2
+}
+
+; CHECK-LABEL: name: sve_signature_vec_caller
+; CHECK-DAG: [[ARG2:%[0-9]+]]:zpr = COPY $z1
+; CHECK-DAG: [[ARG1:%[0-9]+]]:zpr = COPY $z0
+; CHECK-DAG: $z0 = COPY [[ARG2]]
+; CHECK-DAG: $z1 = COPY [[ARG1]]
+; CHECK-NEXT: BL @sve_signature_vec, csr_aarch64_sve_aapcs
+; CHECK: [[RES:%[0-9]+]]:zpr = COPY $z0
+; CHECK: $z0 = COPY [[RES]]
+; CHECK: RET_ReallyLR implicit $z0
+define <vscale x 4 x i32> @sve_signature_vec_caller(<vscale x 4 x i32> %arg1, <vscale x 4 x i32> %arg2) nounwind {
+  %res = call <vscale x 4 x i32> @sve_signature_vec(<vscale x 4 x i32> %arg2, <vscale x 4 x i32> %arg1)
+  ret <vscale x 4 x i32> %res
+}
+
+; CHECK-LABEL: name: sve_signature_pred_caller
+; CHECK-DAG: [[ARG2:%[0-9]+]]:ppr = COPY $p1
+; CHECK-DAG: [[ARG1:%[0-9]+]]:ppr = COPY $p0
+; CHECK-DAG: $p0 = COPY [[ARG2]]
+; CHECK-DAG: $p1 = COPY [[ARG1]]
+; CHECK-NEXT: BL @sve_signature_pred, csr_aarch64_sve_aapcs
+; CHECK: [[RES:%[0-9]+]]:ppr = COPY $p0
+; CHECK: $p0 = COPY [[RES]]
+; CHECK: RET_ReallyLR implicit $p0
+define <vscale x 4 x i1> @sve_signature_pred_caller(<vscale x 4 x i1> %arg1, <vscale x 4 x i1> %arg2) nounwind {
+  %res = call <vscale x 4 x i1> @sve_signature_pred(<vscale x 4 x i1> %arg2, <vscale x 4 x i1> %arg1)
+  ret <vscale x 4 x i1> %res
+}
