r206456 - ARM64: remove holes from *all* HFAs on the stack.

Tim Northover tnorthover at apple.com
Thu Apr 17 03:20:39 PDT 2014


Author: tnorthover
Date: Thu Apr 17 05:20:38 2014
New Revision: 206456

URL: http://llvm.org/viewvc/llvm-project?rev=206456&view=rev
Log:
ARM64: remove holes from *all* HFAs on the stack.

My first attempt at making sure HFAs were contiguous lived in the block dealing
with padding registers, which meant it only triggered on the first stack-based
HFA. This change extends the coercion to the rest as well.

Another part of PR19432.
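
To make the failure mode concrete, here is a minimal stand-alone sketch of the
VFP register accounting for the new test3 added below, under AAPCS (the
!isDarwinPCS() path). This is not clang code; the loop, names and flags are
made up for illustration, with NumVFPs = 8 on AArch64. The coercion used to
live inside the padding-register block, which is guarded by
PreAllocation < NumVFPs, so it only fired for the first HFA that overflowed
the registers (sp) and left later ones (sp16) as loose floats with holes
between their stack slots:

#include <cstdio>

// Hypothetical model of the VFP accounting for test3; not clang's real data
// structures, just the predicates that matter for this fix.
int main() {
  const unsigned NumVFPs = 8;
  const char *Names[] = {"s0_s3", "s4", "sp", "sp16"};
  const unsigned Wants[] = {4, 1, 4, 4};        // VFP registers each one wants
  const bool IsHA[] = {true, false, true, true};

  unsigned AllocatedVFP = 0;
  for (int I = 0; I < 4; ++I) {
    unsigned PreAllocation = AllocatedVFP;      // registers used before this arg
    AllocatedVFP += Wants[I];
    bool OnStack = IsHA[I] && AllocatedVFP > NumVFPs;
    bool OldCoerced = OnStack && PreAllocation < NumVFPs;  // the old guard
    std::printf("%-5s: on stack=%d  coerced before=%d  coerced now=%d\n",
                Names[I], OnStack, OldCoerced, OnStack);
  }
  return 0;
}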

Modified:
    cfe/trunk/include/clang/CodeGen/CGFunctionInfo.h
    cfe/trunk/lib/CodeGen/TargetInfo.cpp
    cfe/trunk/test/CodeGen/arm64-aapcs-arguments.c

Modified: cfe/trunk/include/clang/CodeGen/CGFunctionInfo.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/include/clang/CodeGen/CGFunctionInfo.h?rev=206456&r1=206455&r2=206456&view=diff
==============================================================================
--- cfe/trunk/include/clang/CodeGen/CGFunctionInfo.h (original)
+++ cfe/trunk/include/clang/CodeGen/CGFunctionInfo.h Thu Apr 17 05:20:38 2014
@@ -151,6 +151,10 @@ public:
     return PaddingType;
   }
 
+  void setPaddingType(llvm::Type *T) {
+    PaddingType = T;
+  }
+
   bool getPaddingInReg() const {
     return PaddingInReg;
   }

Modified: cfe/trunk/lib/CodeGen/TargetInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/TargetInfo.cpp?rev=206456&r1=206455&r2=206456&view=diff
==============================================================================
--- cfe/trunk/lib/CodeGen/TargetInfo.cpp (original)
+++ cfe/trunk/lib/CodeGen/TargetInfo.cpp Thu Apr 17 05:20:38 2014
@@ -3183,26 +3183,28 @@ private:
       const unsigned NumGPRs = 8;
       it->info = classifyArgumentType(it->type, AllocatedVFP, IsHA,
                                       AllocatedGPR, IsSmallAggr);
+
+      // Under AAPCS the 64-bit stack slot alignment means we can't pass HAs
+      // as sequences of floats since they'll get "holes" inserted as
+      // padding by the back end.
+      if (IsHA && AllocatedVFP > NumVFPs && !isDarwinPCS()) {
+          uint32_t NumStackSlots = getContext().getTypeSize(it->type);
+          NumStackSlots = llvm::RoundUpToAlignment(NumStackSlots, 64) / 64;
+
+          llvm::Type *CoerceTy = llvm::ArrayType::get(
+              llvm::Type::getDoubleTy(getVMContext()), NumStackSlots);
+          it->info = ABIArgInfo::getDirect(CoerceTy);
+      }
+
       // If we do not have enough VFP registers for the HA, any VFP registers
       // that are unallocated are marked as unavailable. To achieve this, we add
       // padding of (NumVFPs - PreAllocation) floats.
       if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
         llvm::Type *PaddingTy = llvm::ArrayType::get(
             llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
-        if (isDarwinPCS())
-          it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
-        else {
-          // Under AAPCS the 64-bit stack slot alignment means we can't pass HAs
-          // as sequences of floats since they'll get "holes" inserted as
-          // padding by the back end.
-          uint32_t NumStackSlots = getContext().getTypeSize(it->type);
-          NumStackSlots = llvm::RoundUpToAlignment(NumStackSlots, 64) / 64;
-
-          llvm::Type *CoerceTy = llvm::ArrayType::get(
-              llvm::Type::getDoubleTy(getVMContext()), NumStackSlots);
-          it->info = ABIArgInfo::getDirect(CoerceTy, 0, PaddingTy);
-        }
+        it->info.setPaddingType(PaddingTy);
       }
+
       // If we do not have enough GPRs for the small aggregate, any GPR regs
       // that are unallocated are marked as unavailable.
       if (IsSmallAggr && AllocatedGPR > NumGPRs && PreGPR < NumGPRs) {
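
For reference, the hunk above rounds the HFA's size in bits up to whole 64-bit
stack slots and coerces the argument to that many doubles. A minimal
stand-alone sketch of that arithmetic (the helper name is made up; clang uses
getContext().getTypeSize() and llvm::RoundUpToAlignment() as shown in the
diff):

#include <cstdint>
#include <cstdio>

// Round a bit-size up to whole 64-bit stack slots, mirroring
// RoundUpToAlignment(SizeInBits, 64) / 64 in the hunk above.
static uint32_t numStackSlots(uint32_t SizeInBits) {
  return (SizeInBits + 63) / 64;
}

int main() {
  // A four-float HFA is 128 bits -> 2 slots -> passed as [2 x double].
  std::printf("4 x float  -> [%u x double]\n", numStackSlots(4 * 32));
  // A two-double HFA is also 128 bits -> [2 x double].
  std::printf("2 x double -> [%u x double]\n", numStackSlots(2 * 64));
  return 0;
}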

Modified: cfe/trunk/test/CodeGen/arm64-aapcs-arguments.c
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/arm64-aapcs-arguments.c?rev=206456&r1=206455&r2=206456&view=diff
==============================================================================
--- cfe/trunk/test/CodeGen/arm64-aapcs-arguments.c (original)
+++ cfe/trunk/test/CodeGen/arm64-aapcs-arguments.c Thu Apr 17 05:20:38 2014
@@ -12,3 +12,12 @@ void test1(int x0, __int128 x2_x3, __int
 // CHECK: void @test2(i32 %x0, i128 %x2_x3.coerce, i32 %x4, i128 %x6_x7.coerce, i32 %sp, i128 %sp16.coerce)
 void test2(int x0, Small x2_x3, int x4, Small x6_x7, int sp, Small sp16) {
 }
+
+// We coerce HFAs into a contiguous [N x double] type if they're going on the
+// stack in order to avoid holes. Make sure we get all of them, and not just the
+// first:
+
+// CHECK: void @test3(float %s0_s3.0, float %s0_s3.1, float %s0_s3.2, float %s0_s3.3, float %s4, [3 x float], [2 x double] %sp.coerce, [2 x double] %sp16.coerce)
+typedef struct { float arr[4]; } HFA;
+void test3(HFA s0_s3, float s4, HFA sp, HFA sp16) {
+}
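
Reading the CHECK line for test3: s0_s3 still fits in registers and is
expanded into four separate floats (s0-s3), s4 takes s4, and the anonymous
[3 x float] is the padding argument that marks the remaining s5-s7 as
unavailable (NumVFPs - PreAllocation = 8 - 5). The two HFAs that no longer
fit, sp and sp16, are each coerced to [2 x double] and go on the stack as
contiguous 16-byte blocks; before this change only sp got the coercion and
sp16 was expanded with holes.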
