[llvm] r192678 - Fix MSP430 calling convention to match MSPGCC

Job Noorman jobnoorman at gmail.com
Tue Oct 15 01:19:40 PDT 2013


Author: job
Date: Tue Oct 15 03:19:39 2013
New Revision: 192678

URL: http://llvm.org/viewvc/llvm-project?rev=192678&view=rev
Log:
Fix MSP430 calling convention to match MSPGCC

Added:
    llvm/trunk/test/CodeGen/MSP430/cc_args.ll
    llvm/trunk/test/CodeGen/MSP430/cc_ret.ll
Modified:
    llvm/trunk/lib/Target/MSP430/MSP430CallingConv.td
    llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp

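In short, and inferred from the patch plus the tests it adds rather than stated
in the commit message itself: 16-bit arguments are passed in r15, r14, r13 and
r12, in that order. A wider argument is split into 16-bit pieces that either
all go into the remaining registers, with the most significant piece in the
first register allocated, or all go onto the stack, and once one argument
spills to the stack every later argument follows it there. A few illustrative
declarations with hypothetical names:

    // Illustrative only: hypothetical declarations annotated with the register
    // assignment the revised convention produces for them (see the tests below).
    #include <cstdint>

    void f(int16_t a);                        // a -> r15
    void g(int32_t a);                        // high word -> r15, low word -> r14
    void h(int16_t a, int32_t b, int16_t c);  // a -> r15; b high -> r14,
                                              // b low -> r13; c -> r12
    void k(int16_t a, int64_t b, int16_t c);  // a -> r15; b needs four registers
                                              // but only three remain, so b and c
                                              // are both passed on the stack
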
Modified: llvm/trunk/lib/Target/MSP430/MSP430CallingConv.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/MSP430/MSP430CallingConv.td?rev=192678&r1=192677&r2=192678&view=diff
==============================================================================
--- llvm/trunk/lib/Target/MSP430/MSP430CallingConv.td (original)
+++ llvm/trunk/lib/Target/MSP430/MSP430CallingConv.td Tue Oct 15 03:19:39 2013
@@ -23,18 +23,15 @@ def RetCC_MSP430 : CallingConv<[
 //===----------------------------------------------------------------------===//
 // MSP430 Argument Calling Conventions
 //===----------------------------------------------------------------------===//
-def CC_MSP430 : CallingConv<[
+def CC_MSP430_AssignStack : CallingConv<[
   // Pass by value if the byval attribute is given
   CCIfByVal<CCPassByVal<2, 2>>,
 
   // Promote i8 arguments to i16.
   CCIfType<[i8], CCPromoteToType<i16>>,
 
-  // The first 4 integer arguments of non-varargs functions are passed in
-  // integer registers.
-  CCIfNotVarArg<CCIfType<[i16], CCAssignToReg<[R15W, R14W, R13W, R12W]>>>,
-
   // Integer values get stored in stack slots that are 2 bytes in
   // size and 2-byte aligned.
   CCIfType<[i16], CCAssignToStack<2, 2>>
 ]>;
+

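The convention that stays in TableGen now covers only byval arguments, the
i8-to-i16 promotion and 2-byte stack slots. The removed
CCIfNotVarArg/CCAssignToReg clause assigned each 16-bit piece to a register on
its own, which cannot express the constraints spelled out in the new
AnalyzeArguments comment below: all pieces of a split argument must be passed
in registers or all on the stack, and register pieces must be reversed into
most-significant-first order. That logic therefore moves into C++ in the next
file, while variadic calls keep passing everything on the stack through
CC_MSP430_AssignStack (see AnalyzeVarArgs). The first step there is regrouping
the flattened piece list into per-argument piece counts by OrigArgIndex; a
rough standalone sketch of that regrouping, not the LLVM helper itself:

    // Rough standalone sketch of the regrouping done by ParseFunctionArgs in
    // the diff below: count consecutive pieces sharing an original argument
    // index. Runs on its own; the sample data is hypothetical.
    #include <cstdio>
    #include <vector>

    int main() {
      // One entry per piece, giving the index of the source-level argument it
      // came from; for something like f(i16, i64, i16) the list is {0,1,1,1,1,2}.
      std::vector<unsigned> OrigArgIndex = {0, 1, 1, 1, 1, 2};
      std::vector<unsigned> PartsPerArg;
      for (unsigned i = 0; i < OrigArgIndex.size(); ++i)
        if (i != 0 && OrigArgIndex[i] == OrigArgIndex[i - 1])
          ++PartsPerArg.back();
        else
          PartsPerArg.push_back(1);
      for (unsigned N : PartsPerArg)
        std::printf("%u ", N);  // prints: 1 4 1
      std::printf("\n");
    }
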
Modified: llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp?rev=192678&r1=192677&r2=192678&view=diff
==============================================================================
--- llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/MSP430/MSP430ISelLowering.cpp Tue Oct 15 03:19:39 2013
@@ -250,6 +250,123 @@ getRegForInlineAsmConstraint(const std::
 
 #include "MSP430GenCallingConv.inc"
 
+/// For each argument in a function store the number of pieces it is composed
+/// of.
+template<typename ArgT>
+static void ParseFunctionArgs(const SmallVectorImpl<ArgT> &Args,
+                              SmallVectorImpl<unsigned> &Out) {
+  unsigned CurrentArgIndex = ~0U;
+  for (unsigned i = 0, e = Args.size(); i != e; i++) {
+    if (CurrentArgIndex == Args[i].OrigArgIndex) {
+      Out.back()++;
+    } else {
+      Out.push_back(1);
+      CurrentArgIndex++;
+    }
+  }
+}
+
+static void AnalyzeVarArgs(CCState &State,
+                           const SmallVectorImpl<ISD::OutputArg> &Outs) {
+  State.AnalyzeCallOperands(Outs, CC_MSP430_AssignStack);
+}
+
+static void AnalyzeVarArgs(CCState &State,
+                           const SmallVectorImpl<ISD::InputArg> &Ins) {
+  State.AnalyzeFormalArguments(Ins, CC_MSP430_AssignStack);
+}
+
+/// Analyze incoming and outgoing function arguments. We need custom C++ code
+/// to handle special constraints in the ABI like reversing the order of the
+/// pieces of split arguments. In addition, all pieces of a certain argument
+/// have to be passed either using registers or the stack but never mixing both.
+template<typename ArgT>
+static void AnalyzeArguments(CCState &State,
+                             SmallVectorImpl<CCValAssign> &ArgLocs,
+                             const SmallVectorImpl<ArgT> &Args) {
+  static const uint16_t RegList[] = {
+    MSP430::R15W, MSP430::R14W, MSP430::R13W, MSP430::R12W
+  };
+  static const unsigned NbRegs = array_lengthof(RegList);
+
+  if (State.isVarArg()) {
+    AnalyzeVarArgs(State, Args);
+    return;
+  }
+
+  SmallVector<unsigned, 4> ArgsParts;
+  ParseFunctionArgs(Args, ArgsParts);
+
+  unsigned RegsLeft = NbRegs;
+  bool UseStack = false;
+  unsigned ValNo = 0;
+
+  for (unsigned i = 0, e = ArgsParts.size(); i != e; i++) {
+    MVT ArgVT = Args[ValNo].VT;
+    ISD::ArgFlagsTy ArgFlags = Args[ValNo].Flags;
+    MVT LocVT = ArgVT;
+    CCValAssign::LocInfo LocInfo = CCValAssign::Full;
+
+    // Promote i8 to i16
+    if (LocVT == MVT::i8) {
+      LocVT = MVT::i16;
+      if (ArgFlags.isSExt())
+          LocInfo = CCValAssign::SExt;
+      else if (ArgFlags.isZExt())
+          LocInfo = CCValAssign::ZExt;
+      else
+          LocInfo = CCValAssign::AExt;
+    }
+
+    // Handle byval arguments
+    if (ArgFlags.isByVal()) {
+      State.HandleByVal(ValNo++, ArgVT, LocVT, LocInfo, 2, 2, ArgFlags);
+      continue;
+    }
+
+    unsigned Parts = ArgsParts[i];
+
+    if (!UseStack && Parts <= RegsLeft) {
+      unsigned FirstVal = ValNo;
+      for (unsigned j = 0; j < Parts; j++) {
+        unsigned Reg = State.AllocateReg(RegList, NbRegs);
+        State.addLoc(CCValAssign::getReg(ValNo++, ArgVT, Reg, LocVT, LocInfo));
+        RegsLeft--;
+      }
+
+      // Reverse the order of the pieces to agree with the "big endian" format
+      // required in the calling convention ABI.
+      SmallVectorImpl<CCValAssign>::iterator B = ArgLocs.begin() + FirstVal;
+      std::reverse(B, B + Parts);
+    } else {
+      UseStack = true;
+      for (unsigned j = 0; j < Parts; j++)
+        CC_MSP430_AssignStack(ValNo++, ArgVT, LocVT, LocInfo, ArgFlags, State);
+    }
+  }
+}
+
+static void AnalyzeRetResult(CCState &State,
+                             const SmallVectorImpl<ISD::InputArg> &Ins) {
+  State.AnalyzeCallResult(Ins, RetCC_MSP430);
+}
+
+static void AnalyzeRetResult(CCState &State,
+                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
+  State.AnalyzeReturn(Outs, RetCC_MSP430);
+}
+
+template<typename ArgT>
+static void AnalyzeReturnValues(CCState &State,
+                                SmallVectorImpl<CCValAssign> &RVLocs,
+                                const SmallVectorImpl<ArgT> &Args) {
+  AnalyzeRetResult(State, Args);
+
+  // Reverse split return values to get the "big endian" format required
+  // to agree with the calling convention ABI.
+  std::reverse(RVLocs.begin(), RVLocs.end());
+}
+
 SDValue
 MSP430TargetLowering::LowerFormalArguments(SDValue Chain,
                                            CallingConv::ID CallConv,
@@ -325,7 +442,7 @@ MSP430TargetLowering::LowerCCCArguments(
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                  getTargetMachine(), ArgLocs, *DAG.getContext());
-  CCInfo.AnalyzeFormalArguments(Ins, CC_MSP430);
+  AnalyzeArguments(CCInfo, ArgLocs, Ins);
 
   // Create frame index for the start of the first vararg value
   if (isVarArg) {
@@ -423,7 +540,7 @@ MSP430TargetLowering::LowerReturn(SDValu
                  getTargetMachine(), RVLocs, *DAG.getContext());
 
   // Analize return values.
-  CCInfo.AnalyzeReturn(Outs, RetCC_MSP430);
+  AnalyzeReturnValues(CCInfo, RVLocs, Outs);
 
   SDValue Flag;
   SmallVector<SDValue, 4> RetOps(1, Chain);
@@ -471,8 +588,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDV
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                  getTargetMachine(), ArgLocs, *DAG.getContext());
-
-  CCInfo.AnalyzeCallOperands(Outs, CC_MSP430);
+  AnalyzeArguments(CCInfo, ArgLocs, Outs);
 
   // Get a count of how many bytes are to be pushed on the stack.
   unsigned NumBytes = CCInfo.getNextStackOffset();
@@ -610,7 +726,7 @@ MSP430TargetLowering::LowerCallResult(SD
   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                  getTargetMachine(), RVLocs, *DAG.getContext());
 
-  CCInfo.AnalyzeCallResult(Ins, RetCC_MSP430);
+  AnalyzeReturnValues(CCInfo, RVLocs, Ins);
 
   // Copy all of the result registers out of their specified physreg.
   for (unsigned i = 0; i != RVLocs.size(); ++i) {

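The register/stack decision that AnalyzeArguments makes above can be pictured
with a small standalone model. It assumes each argument has already been
reduced to its number of 16-bit pieces (piece 0 being the least significant
word) and uses register names purely as labels; it is an illustration of the
effect, not the LLVM code:

    // Simplified model: an argument's pieces go to registers only if they all
    // fit in what is left of r15..r12; register pieces are reversed so the most
    // significant one lands in the first register allocated; the first argument
    // that does not fit pushes itself and everything after it onto the stack.
    #include <cstdio>
    #include <vector>

    int main() {
      const char *Regs[] = {"r15", "r14", "r13", "r12"};
      std::vector<unsigned> PartsPerArg = {1, 4, 1};  // like f_i16_i64_i16 below
      unsigned RegsUsed = 0;
      bool UseStack = false;

      for (unsigned Arg = 0; Arg < PartsPerArg.size(); ++Arg) {
        unsigned Parts = PartsPerArg[Arg];
        if (!UseStack && Parts <= 4 - RegsUsed) {
          for (unsigned P = 0; P < Parts; ++P)  // highest piece gets first reg
            std::printf("arg %u, piece %u -> %s\n", Arg, Parts - 1 - P,
                        Regs[RegsUsed++]);
        } else {
          UseStack = true;                      // no mixing within one argument
          for (unsigned P = 0; P < Parts; ++P)
            std::printf("arg %u, piece %u -> stack\n", Arg, P);
        }
      }
    }

For {1, 4, 1} this prints r15 for the first i16 and stack for every piece of
the i64 and the trailing i16, which is exactly what the f_i16_i64_i16 test
below checks.
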
Added: llvm/trunk/test/CodeGen/MSP430/cc_args.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/cc_args.ll?rev=192678&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/cc_args.ll (added)
+++ llvm/trunk/test/CodeGen/MSP430/cc_args.ll Tue Oct 15 03:19:39 2013
@@ -0,0 +1,118 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
+target triple = "msp430---elf"
+
+define void @test() #0 {
+entry:
+; CHECK: test:
+
+; CHECK: mov.w #1, r15
+; CHECK: call #f_i16
+  call void @f_i16(i16 1)
+
+; CHECK: mov.w #772, r14
+; CHECK: mov.w #258, r15
+; CHECK: call #f_i32
+  call void @f_i32(i32 16909060)
+
+; CHECK: mov.w #1800, r12
+; CHECK: mov.w #1286, r13
+; CHECK: mov.w #772, r14
+; CHECK: mov.w #258, r15
+; CHECK: call #f_i64
+  call void @f_i64(i64 72623859790382856)
+
+; CHECK: mov.w #772, r14
+; CHECK: mov.w #258, r15
+; CHECK: mov.w #1800, r12
+; CHECK: mov.w #1286, r13
+; CHECK: call #f_i32_i32
+  call void @f_i32_i32(i32 16909060, i32 84281096)
+
+; CHECK: mov.w #1, r15
+; CHECK: mov.w #772, r13
+; CHECK: mov.w #258, r14
+; CHECK: mov.w #2, r12
+; CHECK: call #f_i16_i32_i16
+  call void @f_i16_i32_i16(i16 1, i32 16909060, i16 2)
+
+; CHECK: mov.w #2, 8(r1)
+; CHECK: mov.w #258, 6(r1)
+; CHECK: mov.w #772, 4(r1)
+; CHECK: mov.w #1286, 2(r1)
+; CHECK: mov.w #1800, 0(r1)
+; CHECK: mov.w #1, r15
+; CHECK: call #f_i16_i64_i16
+  call void @f_i16_i64_i16(i16 1, i64 72623859790382856, i16 2)
+
+  ret void
+}
+
+@g_i16 = common global i16 0, align 2
+@g_i32 = common global i32 0, align 2
+@g_i64 = common global i64 0, align 2
+
+define void @f_i16(i16 %a) #0 {
+; CHECK: f_i16:
+; CHECK: mov.w r15, &g_i16
+  store volatile i16 %a, i16* @g_i16, align 2
+  ret void
+}
+
+define void @f_i32(i32 %a) #0 {
+; CHECK: f_i32:
+; CHECK: mov.w r15, &g_i32+2
+; CHECK: mov.w r14, &g_i32
+  store volatile i32 %a, i32* @g_i32, align 2
+  ret void
+}
+
+define void @f_i64(i64 %a) #0 {
+; CHECK: f_i64:
+; CHECK: mov.w r15, &g_i64+6
+; CHECK: mov.w r14, &g_i64+4
+; CHECK: mov.w r13, &g_i64+2
+; CHECK: mov.w r12, &g_i64
+  store volatile i64 %a, i64* @g_i64, align 2
+  ret void
+}
+
+define void @f_i32_i32(i32 %a, i32 %b) #0 {
+; CHECK: f_i32_i32:
+; CHECK: mov.w r15, &g_i32+2
+; CHECK: mov.w r14, &g_i32
+  store volatile i32 %a, i32* @g_i32, align 2
+; CHECK: mov.w r13, &g_i32+2
+; CHECK: mov.w r12, &g_i32
+  store volatile i32 %b, i32* @g_i32, align 2
+  ret void
+}
+
+define void @f_i16_i32_i16(i16 %a, i32 %b, i16 %c) #0 {
+; CHECK: f_i16_i32_i16:
+; CHECK: mov.w r15, &g_i16
+  store volatile i16 %a, i16* @g_i16, align 2
+; CHECK: mov.w r14, &g_i32+2
+; CHECK: mov.w r13, &g_i32
+  store volatile i32 %b, i32* @g_i32, align 2
+; CHECK: mov.w r12, &g_i16
+  store volatile i16 %c, i16* @g_i16, align 2
+  ret void
+}
+
+define void @f_i16_i64_i16(i16 %a, i64 %b, i16 %c) #0 {
+; CHECK: f_i16_i64_i16:
+; CHECK: mov.w r15, &g_i16
+  store volatile i16 %a, i16* @g_i16, align 2
+; CHECK: mov.w 10(r4), &g_i64+6
+; CHECK: mov.w 8(r4), &g_i64+4
+; CHECK: mov.w 6(r4), &g_i64+2
+; CHECK: mov.w 4(r4), &g_i64
+  store volatile i64 %b, i64* @g_i64, align 2
+; CHECK: mov.w 12(r4), &g_i16
+  store volatile i16 %c, i16* @g_i16, align 2
+  ret void
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }

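The immediates checked in this test, and again in cc_ret.ll below, come from
splitting 0x01020304 (16909060) and 0x0102030405060708 (72623859790382856)
into 16-bit words so that every word is distinct. A small standalone snippet,
not part of the commit, that reproduces them:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t V = 0x0102030405060708ULL;  // 72623859790382856 in the tests
      for (int W = 0; W < 4; ++W)          // word 0 is the least significant
        std::printf("word %d = %llu\n", W,
                    (unsigned long long)((V >> (16 * W)) & 0xFFFF));
      // Prints 1800, 1286, 772 and 258. When such a value is passed in
      // registers, the low word ends up in the lowest-numbered register
      // allocated (r12 for a lone i64) and the high word in r15, matching the
      // CHECK lines above.
    }
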
Added: llvm/trunk/test/CodeGen/MSP430/cc_ret.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MSP430/cc_ret.ll?rev=192678&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/MSP430/cc_ret.ll (added)
+++ llvm/trunk/test/CodeGen/MSP430/cc_ret.ll Tue Oct 15 03:19:39 2013
@@ -0,0 +1,61 @@
+; RUN: llc < %s | FileCheck %s
+
+target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16-a0:16:16"
+target triple = "msp430---elf"
+
+define void @test() #0 {
+entry:
+; CHECK: test:
+
+; CHECK: call #f_i16
+; CHECK: mov.w r15, &g_i16
+  %0 = call i16 @f_i16()
+  store volatile i16 %0, i16* @g_i16
+
+; CHECK: call #f_i32
+; CHECK: mov.w r15, &g_i32+2
+; CHECK: mov.w r14, &g_i32
+  %1 = call i32 @f_i32()
+  store volatile i32 %1, i32* @g_i32
+
+; CHECK: call #f_i64
+; CHECK: mov.w r15, &g_i64+6
+; CHECK: mov.w r14, &g_i64+4
+; CHECK: mov.w r13, &g_i64+2
+; CHECK: mov.w r12, &g_i64
+  %2 = call i64 @f_i64()
+  store volatile i64 %2, i64* @g_i64
+
+  ret void
+}
+
+@g_i16 = common global i16 0, align 2
+@g_i32 = common global i32 0, align 2
+@g_i64 = common global i64 0, align 2
+
+define i16 @f_i16() #0 {
+; CHECK: f_i16:
+; CHECK: mov.w #1, r15
+; CHECK: ret
+  ret i16 1
+}
+
+define i32 @f_i32() #0 {
+; CHECK: f_i32:
+; CHECK: mov.w #772, r14
+; CHECK: mov.w #258, r15
+; CHECK: ret
+  ret i32 16909060
+}
+
+define i64 @f_i64() #0 {
+; CHECK: f_i64:
+; CHECK: mov.w #1800, r12
+; CHECK: mov.w #1286, r13
+; CHECK: mov.w #772, r14
+; CHECK: mov.w #258, r15
+; CHECK: ret
+  ret i64 72623859790382856
+}
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
