[llvm] r181972 - Patch number 2 for mips16/32 floating point interoperability stubs.

Reed Kotler rkotler at mips.com
Wed May 15 19:17:43 PDT 2013


Author: rkotler
Date: Wed May 15 21:17:42 2013
New Revision: 181972

URL: http://llvm.org/viewvc/llvm-project?rev=181972&view=rev
Log:
Patch number 2 for mips16/32 floating point interoperability stubs.
This creates stubs that help Mips32 functions call Mips16 
functions which have floating point parameters that are normally passed
in floating point registers.
 

Added:
    llvm/trunk/test/CodeGen/Mips/hf16call32_body.ll
Modified:
    llvm/trunk/lib/Target/Mips/Mips16HardFloat.cpp

Modified: llvm/trunk/lib/Target/Mips/Mips16HardFloat.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/Mips16HardFloat.cpp?rev=181972&r1=181971&r2=181972&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/Mips16HardFloat.cpp (original)
+++ llvm/trunk/lib/Target/Mips/Mips16HardFloat.cpp Wed May 15 21:17:42 2013
@@ -320,7 +320,7 @@ static void assureFPCallStub(Function &F
 
 //
 // Returns of float, double and complex need to be handled with a helper
-// function. The "AndCal" part is coming in a later patch.
+// function.
 //
 static bool fixupFPReturnAndCall
   (Function &F, Module *M,  const MipsSubtarget &Subtarget) {
@@ -378,6 +378,41 @@ static bool fixupFPReturnAndCall
   return Modified;
 }
 
+static void createFPFnStub(Function *F, Module *M, FPParamVariant PV,
+                  const MipsSubtarget &Subtarget ) {
+  bool PicMode = Subtarget.getRelocationModel() == Reloc::PIC_;
+  bool LE = Subtarget.isLittle();
+  LLVMContext &Context = M->getContext();
+  std::string Name = F->getName();
+  std::string SectionName = ".mips16.fn." + Name;
+  std::string StubName = "__fn_stub_" + Name;
+  std::string LocalName = "__fn_local_" + Name;
+  Function *FStub = Function::Create
+    (F->getFunctionType(),
+     Function::ExternalLinkage, StubName, M);
+  FStub->addFnAttr("mips16_fp_stub");
+  FStub->addFnAttr(llvm::Attribute::Naked);
+  FStub->addFnAttr(llvm::Attribute::NoUnwind);
+  FStub->addFnAttr("nomips16");
+  FStub->setSection(SectionName);
+  BasicBlock *BB = BasicBlock::Create(Context, "entry", FStub);
+  InlineAsmHelper IAH(Context, BB);
+  IAH.Out(" .set  macro");
+  if (PicMode) {
+    IAH.Out(".set noreorder");
+    IAH.Out(".cpload  $$2");
+    IAH.Out(".set reorder");
+    IAH.Out(".reloc 0,R_MIPS_NONE," + Name);
+    IAH.Out("la $$25," + LocalName);
+  }
+  else
+    IAH.Out("la $$25, " + Name);
+  swapFPIntParams(PV, M, IAH, LE, false);
+  IAH.Out("jr $$25");
+  IAH.Out(LocalName + " = " + Name);
+  new UnreachableInst(FStub->getContext(), BB);
+}
+
 namespace llvm {
 
 //
@@ -389,10 +424,10 @@ namespace llvm {
 //       by calling a helper function before the actual return.
 //    2) generate helper functions (stubs) that can be called by mips32 functions
//       that will move parameters normally passed in floating point
-//       registers the soft float equivalents. (Coming in a later patch).
+//       registers to the soft float equivalents.
 //    3) in the case of static relocation, generate helper functions so that
 //       mips16 functions can call extern functions of unknown type (mips16 or
-//       mips32). (Coming in a later patch).
+//       mips32).
 //    4) TBD. For pic, calls to extern functions of unknown type are handled by
 //       predefined helper functions in libc but this work is currently done
 //       during call lowering but it should be moved here in the future.
@@ -404,6 +439,11 @@ bool Mips16HardFloat::runOnModule(Module
     if (F->isDeclaration() || F->hasFnAttribute("mips16_fp_stub") ||
         F->hasFnAttribute("nomips16")) continue;
     Modified |= fixupFPReturnAndCall(*F, &M, Subtarget);
+    FPParamVariant V = whichFPParamVariantNeeded(*F);
+    if (V != NoSig) {
+      Modified = true;
+      createFPFnStub(F, &M, V, Subtarget);
+    }
   }
   return Modified;
 }

Added: llvm/trunk/test/CodeGen/Mips/hf16call32_body.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/hf16call32_body.ll?rev=181972&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/hf16call32_body.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/hf16call32_body.ll Wed May 15 21:17:42 2013
@@ -0,0 +1,294 @@
+; RUN: llc -mtriple=mipsel-linux-gnu -march=mipsel -mcpu=mips16 -soft-float -mips16-hard-float -relocation-model=static < %s | FileCheck %s -check-prefix=stel
+
+@x = external global float
+@xd = external global double
+@y = external global float
+@yd = external global double
+@ret_sf = external global float
+@ret_df = external global double
+@ret_sc = external global { float, float }
+@ret_dc = external global { double, double }
+
+; Function Attrs: nounwind
+define void @v_sf(float %p) #0 {
+entry:
+  %p.addr = alloca float, align 4
+  store float %p, float* %p.addr, align 4
+  %0 = load float* %p.addr, align 4
+  store float %0, float* @x, align 4
+  ret void
+}
+; stel: .section	.mips16.fn.v_sf,"ax",@progbits
+; stel:	.ent	__fn_stub_v_sf
+; stel:		la $25, v_sf
+; stel:		mfc1 $4,$f12
+; stel:		jr $25
+; stel:		__fn_local_v_sf = v_sf
+; stel:	.end	__fn_stub_v_sf
+
+declare i32 @printf(i8*, ...) #1
+
+; Function Attrs: nounwind
+define void @v_df(double %p) #0 {
+entry:
+  %p.addr = alloca double, align 8
+  store double %p, double* %p.addr, align 8
+  %0 = load double* %p.addr, align 8
+  store double %0, double* @xd, align 8
+  ret void
+}
+
+; stel: .section	.mips16.fn.v_df,"ax",@progbits
+; stel:	.ent	__fn_stub_v_df
+; stel:		la $25, v_df
+; stel:		mfc1 $4,$f12
+; stel:		mfc1 $5,$f13
+; stel:		jr $25
+; stel:		__fn_local_v_df = v_df
+; stel:	.end	__fn_stub_v_df
+
+; Function Attrs: nounwind
+define void @v_sf_sf(float %p1, float %p2) #0 {
+entry:
+  %p1.addr = alloca float, align 4
+  %p2.addr = alloca float, align 4
+  store float %p1, float* %p1.addr, align 4
+  store float %p2, float* %p2.addr, align 4
+  %0 = load float* %p1.addr, align 4
+  store float %0, float* @x, align 4
+  %1 = load float* %p2.addr, align 4
+  store float %1, float* @y, align 4
+  ret void
+}
+
+; stel: .section	.mips16.fn.v_sf_sf,"ax",@progbits
+; stel:	.ent	__fn_stub_v_sf_sf
+; stel:		la $25, v_sf_sf
+; stel:		mfc1 $4,$f12
+; stel:		mfc1 $5,$f14
+; stel:		jr $25
+; stel:		__fn_local_v_sf_sf = v_sf_sf
+; stel:	.end	__fn_stub_v_sf_sf
+
+; Function Attrs: nounwind
+define void @v_sf_df(float %p1, double %p2) #0 {
+entry:
+  %p1.addr = alloca float, align 4
+  %p2.addr = alloca double, align 8
+  store float %p1, float* %p1.addr, align 4
+  store double %p2, double* %p2.addr, align 8
+  %0 = load float* %p1.addr, align 4
+  store float %0, float* @x, align 4
+  %1 = load double* %p2.addr, align 8
+  store double %1, double* @yd, align 8
+  ret void
+}
+
+; stel: .section	.mips16.fn.v_sf_df,"ax",@progbits
+; stel:	.ent	__fn_stub_v_sf_df
+; stel:		la $25, v_sf_df
+; stel:		mfc1 $4,$f12
+; stel:		mfc1 $6,$f14
+; stel:		mfc1 $7,$f15
+; stel:		jr $25
+; stel:		__fn_local_v_sf_df = v_sf_df
+; stel:	.end	__fn_stub_v_sf_df
+
+; Function Attrs: nounwind
+define void @v_df_sf(double %p1, float %p2) #0 {
+entry:
+  %p1.addr = alloca double, align 8
+  %p2.addr = alloca float, align 4
+  store double %p1, double* %p1.addr, align 8
+  store float %p2, float* %p2.addr, align 4
+  %0 = load double* %p1.addr, align 8
+  store double %0, double* @xd, align 8
+  %1 = load float* %p2.addr, align 4
+  store float %1, float* @y, align 4
+  ret void
+}
+
+; stel: .section	.mips16.fn.v_df_sf,"ax",@progbits
+; stel:	.ent	__fn_stub_v_df_sf
+; stel:		la $25, v_df_sf
+; stel:		mfc1 $4,$f12
+; stel:		mfc1 $5,$f13
+; stel:		mfc1 $6,$f14
+; stel:		jr $25
+; stel:		__fn_local_v_df_sf = v_df_sf
+; stel:	.end	__fn_stub_v_df_sf
+
+; Function Attrs: nounwind
+define void @v_df_df(double %p1, double %p2) #0 {
+entry:
+  %p1.addr = alloca double, align 8
+  %p2.addr = alloca double, align 8
+  store double %p1, double* %p1.addr, align 8
+  store double %p2, double* %p2.addr, align 8
+  %0 = load double* %p1.addr, align 8
+  store double %0, double* @xd, align 8
+  %1 = load double* %p2.addr, align 8
+  store double %1, double* @yd, align 8
+  ret void
+}
+
+; stel: .section	.mips16.fn.v_df_df,"ax",@progbits
+; stel:	.ent	__fn_stub_v_df_df
+; stel:		la $25, v_df_df
+; stel:		mfc1 $4,$f12
+; stel:		mfc1 $5,$f13
+; stel:		mfc1 $6,$f14
+; stel:		mfc1 $7,$f15
+; stel:		jr $25
+; stel:		__fn_local_v_df_df = v_df_df
+; stel:	.end	__fn_stub_v_df_df
+
+; Function Attrs: nounwind
+define float @sf_v() #0 {
+entry:
+  %0 = load float* @ret_sf, align 4
+  ret float %0
+}
+
+; Function Attrs: nounwind
+define float @sf_sf(float %p) #0 {
+entry:
+  %p.addr = alloca float, align 4
+  store float %p, float* %p.addr, align 4
+  %0 = load float* %p.addr, align 4
+  store float %0, float* @x, align 4
+  %1 = load float* @ret_sf, align 4
+  ret float %1
+}
+
+
+; stel: .section	.mips16.fn.sf_sf,"ax",@progbits
+; stel:	.ent	__fn_stub_sf_sf
+; stel:		la $25, sf_sf
+; stel:		mfc1 $4,$f12
+; stel:		jr $25
+; stel:		__fn_local_sf_sf = sf_sf
+; stel:	.end	__fn_stub_sf_sf
+
+
+; Function Attrs: nounwind
+define float @sf_df(double %p) #0 {
+entry:
+  %p.addr = alloca double, align 8
+  store double %p, double* %p.addr, align 8
+  %0 = load double* %p.addr, align 8
+  store double %0, double* @xd, align 8
+  %1 = load float* @ret_sf, align 4
+  ret float %1
+}
+
+; stel: .section	.mips16.fn.sf_df,"ax",@progbits
+; stel:	.ent	__fn_stub_sf_df
+; stel:		la $25, sf_df
+; stel:		mfc1 $4,$f12
+; stel:		mfc1 $5,$f13
+; stel:		jr $25
+; stel:		__fn_local_sf_df = sf_df
+; stel:	.end	__fn_stub_sf_df
+
+; Function Attrs: nounwind
+define float @sf_sf_sf(float %p1, float %p2) #0 {
+entry:
+  %p1.addr = alloca float, align 4
+  %p2.addr = alloca float, align 4
+  store float %p1, float* %p1.addr, align 4
+  store float %p2, float* %p2.addr, align 4
+  %0 = load float* %p1.addr, align 4
+  store float %0, float* @x, align 4
+  %1 = load float* %p2.addr, align 4
+  store float %1, float* @y, align 4
+  %2 = load float* @ret_sf, align 4
+  ret float %2
+}
+
+; stel: .section	.mips16.fn.sf_sf_sf,"ax",@progbits
+; stel:	.ent	__fn_stub_sf_sf_sf
+; stel:		la $25, sf_sf_sf
+; stel:		mfc1 $4,$f12
+; stel:		mfc1 $5,$f14
+; stel:		jr $25
+; stel:		__fn_local_sf_sf_sf = sf_sf_sf
+; stel:	.end	__fn_stub_sf_sf_sf
+
+; Function Attrs: nounwind
+define float @sf_sf_df(float %p1, double %p2) #0 {
+entry:
+  %p1.addr = alloca float, align 4
+  %p2.addr = alloca double, align 8
+  store float %p1, float* %p1.addr, align 4
+  store double %p2, double* %p2.addr, align 8
+  %0 = load float* %p1.addr, align 4
+  store float %0, float* @x, align 4
+  %1 = load double* %p2.addr, align 8
+  store double %1, double* @yd, align 8
+  %2 = load float* @ret_sf, align 4
+  ret float %2
+}
+
+; stel: .section	.mips16.fn.sf_sf_df,"ax",@progbits
+; stel:	.ent	__fn_stub_sf_sf_df
+; stel:		la $25, sf_sf_df
+; stel:		mfc1 $4,$f12
+; stel:		mfc1 $6,$f14
+; stel:		mfc1 $7,$f15
+; stel:		jr $25
+; stel:		__fn_local_sf_sf_df = sf_sf_df
+; stel:	.end	__fn_stub_sf_sf_df
+
+; Function Attrs: nounwind
+define float @sf_df_sf(double %p1, float %p2) #0 {
+entry:
+  %p1.addr = alloca double, align 8
+  %p2.addr = alloca float, align 4
+  store double %p1, double* %p1.addr, align 8
+  store float %p2, float* %p2.addr, align 4
+  %0 = load double* %p1.addr, align 8
+  store double %0, double* @xd, align 8
+  %1 = load float* %p2.addr, align 4
+  store float %1, float* @y, align 4
+  %2 = load float* @ret_sf, align 4
+  ret float %2
+}
+
+; stel: .section	.mips16.fn.sf_df_sf,"ax",@progbits
+; stel:	.ent	__fn_stub_sf_df_sf
+; stel:		la $25, sf_df_sf
+; stel:		mfc1 $4,$f12
+; stel:		mfc1 $5,$f13
+; stel:		mfc1 $6,$f14
+; stel:		jr $25
+; stel:		__fn_local_sf_df_sf = sf_df_sf
+; stel:	.end	__fn_stub_sf_df_sf
+
+; Function Attrs: nounwind
+define float @sf_df_df(double %p1, double %p2) #0 {
+entry:
+  %p1.addr = alloca double, align 8
+  %p2.addr = alloca double, align 8
+  store double %p1, double* %p1.addr, align 8
+  store double %p2, double* %p2.addr, align 8
+  %0 = load double* %p1.addr, align 8
+  store double %0, double* @xd, align 8
+  %1 = load double* %p2.addr, align 8
+  store double %1, double* @yd, align 8
+  %2 = load float* @ret_sf, align 4
+  ret float %2
+}
+
+; stel: .section	.mips16.fn.sf_df_df,"ax",@progbits
+; stel:	.ent	__fn_stub_sf_df_df
+; stel:		la $25, sf_df_df
+; stel:		mfc1 $4,$f12
+; stel:		mfc1 $5,$f13
+; stel:		mfc1 $6,$f14
+; stel:		mfc1 $7,$f15
+; stel:		jr $25
+; stel:		__fn_local_sf_df_df = sf_df_df
+; stel:	.end	__fn_stub_sf_df_df
+
+attributes #0 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf"="true" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }





More information about the llvm-commits mailing list