[cfe-commits] r116696 - in /cfe/trunk: lib/CodeGen/TargetInfo.cpp test/CodeGen/x86_32-arguments-darwin.c test/CodeGen/x86_32-arguments-linux.c

Daniel Dunbar daniel at zuster.org
Mon Oct 18 16:16:50 PDT 2010


Hi Bill,

On Sun, Oct 17, 2010 at 8:41 PM, Bill Wendling <isanbard at gmail.com> wrote:
> Author: void
> Date: Sun Oct 17 22:41:31 2010
> New Revision: 116696
>
> URL: http://llvm.org/viewvc/llvm-project?rev=116696&view=rev
> Log:
> Reapply r116684 with fixes. The test cases needed to be updated.

I like the general approach, but I don't think any of the changes to
propagate neededMMX are, err, needed. Actually, I think they are wrong:
unless I am misreading things, we will no longer stop using registers
for varargs at the right point.
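
To make that concern concrete, here is a rough, self-contained model of
the register accounting as I read it after this patch (a sketch only,
not the actual clang code paths):

  // Rough model of computeInfo's accounting after this patch; not the
  // actual code, and I may be misreading it.
  #include <cstdio>

  int main() {
    unsigned freeSSERegs = 8, freeMMXRegs = 8;

    // Eight doubles (class SSE) use up the SSE pool first.
    freeSSERegs -= 8;

    // Then a 64-bit integer vector such as <2 x i32> arrives.  The new
    // classification reports neededSSE = 0, neededMMX = 1 for it.
    unsigned neededSSE = 0, neededMMX = 1;
    if (freeSSERegs >= neededSSE && freeMMXRegs >= neededMMX)
      std::printf("new accounting: still passed in registers\n"); // taken
    else
      std::printf("new accounting: passed in memory\n");

    // Before the patch the same argument reported neededSSE = 1, so with
    // the SSE pool exhausted it would have gone to memory here.
    return 0;
  }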

 - Daniel

> Modified:
>    cfe/trunk/lib/CodeGen/TargetInfo.cpp
>    cfe/trunk/test/CodeGen/x86_32-arguments-darwin.c
>    cfe/trunk/test/CodeGen/x86_32-arguments-linux.c
>
> Modified: cfe/trunk/lib/CodeGen/TargetInfo.cpp
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/TargetInfo.cpp?rev=116696&r1=116695&r2=116696&view=diff
> ==============================================================================
> --- cfe/trunk/lib/CodeGen/TargetInfo.cpp (original)
> +++ cfe/trunk/lib/CodeGen/TargetInfo.cpp Sun Oct 17 22:41:31 2010
> @@ -331,6 +331,16 @@
>           ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
>  }
>
> +/// UseX86_MMXType - Return true if this is an MMX type that should use the special
> +/// x86_mmx type.
> +bool UseX86_MMXType(const llvm::Type *IRType) {
> +  // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
> +  // special x86_mmx type.
> +  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
> +    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
> +    IRType->getScalarSizeInBits() != 64;
> +}
> +
>  //===----------------------------------------------------------------------===//
>  // X86-32 ABI Implementation
>  //===----------------------------------------------------------------------===//
> @@ -658,6 +668,13 @@
>         return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
>                                                             Size));
>     }
> +
> +    const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
> +    if (UseX86_MMXType(IRType)) {
> +      ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
> +      AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
> +      return AAI;
> +    }
>
>     return ABIArgInfo::getDirect();
>   }
> @@ -814,8 +831,10 @@
>
>   ABIArgInfo classifyReturnType(QualType RetTy) const;
>
> -  ABIArgInfo classifyArgumentType(QualType Ty, unsigned &neededInt,
> -                                  unsigned &neededSSE) const;
> +  ABIArgInfo classifyArgumentType(QualType Ty,
> +                                  unsigned &neededInt,
> +                                  unsigned &neededSSE,
> +                                  unsigned &neededMMX) const;
>
>  public:
>   X86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
> @@ -1662,7 +1681,8 @@
>  }
>
>  ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned &neededInt,
> -                                               unsigned &neededSSE) const {
> +                                               unsigned &neededSSE,
> +                                               unsigned &neededMMX) const {
>   X86_64ABIInfo::Class Lo, Hi;
>   classify(Ty, 0, Lo, Hi);
>
> @@ -1673,6 +1693,7 @@
>
>   neededInt = 0;
>   neededSSE = 0;
> +  neededMMX = 0;
>   const llvm::Type *ResType = 0;
>   switch (Lo) {
>   case NoClass:
> @@ -1724,11 +1745,20 @@
>     // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
>     // available SSE register is used, the registers are taken in the
>     // order from %xmm0 to %xmm7.
> -  case SSE:
> -    ++neededSSE;
> -    ResType = GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 0, Ty, 0);
> +  case SSE: {
> +    const llvm::Type *IRType = CGT.ConvertTypeRecursive(Ty);
> +    if (Hi != NoClass || !UseX86_MMXType(IRType)) {
> +      ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
> +      ++neededSSE;
> +    } else {
> +      // This is an MMX type. Treat it as such.
> +      ResType = llvm::Type::getX86_MMXTy(getVMContext());
> +      ++neededMMX;
> +    }
> +
>     break;
>   }
> +  }
>
>   const llvm::Type *HighPart = 0;
>   switch (Hi) {
> @@ -1787,7 +1817,7 @@
>   FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
>
>   // Keep track of the number of assigned registers.
> -  unsigned freeIntRegs = 6, freeSSERegs = 8;
> +  unsigned freeIntRegs = 6, freeSSERegs = 8, freeMMXRegs = 8;
>
>   // If the return value is indirect, then the hidden argument is consuming one
>   // integer register.
> @@ -1798,16 +1828,18 @@
>   // get assigned (in left-to-right order) for passing as follows...
>   for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
>        it != ie; ++it) {
> -    unsigned neededInt, neededSSE;
> -    it->info = classifyArgumentType(it->type, neededInt, neededSSE);
> +    unsigned neededInt, neededSSE, neededMMX;
> +    it->info = classifyArgumentType(it->type, neededInt, neededSSE, neededMMX);
>
>     // AMD64-ABI 3.2.3p3: If there are no registers available for any
>     // eightbyte of an argument, the whole argument is passed on the
>     // stack. If registers have already been assigned for some
>     // eightbytes of such an argument, the assignments get reverted.
> -    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
> +    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE &&
> +        freeMMXRegs >= neededMMX) {
>       freeIntRegs -= neededInt;
>       freeSSERegs -= neededSSE;
> +      freeMMXRegs -= neededMMX;
>     } else {
>       it->info = getIndirectResult(it->type);
>     }
> @@ -1876,10 +1908,13 @@
>   //   i8* overflow_arg_area;
>   //   i8* reg_save_area;
>   // };
> -  unsigned neededInt, neededSSE;
> +  unsigned neededInt, neededSSE, neededMMX;
>
>   Ty = CGF.getContext().getCanonicalType(Ty);
> -  ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE);
> +  ABIArgInfo AI = classifyArgumentType(Ty, neededInt, neededSSE, neededMMX);
> +
> +  // Lump the MMX in with SSE.
> +  neededSSE += neededMMX;
>
>   // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
>   // in the registers. If not go to step 7.
>
> Modified: cfe/trunk/test/CodeGen/x86_32-arguments-darwin.c
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/x86_32-arguments-darwin.c?rev=116696&r1=116695&r2=116696&view=diff
> ==============================================================================
> --- cfe/trunk/test/CodeGen/x86_32-arguments-darwin.c (original)
> +++ cfe/trunk/test/CodeGen/x86_32-arguments-darwin.c Sun Oct 17 22:41:31 2010
> @@ -230,7 +230,7 @@
>
>  // CHECK: define void @f56(
>  // CHECK: i8 signext %a0, %struct.s56_0* byval %a1,
> -// CHECK: <2 x i32> %a2, %struct.s56_1* byval align 4,
> +// CHECK: x86_mmx %a2.coerce, %struct.s56_1* byval align 4,
>  // CHECK: i64 %a4.coerce, %struct.s56_2* byval align 4,
>  // CHECK: <4 x i32> %a6, %struct.s39* byval align 16 %a7,
>  // CHECK: <2 x double> %a8, %struct.s56_4* byval align 16 %a9,
> @@ -239,7 +239,7 @@
>
>  // CHECK:   call void (i32, ...)* @f56_0(i32 1,
>  // CHECK: i32 %{{[^ ]*}}, %struct.s56_0* byval %{{[^ ]*}},
> -// CHECK: <2 x i32> %{{[^ ]*}}, %struct.s56_1* byval align 4 %{{[^ ]*}},
> +// CHECK: x86_mmx %{{[^ ]*}}, %struct.s56_1* byval align 4 %{{[^ ]*}},
>  // CHECK: i64 %{{[^ ]*}}, %struct.s56_2* byval align 4 %{{[^ ]*}},
>  // CHECK: <4 x i32> %{{[^ ]*}}, %struct.s39* byval align 16 %{{[^ ]*}},
>  // CHECK: <2 x double> %{{[^ ]*}}, %struct.s56_4* byval align 16 %{{[^ ]*}},
>
> Modified: cfe/trunk/test/CodeGen/x86_32-arguments-linux.c
> URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/CodeGen/x86_32-arguments-linux.c?rev=116696&r1=116695&r2=116696&view=diff
> ==============================================================================
> --- cfe/trunk/test/CodeGen/x86_32-arguments-linux.c (original)
> +++ cfe/trunk/test/CodeGen/x86_32-arguments-linux.c Sun Oct 17 22:41:31 2010
> @@ -3,7 +3,7 @@
>
>  // CHECK: define void @f56(
>  // CHECK: i8 signext %a0, %struct.s56_0* byval %a1,
> -// CHECK: <2 x i32> %a2, %struct.s56_1* byval align 4,
> +// CHECK: x86_mmx %a2.coerce, %struct.s56_1* byval align 4,
>  // CHECK: <1 x double> %a4, %struct.s56_2* byval align 4,
>  // CHECK: <4 x i32> %a6, %struct.s56_3* byval align 4,
>  // CHECK: <2 x double> %a8, %struct.s56_4* byval align 4,
> @@ -12,7 +12,7 @@
>
>  // CHECK: call void (i32, ...)* @f56_0(i32 1,
>  // CHECK: i32 %{{.*}}, %struct.s56_0* byval %{{[^ ]*}},
> -// CHECK: <2 x i32> %{{[^ ]*}}, %struct.s56_1* byval align 4 %{{[^ ]*}},
> +// CHECK: x86_mmx %{{[^ ]*}}, %struct.s56_1* byval align 4 %{{[^ ]*}},
>  // CHECK: <1 x double> %{{[^ ]*}}, %struct.s56_2* byval align 4 %{{[^ ]*}},
>  // CHECK: <4 x i32> %{{[^ ]*}}, %struct.s56_3* byval align 4 %{{[^ ]*}},
>  // CHECK: <2 x double> %{{[^ ]*}}, %struct.s56_4* byval align 4 %{{[^ ]*}},
>
>
> _______________________________________________
> cfe-commits mailing list
> cfe-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/cfe-commits
>
