[llvm-commits] [dragonegg] r157918 - in /dragonegg/trunk: Makefile include/dragonegg/ABI.h include/dragonegg/ConstantConversion.h include/dragonegg/Constants.h include/dragonegg/TypeConversion.h include/dragonegg/Types.h src/Backend.cpp src/ConstantConversion.cpp src/Constants.cpp src/Convert.cpp src/TypeConversion.cpp src/Types.cpp
Duncan Sands
baldrick at free.fr
Mon Jun 4 00:54:40 PDT 2012
Author: baldrick
Date: Mon Jun 4 02:54:40 2012
New Revision: 157918
URL: http://llvm.org/viewvc/llvm-project?rev=157918&view=rev
Log:
Rename Types.cpp to TypeConversion.cpp and Constants.cpp to ConstantConversion.cpp
to avoid confusion with the LLVM files of the same name when working in the
debugger.  This leaves dragonegg with a somewhat inconsistent naming scheme, but
I plan to fix that progressively.
Added:
    dragonegg/trunk/include/dragonegg/ConstantConversion.h
      - copied, changed from r157915, dragonegg/trunk/include/dragonegg/Constants.h
    dragonegg/trunk/include/dragonegg/TypeConversion.h
      - copied, changed from r157915, dragonegg/trunk/include/dragonegg/Types.h
    dragonegg/trunk/src/ConstantConversion.cpp
      - copied, changed from r157915, dragonegg/trunk/src/Constants.cpp
    dragonegg/trunk/src/TypeConversion.cpp
      - copied, changed from r157915, dragonegg/trunk/src/Types.cpp

Removed:
    dragonegg/trunk/include/dragonegg/Constants.h
    dragonegg/trunk/include/dragonegg/Types.h
    dragonegg/trunk/src/Constants.cpp
    dragonegg/trunk/src/Types.cpp

Modified:
    dragonegg/trunk/Makefile
    dragonegg/trunk/include/dragonegg/ABI.h
    dragonegg/trunk/src/Backend.cpp
    dragonegg/trunk/src/Convert.cpp

Modified: dragonegg/trunk/Makefile
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/Makefile?rev=157918&r1=157917&r2=157918&view=diff
==============================================================================
--- dragonegg/trunk/Makefile (original)
+++ dragonegg/trunk/Makefile Mon Jun 4 02:54:40 2012
@@ -51,8 +51,8 @@
LLVM_VERSION:=$(shell $(LLVM_CONFIG) --version)
PLUGIN=dragonegg.so
-PLUGIN_OBJECTS=Aliasing.o Backend.o Cache.o Constants.o Convert.o Debug.o \
- DefaultABI.o Trees.o Types.o bits_and_bobs.o
+PLUGIN_OBJECTS=Aliasing.o Backend.o Cache.o ConstantConversion.o Convert.o \
+ Debug.o DefaultABI.o Trees.o TypeConversion.o bits_and_bobs.o
TARGET_OBJECT=Target.o
TARGET_SOURCE=$(SRC_DIR)/$(shell $(TARGET_UTIL) -p)/Target.cpp
Modified: dragonegg/trunk/include/dragonegg/ABI.h
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/dragonegg/ABI.h?rev=157918&r1=157917&r2=157918&view=diff
==============================================================================
--- dragonegg/trunk/include/dragonegg/ABI.h (original)
+++ dragonegg/trunk/include/dragonegg/ABI.h Mon Jun 4 02:54:40 2012
@@ -28,7 +28,7 @@
// Plugin headers
#include "dragonegg/Internals.h"
#include "dragonegg/Target.h"
-#include "dragonegg/Types.h"
+#include "dragonegg/TypeConversion.h"
// LLVM headers
#include "llvm/LLVMContext.h"
Copied: dragonegg/trunk/include/dragonegg/ConstantConversion.h (from r157915, dragonegg/trunk/include/dragonegg/Constants.h)
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/dragonegg/ConstantConversion.h?p2=dragonegg/trunk/include/dragonegg/ConstantConversion.h&p1=dragonegg/trunk/include/dragonegg/Constants.h&r1=157915&r2=157918&rev=157918&view=diff
==============================================================================
--- dragonegg/trunk/include/dragonegg/Constants.h (original)
+++ dragonegg/trunk/include/dragonegg/ConstantConversion.h Mon Jun 4 02:54:40 2012
@@ -1,4 +1,4 @@
-//=----- Constants.h - Converting and working with constants ------*- C++ -*-=//
+//=- ConstantConversion.h - Converting and working with constants -*- C++ -*-=//
//
// Copyright (C) 2011 to 2012 Duncan Sands.
//
Removed: dragonegg/trunk/include/dragonegg/Constants.h
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/dragonegg/Constants.h?rev=157917&view=auto
==============================================================================
--- dragonegg/trunk/include/dragonegg/Constants.h (original)
+++ dragonegg/trunk/include/dragonegg/Constants.h (removed)
@@ -1,62 +0,0 @@
-//=----- Constants.h - Converting and working with constants ------*- C++ -*-=//
-//
-// Copyright (C) 2011 to 2012 Duncan Sands.
-//
-// This file is part of DragonEgg.
-//
-// DragonEgg is free software; you can redistribute it and/or modify it under
-// the terms of the GNU General Public License as published by the Free Software
-// Foundation; either version 2, or (at your option) any later version.
-//
-// DragonEgg is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
-// A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along with
-// DragonEgg; see the file COPYING. If not, write to the Free Software
-// Foundation, 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA.
-//
-//===----------------------------------------------------------------------===//
-// This file declares functions for converting GCC constants to LLVM and working
-// with them.
-//===----------------------------------------------------------------------===//
-
-#ifndef DRAGONEGG_CONSTANTS_H
-#define DRAGONEGG_CONSTANTS_H
-
-// Forward declarations.
-namespace llvm {
- class Constant;
- class Type;
-}
-union tree_node;
-
-/// AddressOf - Given an expression with a constant address such as a constant,
-/// a global variable or a label, returns the address. The type of the returned
-/// is always a pointer type and, as long as 'exp' does not have void type, the
-/// type of the pointee is the memory type that corresponds to the type of exp
-/// (see ConvertType).
-extern llvm::Constant *AddressOf(tree_node *exp);
-
-/// ConvertInitializer - Convert the initial value for a global variable to an
-/// equivalent LLVM constant. Also handles constant constructors. The type of
-/// the returned value may be pretty much anything. All that is guaranteed is
-/// that its alloc size is equal to the size of the initial value and that its
-/// alignment is less than or equal to the initial value's GCC type alignment
-/// (here the GCC type means the main variant). Note that the GCC type may have
-/// variable size or no size in which case the size is determined by the initial
-/// value. When this happens the size of the initial value may exceed the alloc
-/// size of the LLVM memory type generated for the GCC type (see ConvertType);
-/// it is never smaller than the alloc size.
-extern llvm::Constant *ConvertInitializer(tree_node *exp);
-
-/// ExtractRegisterFromConstant - Extract a value of the given scalar GCC type
-/// from a constant. The returned value is of in-register type, as returned by
-/// getRegType, and is what you would get by storing the constant to memory and
-/// using LoadRegisterFromMemory to load a register value back out starting from
-/// byte StartingByte.
-extern llvm::Constant *ExtractRegisterFromConstant(llvm::Constant *C,
- tree_node *type,
- int StartingByte = 0);
-
-#endif /* DRAGONEGG_CONSTANTS_H */
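
For context, a minimal sketch (not part of this commit) of how the constant-conversion
API declared above is typically used from plugin code, now included via the renamed
dragonegg/ConstantConversion.h header.  The helper function, the global name and the
surrounding setup are illustrative assumptions, and the sketch presumes the dragonegg
plugin build environment with the GCC tree headers already included:

#include "dragonegg/ConstantConversion.h"  // Renamed from dragonegg/Constants.h.

// LLVM headers (3.1-era paths, matching the rest of dragonegg).
#include "llvm/GlobalVariable.h"
#include "llvm/Module.h"

// Illustrative helper: emit a private global holding the constant initial
// value of a GCC declaration, if it has one.
static void emitConstantGlobal(llvm::Module &M, tree_node *decl) {
  tree_node *init = DECL_INITIAL(decl);  // GCC's initial value, if any.
  if (!init)
    return;
  // ConvertInitializer may return a constant whose type is not the memory
  // type of the declaration (only the alloc size is guaranteed), so the
  // global is created with the initializer's own type.
  llvm::Constant *Init = ConvertInitializer(init);
  new llvm::GlobalVariable(M, Init->getType(), TREE_READONLY(decl),
                           llvm::GlobalValue::InternalLinkage, Init,
                           "illustrative.global");
}
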
Copied: dragonegg/trunk/include/dragonegg/TypeConversion.h (from r157915, dragonegg/trunk/include/dragonegg/Types.h)
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/dragonegg/TypeConversion.h?p2=dragonegg/trunk/include/dragonegg/TypeConversion.h&p1=dragonegg/trunk/include/dragonegg/Types.h&r1=157915&r2=157918&rev=157918&view=diff
==============================================================================
--- dragonegg/trunk/include/dragonegg/Types.h (original)
+++ dragonegg/trunk/include/dragonegg/TypeConversion.h Mon Jun 4 02:54:40 2012
@@ -1,4 +1,4 @@
-//=---------- Types.h - Converting and working with types ---------*- C++ -*-=//
+//=----- TypeConversion.h - Converting and working with types -----*- C++ -*-=//
//
// Copyright (C) 2011 to 2012 Duncan Sands.
//
Removed: dragonegg/trunk/include/dragonegg/Types.h
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/include/dragonegg/Types.h?rev=157917&view=auto
==============================================================================
--- dragonegg/trunk/include/dragonegg/Types.h (original)
+++ dragonegg/trunk/include/dragonegg/Types.h (removed)
@@ -1,106 +0,0 @@
-//=---------- Types.h - Converting and working with types ---------*- C++ -*-=//
-//
-// Copyright (C) 2011 to 2012 Duncan Sands.
-//
-// This file is part of DragonEgg.
-//
-// DragonEgg is free software; you can redistribute it and/or modify it under
-// the terms of the GNU General Public License as published by the Free Software
-// Foundation; either version 2, or (at your option) any later version.
-//
-// DragonEgg is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
-// A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along with
-// DragonEgg; see the file COPYING. If not, write to the Free Software
-// Foundation, 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA.
-//
-//===----------------------------------------------------------------------===//
-// This file declares functions for converting GCC types to LLVM types, and for
-// working with types.
-//===----------------------------------------------------------------------===//
-
-#ifndef DRAGONEGG_TYPES_H
-#define DRAGONEGG_TYPES_H
-
-// LLVM headers
-#include "llvm/CallingConv.h"
-
-// Forward declarations.
-namespace llvm {
- class AttrListPtr;
- class FunctionType;
- class LLVMContext;
- class Type;
-}
-union tree_node;
-
-//===----------------------------------------------------------------------===//
-// Utilities
-//===----------------------------------------------------------------------===//
-
-#define NO_LENGTH (~(uint64_t)0)
-
-/// ArrayLengthOf - Returns the length of the given gcc array type, or NO_LENGTH
-/// if the array has variable or unknown length.
-extern uint64_t ArrayLengthOf(tree_node *type);
-
-/// GetFieldIndex - Return the index of the field in the given LLVM type that
-/// corresponds to the GCC field declaration 'decl'. This means that the LLVM
-/// and GCC fields start in the same byte (if 'decl' is a bitfield, this means
-/// that its first bit is within the byte the LLVM field starts at). Returns
-/// INT_MAX if there is no such LLVM field.
-int GetFieldIndex(tree_node *decl, llvm::Type *Ty);
-
-/// GetUnitType - Returns an integer one address unit wide if 'NumUnits' is 1;
-/// otherwise returns an array of such integers with 'NumUnits' elements. For
-/// example, on a machine which has 16 bit bytes returns an i16 or an array of
-/// i16.
-extern llvm::Type *GetUnitType(llvm::LLVMContext &C, unsigned NumUnits = 1);
-
-/// GetUnitPointerType - Returns an LLVM pointer type which points to memory one
-/// address unit wide. For example, on a machine which has 16 bit bytes returns
-/// an i16*.
-extern llvm::Type *GetUnitPointerType(llvm::LLVMContext &C,
- unsigned AddrSpace = 0);
-
-/// isSizeCompatible - Return true if the specified gcc type is guaranteed to be
-/// turned by ConvertType into an LLVM type of the same size (i.e. TYPE_SIZE the
-/// same as getTypeAllocSizeInBits).
-extern bool isSizeCompatible(tree_node *type);
-
-/// getRegType - Returns the LLVM type to use for registers that hold a value
-/// of the scalar GCC type 'type'. All of the EmitReg* routines use this to
-/// determine the LLVM type to return. Note that this only considers the main
-/// variant of the type.
-extern llvm::Type *getRegType(tree_node *type);
-
-/// getPointerToType - Returns the LLVM register type to use for a pointer to
-/// the given GCC type.
-extern llvm::Type *getPointerToType(tree_node *type);
-
-/// ConvertType - Returns the LLVM type to use for memory that holds a value
-/// of the given GCC type (getRegType should be used for values in registers).
-/// Note that the conversion only considers the main variant of the type.
-extern llvm::Type *ConvertType(tree_node *type);
-
-/// ConvertFunctionType - Convert the specified FUNCTION_TYPE or METHOD_TYPE
-/// tree to an LLVM type. This does the same thing that ConvertType does, but
-/// it also returns the function's LLVM calling convention and attributes.
-extern llvm::FunctionType *ConvertFunctionType(tree_node *type, tree_node *decl,
- tree_node *static_chain,
- llvm::CallingConv::ID &CC,
- llvm::AttrListPtr &PAL);
-
-/// ConvertArgListToFnType - Given a DECL_ARGUMENTS list on an GCC tree,
-/// return the LLVM type corresponding to the function. This is useful for
-/// turning "T foo(...)" functions into "T foo(void)" functions.
-llvm::FunctionType *ConvertArgListToFnType(tree_node *type,
- ArrayRef<tree_node *> arglist,
- tree_node *static_chain,
- bool KNRPromotion,
- llvm::CallingConv::ID &CC,
- llvm::AttrListPtr &PAL);
-
-#endif /* DRAGONEGG_TYPES_H */
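
For context, a minimal sketch (not part of this commit) illustrating the difference
between the two conversion entry points declared above, now included via the renamed
dragonegg/TypeConversion.h header.  The helper function is an illustrative assumption,
and the sketch presumes the dragonegg plugin build environment with the GCC tree
headers already included:

#include "dragonegg/TypeConversion.h"  // Renamed from dragonegg/Types.h.

// Illustrative helper: the in-memory type and the in-register type for the
// same GCC type can differ, so the right entry point must be chosen.
static void showTypeDistinction(tree_node *gcc_type) {
  // ConvertType: the LLVM type used for memory (allocas, globals, loads and
  // stores).  For a C 'bool' this is typically i8.
  llvm::Type *MemTy = ConvertType(gcc_type);
  // getRegType: the LLVM type used for SSA register values of this GCC type.
  // For a C 'bool' this is i1, so it need not match the memory type.
  llvm::Type *RegTy = getRegType(gcc_type);
  (void)MemTy;
  (void)RegTy;  // Only the type distinction matters for this sketch.
}
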
Modified: dragonegg/trunk/src/Backend.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Backend.cpp?rev=157918&r1=157917&r2=157918&view=diff
==============================================================================
--- dragonegg/trunk/src/Backend.cpp (original)
+++ dragonegg/trunk/src/Backend.cpp Mon Jun 4 02:54:40 2012
@@ -22,11 +22,11 @@
// Plugin headers
#include "dragonegg/Cache.h"
-#include "dragonegg/Constants.h"
+#include "dragonegg/ConstantConversion.h"
#include "dragonegg/Debug.h"
#include "dragonegg/OS.h"
#include "dragonegg/Target.h"
-#include "dragonegg/Types.h"
+#include "dragonegg/TypeConversion.h"
// LLVM headers
#include "llvm/LLVMContext.h"
Copied: dragonegg/trunk/src/ConstantConversion.cpp (from r157915, dragonegg/trunk/src/Constants.cpp)
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/ConstantConversion.cpp?p2=dragonegg/trunk/src/ConstantConversion.cpp&p1=dragonegg/trunk/src/Constants.cpp&r1=157915&r2=157918&rev=157918&view=diff
==============================================================================
--- dragonegg/trunk/src/Constants.cpp (original)
+++ dragonegg/trunk/src/ConstantConversion.cpp Mon Jun 4 02:54:40 2012
@@ -1,4 +1,4 @@
-//===------- Constants.cpp - Converting and working with constants --------===//
+//===--- ConstantConversion.cpp - Converting and working with constants ---===//
//
// Copyright (C) 2011 to 2012 Duncan Sands
//
@@ -22,9 +22,9 @@
// Plugin headers
#include "dragonegg/Cache.h"
-#include "dragonegg/Constants.h"
+#include "dragonegg/ConstantConversion.h"
#include "dragonegg/Internals.h"
-#include "dragonegg/Types.h"
+#include "dragonegg/TypeConversion.h"
#include "dragonegg/ADT/IntervalList.h"
#include "dragonegg/ADT/Range.h"
Removed: dragonegg/trunk/src/Constants.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Constants.cpp?rev=157917&view=auto
==============================================================================
--- dragonegg/trunk/src/Constants.cpp (original)
+++ dragonegg/trunk/src/Constants.cpp (removed)
@@ -1,1705 +0,0 @@
-//===------- Constants.cpp - Converting and working with constants --------===//
-//
-// Copyright (C) 2011 to 2012 Duncan Sands
-//
-// This file is part of DragonEgg.
-//
-// DragonEgg is free software; you can redistribute it and/or modify it under
-// the terms of the GNU General Public License as published by the Free Software
-// Foundation; either version 2, or (at your option) any later version.
-//
-// DragonEgg is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
-// A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along with
-// DragonEgg; see the file COPYING. If not, write to the Free Software
-// Foundation, 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA.
-//
-//===----------------------------------------------------------------------===//
-// This is the code that converts GCC constants to LLVM.
-//===----------------------------------------------------------------------===//
-
-// Plugin headers
-#include "dragonegg/Cache.h"
-#include "dragonegg/Constants.h"
-#include "dragonegg/Internals.h"
-#include "dragonegg/Types.h"
-#include "dragonegg/ADT/IntervalList.h"
-#include "dragonegg/ADT/Range.h"
-
-// LLVM headers
-#include "llvm/GlobalVariable.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Support/Host.h"
-#include "llvm/Target/TargetData.h"
-
-// System headers
-#include <gmp.h>
-
-// GCC headers
-#include "auto-host.h"
-#ifndef ENABLE_BUILD_WITH_CXX
-extern "C" {
-#endif
-#include "config.h"
-// Stop GCC declaring 'getopt' as it can clash with the system's declaration.
-#undef HAVE_DECL_GETOPT
-#include "system.h"
-#include "coretypes.h"
-#include "tm.h"
-#include "tree.h"
-
-#if (GCC_MINOR < 7)
-#include "flags.h" // For POINTER_TYPE_OVERFLOW_UNDEFINED.
-#endif
-#include "tm_p.h" // For CONSTANT_ALIGNMENT.
-#ifndef ENABLE_BUILD_WITH_CXX
-} // extern "C"
-#endif
-
-// Trees header.
-#include "dragonegg/Trees.h"
-
-using namespace llvm;
-
-static LLVMContext &Context = getGlobalContext();
-
-// Forward declarations.
-static Constant *ConvertInitializerImpl(tree, TargetFolder &);
-static Constant *AddressOfImpl(tree, TargetFolder &);
-
-//===----------------------------------------------------------------------===//
-// ... InterpretAsType ...
-//===----------------------------------------------------------------------===//
-
-// TODO: Implement InterpretAsType more efficiently. Turning everything into
-// bits is simple but can involve a lot of work when dealing with large arrays.
-// For example:
-// struct task_struct {
-// char comm[16];
-// };
-// union task_union {
-// struct task_struct task;
-// unsigned long stack[2048*sizeof(long)/sizeof(long)];
-// };
-// union task_union init_task_union = { { comm: "swapper" } };
-
-typedef Range<int> SignedRange;
-
-/// BitSlice - A contiguous range of bits held in memory.
-namespace {
-
-class BitSlice {
- SignedRange R;
- Constant *Contents; // Null if and only if the range is empty.
-
- bool contentsValid() const {
- if (empty())
- return !Contents;
- return Contents && isa<IntegerType>(Contents->getType()) &&
- getBitWidth() == Contents->getType()->getPrimitiveSizeInBits();
- }
-
- /// ExtendRange - Extend the slice to a wider range. All added bits are zero.
- BitSlice ExtendRange(SignedRange r, TargetFolder &Folder) const;
-
- /// ReduceRange - Reduce the slice to a smaller range discarding any bits that
- /// do not belong to the new range.
- BitSlice ReduceRange(SignedRange r, TargetFolder &Folder) const;
-
-public:
- /// BitSlice - Default constructor: empty bit range.
- BitSlice() : R(), Contents(0) {}
-
- /// BitSlice - Constructor for the given range of bits. The bits themselves
- /// are supplied in 'contents' as a constant of integer type (if the range is
- /// empty then 'contents' must be null). On little-endian machines the least
- /// significant bit of 'contents' corresponds to the first bit of the range
- /// (aka "First"), while on big-endian machines it corresponds to the last bit
- /// of the range (aka "Last-1").
- BitSlice(SignedRange r, Constant *contents) : R(r), Contents(contents) {
- assert(contentsValid() && "Contents do not match range");
- }
-
- /// BitSlice - Constructor for the range of bits ['first', 'last').
- BitSlice(int first, int last, Constant *contents)
- : R(first, last), Contents(contents) {
- assert(contentsValid() && "Contents do not match range");
- }
-
- /// empty - Return whether the bit range is empty.
- bool empty() const {
- return R.empty();
- }
-
- /// getBitWidth - Return the number of bits in the range.
- unsigned getBitWidth() const {
- return (unsigned)R.getWidth();
- }
-
- /// getRange - Return the range of bits in this slice.
- SignedRange getRange() const {
- return R;
- }
-
- /// Displace - Return the result of sliding all bits by the given offset.
- BitSlice Displace(int Offset) const {
- return BitSlice(R.Displace(Offset), Contents);
- }
-
- /// getBits - Return the bits in the given range. The supplied range need not
- /// be contained in the range of the slice, but if not then the bits outside
- /// the slice get an undefined value. The bits are returned as a constant of
- /// integer type. On little-endian machine the least significant bit of the
- /// returned value corresponds to the first bit of the range (aka "First"),
- /// while on big-endian machines it corresponds to the last bit of the range
- /// (aka "Last-1").
- Constant *getBits(SignedRange r, TargetFolder &Folder) const;
-
- /// Merge - Join the slice with another (which must be disjoint), forming the
- /// convex hull of the ranges. The bits in the range of one of the slices are
- /// those of that slice. Any other bits have an undefined value.
- void Merge(const BitSlice &other, TargetFolder &Folder);
-};
-
-} // Unnamed namespace.
-
-/// ExtendRange - Extend the slice to a wider range. All added bits are zero.
-BitSlice BitSlice::ExtendRange(SignedRange r, TargetFolder &Folder) const {
- assert(r.contains(R) && "Not an extension!");
- // Quick exit if the range did not actually increase.
- if (R == r)
- return *this;
- assert(!r.empty() && "Empty ranges did not evaluate as equal?");
- Type *ExtTy = IntegerType::get(Context, (unsigned)r.getWidth());
- // If the slice contains no bits then every bit of the extension is zero.
- if (empty())
- return BitSlice(r, Constant::getNullValue(ExtTy));
- // Extend the contents to the new type.
- Constant *C = Folder.CreateZExtOrBitCast(Contents, ExtTy);
- // Position the old contents correctly inside the new contents.
- unsigned deltaFirst = (unsigned)(R.getFirst() - r.getFirst());
- unsigned deltaLast = (unsigned)(r.getLast() - R.getLast());
- if (BYTES_BIG_ENDIAN && deltaLast) {
- (void)deltaFirst; // Avoid unused variable warning.
- Constant *ShiftAmt = ConstantInt::get(C->getType(), deltaLast);
- C = Folder.CreateShl(C, ShiftAmt);
- } else if (!BYTES_BIG_ENDIAN && deltaFirst) {
- (void)deltaLast; // Avoid unused variable warning.
- Constant *ShiftAmt = ConstantInt::get(C->getType(), deltaFirst);
- C = Folder.CreateShl(C, ShiftAmt);
- }
- return BitSlice(r, C);
-}
-
-/// getBits - Return the bits in the given range. The supplied range need not
-/// be contained in the range of the slice, but if not then the bits outside
-/// the slice get an undefined value. The bits are returned as a constant of
-/// integer type. On little-endian machine the least significant bit of the
-/// returned value corresponds to the first bit of the range (aka "First"),
-/// while on big-endian machines it corresponds to the last bit of the range
-/// (aka "Last-1").
-Constant *BitSlice::getBits(SignedRange r, TargetFolder &Folder) const {
- assert(!r.empty() && "Bit range is empty!");
- // Quick exit if the desired range matches that of the slice.
- if (R == r)
- return Contents;
- Type *RetTy = IntegerType::get(Context, (unsigned)r.getWidth());
- // If the slice contains no bits then every returned bit is undefined.
- if (empty())
- return UndefValue::get(RetTy);
- // Extend to the convex hull of the two ranges.
- BitSlice Slice = ExtendRange(R.Join(r), Folder);
- // Chop the slice down to the requested range.
- Slice = Slice.ReduceRange(r, Folder);
- // Now we can just return the bits contained in the slice.
- return Slice.Contents;
-}
-
-/// Merge - Join the slice with another (which must be disjoint), forming the
-/// convex hull of the ranges. The bits in the range of one of the slices are
-/// those of that slice. Any other bits have an undefined value.
-void BitSlice::Merge(const BitSlice &other, TargetFolder &Folder) {
- // If the other slice is empty, the result is this slice.
- if (other.empty())
- return;
- // If this slice is empty, the result is the other slice.
- if (empty()) {
- *this = other;
- return;
- }
- assert(!R.intersects(other.getRange()) && "Slices overlap!");
-
- // Extend each slice to the convex hull of the ranges.
- SignedRange Hull = R.Join(other.getRange());
- BitSlice ExtThis = ExtendRange(Hull, Folder);
- BitSlice ExtOther = other.ExtendRange(Hull, Folder);
-
- // Since the slices are disjoint and all added bits are zero they can be
- // joined via a simple 'or'.
- *this = BitSlice(Hull, Folder.CreateOr(ExtThis.Contents, ExtOther.Contents));
-}
-
-/// ReduceRange - Reduce the slice to a smaller range discarding any bits that
-/// do not belong to the new range.
-BitSlice BitSlice::ReduceRange(SignedRange r, TargetFolder &Folder) const {
- assert(R.contains(r) && "Not a reduction!");
- // Quick exit if the range did not actually decrease.
- if (R == r)
- return *this;
- // The trivial case of reducing to an empty range.
- if (r.empty())
- return BitSlice();
- assert(!R.empty() && "Empty ranges did not evaluate as equal?");
- // Move the least-significant bit to the correct position.
- Constant *C = Contents;
- unsigned deltaFirst = (unsigned)(r.getFirst() - R.getFirst());
- unsigned deltaLast = (unsigned)(R.getLast() - r.getLast());
- if (BYTES_BIG_ENDIAN && deltaLast) {
- (void)deltaFirst; // Avoid unused variable warning.
- Constant *ShiftAmt = ConstantInt::get(C->getType(), deltaLast);
- C = Folder.CreateLShr(C, ShiftAmt);
- } else if (!BYTES_BIG_ENDIAN && deltaFirst) {
- (void)deltaLast; // Avoid unused variable warning.
- Constant *ShiftAmt = ConstantInt::get(C->getType(), deltaFirst);
- C = Folder.CreateLShr(C, ShiftAmt);
- }
- // Truncate to the new type.
- Type *RedTy = IntegerType::get(Context, (unsigned)r.getWidth());
- C = Folder.CreateTruncOrBitCast(C, RedTy);
- return BitSlice(r, C);
-}
-
-/// ViewAsBits - View the given constant as a bunch of bits, i.e. as one big
-/// integer. Only the bits in the given range are needed, so there is no need
-/// to supply bits outside this range though it is harmless to do so. There is
-/// also no need to supply undefined bits inside the range.
-static BitSlice ViewAsBits(Constant *C, SignedRange R, TargetFolder &Folder) {
- if (R.empty())
- return BitSlice();
-
- // Sanitize the range to make life easier in what follows.
- Type *Ty = C->getType();
- int StoreSize = getTargetData().getTypeStoreSizeInBits(Ty);
- R = R.Meet(SignedRange(0, StoreSize));
-
- // Quick exit if it is clear that there are no bits in the range.
- if (R.empty())
- return BitSlice();
- assert(StoreSize > 0 && "Empty range not eliminated?");
-
- switch (Ty->getTypeID()) {
- default:
- llvm_unreachable("Unsupported type!");
- case Type::PointerTyID: {
- // Cast to an integer with the same number of bits and return that.
- IntegerType *IntTy = getTargetData().getIntPtrType(Context);
- return BitSlice(0, StoreSize, Folder.CreatePtrToInt(C, IntTy));
- }
- case Type::DoubleTyID:
- case Type::FloatTyID:
- case Type::FP128TyID:
- case Type::IntegerTyID:
- case Type::PPC_FP128TyID:
- case Type::X86_FP80TyID:
- case Type::X86_MMXTyID: {
- // Bitcast to an integer with the same number of bits and return that.
- unsigned BitWidth = Ty->getPrimitiveSizeInBits();
- IntegerType *IntTy = IntegerType::get(Context, BitWidth);
- C = Folder.CreateBitCast(C, IntTy);
- // Be careful about where the bits are placed in case this is a funky type
- // like i1. If the width is a multiple of the address unit then there is
- // nothing to worry about: the bits occupy the range [0, StoreSize). But
- // if not then endianness matters: on big-endian machines there are padding
- // bits at the start, while on little-endian machines they are at the end.
- return BYTES_BIG_ENDIAN ?
- BitSlice(StoreSize - BitWidth, StoreSize, C) : BitSlice(0, BitWidth, C);
- }
-
- case Type::ArrayTyID: {
- ArrayType *ATy = cast<ArrayType>(Ty);
- Type *EltTy = ATy->getElementType();
- const unsigned Stride = getTargetData().getTypeAllocSizeInBits(EltTy);
- assert(Stride > 0 && "Store size smaller than alloc size?");
- // Elements with indices in [FirstElt, LastElt) overlap the range.
- unsigned FirstElt = R.getFirst() / Stride;
- unsigned LastElt = (R.getLast() + Stride - 1) / Stride;
- assert(LastElt <= ATy->getNumElements() && "Store size bigger than array?");
- // Visit all elements that overlap the requested range, accumulating their
- // bits in Bits.
- BitSlice Bits;
- SignedRange StrideRange(0, Stride);
- for (unsigned i = FirstElt; i < LastElt; ++i) {
- int EltOffsetInBits = i * Stride;
- // Extract the element.
- Constant *Elt = Folder.CreateExtractValue(C, i);
- // View it as a bunch of bits.
- SignedRange NeededBits = StrideRange.Meet(R.Displace(-EltOffsetInBits));
- assert(!NeededBits.empty() && "Used element computation wrong!");
- BitSlice EltBits = ViewAsBits(Elt, NeededBits, Folder);
- // Add to the already known bits.
- Bits.Merge(EltBits.Displace(EltOffsetInBits), Folder);
- }
- return Bits;
- }
-
- case Type::StructTyID: {
- StructType *STy = cast<StructType>(Ty);
- const StructLayout *SL = getTargetData().getStructLayout(STy);
- // Fields with indices in [FirstIdx, LastIdx) overlap the range.
- unsigned FirstIdx = SL->getElementContainingOffset(R.getFirst()/8);
- unsigned LastIdx = 1 + SL->getElementContainingOffset((R.getLast()-1)/8);
- // Visit all fields that overlap the requested range, accumulating their
- // bits in Bits.
- BitSlice Bits;
- for (unsigned i = FirstIdx; i < LastIdx; ++i) {
- int FieldOffsetInBits = SL->getElementOffset(i) * 8;
- // Extract the field.
- Constant *Field = Folder.CreateExtractValue(C, i);
- // Only part of the field may be needed. Compute which bits they are.
- Type *FieldTy = Field->getType();
- unsigned FieldStoreSize = getTargetData().getTypeStoreSizeInBits(FieldTy);
- SignedRange NeededBits(0, FieldStoreSize);
- NeededBits = NeededBits.Meet(R.Displace(-FieldOffsetInBits));
- // View the needed part of the field as a bunch of bits.
- if (!NeededBits.empty()) { // No field bits needed if only using padding.
- BitSlice FieldBits = ViewAsBits(Field, NeededBits, Folder);
- // Add to the already known bits.
- Bits.Merge(FieldBits.Displace(FieldOffsetInBits), Folder);
- }
- }
- return Bits;
- }
-
- case Type::VectorTyID: {
- VectorType *VTy = cast<VectorType>(Ty);
- Type *EltTy = VTy->getElementType();
- const unsigned Stride = getTargetData().getTypeAllocSizeInBits(EltTy);
- assert(Stride > 0 && "Store size smaller than alloc size?");
- // Elements with indices in [FirstElt, LastElt) overlap the range.
- unsigned FirstElt = R.getFirst() / Stride;
- unsigned LastElt = (R.getLast() + Stride - 1) / Stride;
- assert(LastElt <= VTy->getNumElements() && "Store size bigger than vector?");
- // Visit all elements that overlap the requested range, accumulating their
- // bits in Bits.
- BitSlice Bits;
- SignedRange StrideRange(0, Stride);
- for (unsigned i = FirstElt; i < LastElt; ++i) {
- int EltOffsetInBits = i * Stride;
- // Extract the element.
- ConstantInt *Idx = ConstantInt::get(Type::getInt32Ty(Context), i);
- Constant *Elt = Folder.CreateExtractElement(C, Idx);
- // View it as a bunch of bits.
- SignedRange NeededBits = StrideRange.Meet(R.Displace(-EltOffsetInBits));
- assert(!NeededBits.empty() && "Used element computation wrong!");
- BitSlice EltBits = ViewAsBits(Elt, NeededBits, Folder);
- // Add to the already known bits.
- Bits.Merge(EltBits.Displace(EltOffsetInBits), Folder);
- }
- return Bits;
- }
- }
-}
-
-/// InterpretAsType - Interpret the bits of the given constant (starting from
-/// StartingBit) as representing a constant of type 'Ty'. This results in the
-/// same constant as you would get by storing the bits of 'C' to memory (with
-/// the first bit stored being 'StartingBit') and then loading out a (constant)
-/// value of type 'Ty' from the stored to memory location.
-static Constant *InterpretAsType(Constant *C, Type* Ty, int StartingBit,
- TargetFolder &Folder) {
- // Efficient handling for some common cases.
- if (C->getType() == Ty)
- return C;
-
- if (isa<UndefValue>(C))
- return UndefValue::get(Ty);
-
- if (C->isNullValue())
- return Constant::getNullValue(Ty);
-
- // The general case.
- switch (Ty->getTypeID()) {
- default:
- llvm_unreachable("Unsupported type!");
- case Type::IntegerTyID: {
- unsigned BitWidth = Ty->getPrimitiveSizeInBits();
- unsigned StoreSize = getTargetData().getTypeStoreSizeInBits(Ty);
- // Convert the constant into a bunch of bits. Only the bits to be "loaded"
- // out are needed, so rather than converting the entire constant this only
- // converts enough to get all of the required bits.
- BitSlice Bits = ViewAsBits(C, SignedRange(StartingBit,
- StartingBit + StoreSize), Folder);
- // Extract the bits used by the integer. If the integer width is a multiple
- // of the address unit then the endianness of the target doesn't matter. If
- // not then the padding bits come at the start on big-endian machines and at
- // the end on little-endian machines.
- Bits = Bits.Displace(-StartingBit);
- return BYTES_BIG_ENDIAN ?
- Bits.getBits(SignedRange(StoreSize - BitWidth, StoreSize), Folder) :
- Bits.getBits(SignedRange(0, BitWidth), Folder);
- }
-
- case Type::PointerTyID: {
- // Interpret as an integer with the same number of bits then cast back to
- // the original type.
- IntegerType *IntTy = getTargetData().getIntPtrType(Context);
- C = InterpretAsType(C, IntTy, StartingBit, Folder);
- return Folder.CreateIntToPtr(C, Ty);
- }
- case Type::DoubleTyID:
- case Type::FloatTyID:
- case Type::FP128TyID:
- case Type::PPC_FP128TyID:
- case Type::X86_FP80TyID:
- case Type::X86_MMXTyID: {
- // Interpret as an integer with the same number of bits then cast back to
- // the original type.
- unsigned BitWidth = Ty->getPrimitiveSizeInBits();
- IntegerType *IntTy = IntegerType::get(Context, BitWidth);
- return Folder.CreateBitCast(InterpretAsType(C, IntTy, StartingBit, Folder),
- Ty);
- }
-
- case Type::ArrayTyID: {
- // Interpret each array element in turn.
- ArrayType *ATy = cast<ArrayType>(Ty);
- Type *EltTy = ATy->getElementType();
- const unsigned Stride = getTargetData().getTypeAllocSizeInBits(EltTy);
- const unsigned NumElts = ATy->getNumElements();
- std::vector<Constant*> Vals(NumElts);
- for (unsigned i = 0; i != NumElts; ++i)
- Vals[i] = InterpretAsType(C, EltTy, StartingBit + i*Stride, Folder);
- return ConstantArray::get(ATy, Vals); // TODO: Use ArrayRef constructor.
- }
-
- case Type::StructTyID: {
- // Interpret each struct field in turn.
- StructType *STy = cast<StructType>(Ty);
- const StructLayout *SL = getTargetData().getStructLayout(STy);
- unsigned NumElts = STy->getNumElements();
- std::vector<Constant*> Vals(NumElts);
- for (unsigned i = 0; i != NumElts; ++i)
- Vals[i] = InterpretAsType(C, STy->getElementType(i),
- StartingBit + SL->getElementOffsetInBits(i),
- Folder);
- return ConstantStruct::get(STy, Vals); // TODO: Use ArrayRef constructor.
- }
-
- case Type::VectorTyID: {
- // Interpret each vector element in turn.
- VectorType *VTy = cast<VectorType>(Ty);
- Type *EltTy = VTy->getElementType();
- const unsigned Stride = getTargetData().getTypeAllocSizeInBits(EltTy);
- const unsigned NumElts = VTy->getNumElements();
- SmallVector<Constant*, 16> Vals(NumElts);
- for (unsigned i = 0; i != NumElts; ++i)
- Vals[i] = InterpretAsType(C, EltTy, StartingBit + i*Stride, Folder);
- return ConstantVector::get(Vals);
- }
- }
-}
-
-
-//===----------------------------------------------------------------------===//
-// ... ExtractRegisterFromConstant ...
-//===----------------------------------------------------------------------===//
-
-/// ExtractRegisterFromConstantImpl - Implementation of
-/// ExtractRegisterFromConstant.
-static Constant *ExtractRegisterFromConstantImpl(Constant *C, tree type,
- int StartingByte,
- TargetFolder &Folder) {
- // NOTE: Needs to be kept in sync with getRegType and RepresentAsMemory.
- int StartingBit = StartingByte * BITS_PER_UNIT;
- switch (TREE_CODE(type)) {
-
- default:
- debug_tree(type);
- llvm_unreachable("Unknown register type!");
-
- case BOOLEAN_TYPE:
- case ENUMERAL_TYPE:
- case INTEGER_TYPE: {
- // For integral types, extract an integer with size equal to the mode size,
- // then truncate down to the precision. For example, when extracting a bool
- // this probably first loads out an i8 or i32 which is then truncated to i1.
- // This roundabout approach means we get the right result on both little and
- // big endian machines.
- unsigned Size = GET_MODE_BITSIZE(TYPE_MODE(type));
- Type *MemTy = IntegerType::get(Context, Size);
- C = InterpretAsType(C, MemTy, StartingBit, Folder);
- return Folder.CreateTruncOrBitCast(C, getRegType(type));
- }
-
- case COMPLEX_TYPE: {
- tree elt_type = main_type(type);
- unsigned Stride = GET_MODE_BITSIZE(TYPE_MODE(elt_type));
- Constant *Vals[2] = {
- ExtractRegisterFromConstantImpl(C, elt_type, StartingBit, Folder),
- ExtractRegisterFromConstantImpl(C, elt_type, StartingBit + Stride, Folder)
- };
- return ConstantStruct::getAnon(Vals);
- }
-
- case OFFSET_TYPE:
- case POINTER_TYPE:
- case REFERENCE_TYPE:
- return InterpretAsType(C, getRegType(type), StartingBit, Folder);
-
- case REAL_TYPE:
- // NOTE: This might be wrong for floats with precision less than their alloc
- // size on big-endian machines.
- return InterpretAsType(C, getRegType(type), StartingBit, Folder);
-
- case VECTOR_TYPE: {
- tree elt_type = main_type(type);
- unsigned NumElts = TYPE_VECTOR_SUBPARTS(type);
- unsigned Stride = GET_MODE_BITSIZE(TYPE_MODE(elt_type));
- SmallVector<Constant*, 16> Vals(NumElts);
- IntegerType *IntPtrTy = getTargetData().getIntPtrType(Context);
- for (unsigned i = 0; i != NumElts; ++i) {
- Vals[i] = ExtractRegisterFromConstantImpl(C, elt_type,
- StartingBit+i*Stride, Folder);
- // LLVM does not support vectors of pointers, so turn any pointers into
- // integers.
- if (isa<PointerType>(Vals[i]->getType()))
- Vals[i] = Folder.CreatePtrToInt(Vals[i], IntPtrTy);
- }
- return ConstantVector::get(Vals);
- }
-
- }
-}
-
-/// ExtractRegisterFromConstant - Extract a value of the given scalar GCC type
-/// from a constant. The returned value is of in-register type, as returned by
-/// getRegType, and is what you would get by storing the constant to memory and
-/// using LoadRegisterFromMemory to load a register value back out starting from
-/// byte StartingByte.
-Constant *ExtractRegisterFromConstant(Constant *C, tree type, int StartingByte) {
- TargetFolder Folder(&getTargetData());
- return ExtractRegisterFromConstantImpl(C, type, StartingByte, Folder);
-}
-
-
-//===----------------------------------------------------------------------===//
-// ... ConvertInitializer ...
-//===----------------------------------------------------------------------===//
-
-/// getAsRegister - Turn the given GCC scalar constant into an LLVM constant of
-/// register type.
-static Constant *getAsRegister(tree exp, TargetFolder &Folder) {
- Constant *C = ConvertInitializerImpl(exp, Folder);
- return ExtractRegisterFromConstantImpl(C, main_type(exp), 0, Folder);
-}
-
-/// RepresentAsMemory - Turn a constant of in-register type (corresponding
-/// to the given GCC type) into an in-memory constant. The result has the
-/// property that applying ExtractRegisterFromConstant to it gives you the
-/// original in-register constant back again.
-static Constant *RepresentAsMemory(Constant *C, tree type,
- TargetFolder &Folder) {
- // NOTE: Needs to be kept in sync with ExtractRegisterFromConstant.
- assert(C->getType() == getRegType(type) && "Constant has wrong type!");
- Constant *Result;
-
- switch (TREE_CODE(type)) {
-
- default:
- debug_tree(type);
- llvm_unreachable("Unknown register type!");
-
- case BOOLEAN_TYPE:
- case ENUMERAL_TYPE:
- case INTEGER_TYPE: {
- // For integral types extend to an integer with size equal to the mode size.
- // For example, when inserting a bool this probably extends it to an i8 or
- // to an i32. This approach means we get the right result on both little
- // and big endian machines.
- unsigned Size = GET_MODE_BITSIZE(TYPE_MODE(type));
- Type *MemTy = IntegerType::get(Context, Size);
- bool isSigned = !TYPE_UNSIGNED(type);
- Result = isSigned ? Folder.CreateSExtOrBitCast(C, MemTy) :
- Folder.CreateZExtOrBitCast(C, MemTy);
- break;
- }
-
- case COMPLEX_TYPE: {
- tree elt_type = main_type(type);
- Constant *Real = Folder.CreateExtractValue(C, 0);
- Constant *Imag = Folder.CreateExtractValue(C, 1);
- Real = RepresentAsMemory(Real, elt_type, Folder);
- Imag = RepresentAsMemory(Imag, elt_type, Folder);
- Constant *Vals[2] = { Real, Imag };
- Result = ConstantStruct::getAnon(Vals);
- break;
- }
-
- case OFFSET_TYPE:
- case POINTER_TYPE:
- case REFERENCE_TYPE:
- Result = C;
- break;
-
- case REAL_TYPE:
- // NOTE: This might be wrong for floats with precision less than their alloc
- // size on big-endian machines.
- // If the float precision is less than the alloc size then it will be padded
- // out below.
- Result = C;
- break;
-
- case VECTOR_TYPE: {
- tree elt_type = main_type(type);
- unsigned NumElts = TYPE_VECTOR_SUBPARTS(type);
- std::vector<Constant*> Vals(NumElts);
- for (unsigned i = 0; i != NumElts; ++i) {
- ConstantInt *Idx = ConstantInt::get(Type::getInt32Ty(Context), i);
- Vals[i] = Folder.CreateExtractElement(C, Idx);
- Vals[i] = RepresentAsMemory(Vals[i], elt_type, Folder);
- }
- // The elements may have funky types, so forming a vector may not always be
- // possible.
- Result = ConstantStruct::getAnon(Vals);
- break;
- }
-
- }
-
- // Ensure that the result satisfies the guarantees given by ConvertInitializer
- // by turning it into a type with the right size and an appropriate alignment.
- Result = InterpretAsType(Result, ConvertType(type), 0, Folder);
-
- assert(C == ExtractRegisterFromConstantImpl(Result, type, 0, Folder) &&
- "Register inserted wrong!");
-
- return Result;
-}
-
-/// ConvertInitializerWithCast - Convert the initial value for a global variable
-/// to an equivalent LLVM constant then cast to the given type if both the type
-/// and the initializer are scalar, or if the initializer's type only differs in
-/// trivial ways from the given type. This is convenient for avoiding confusing
-/// and pointless type changes in the IR, and for making explicit the implicit
-/// scalar casts that GCC allows in "assignments" such as initializing a record
-/// field.
-static Constant *ConvertInitializerWithCast(tree exp, tree type,
- TargetFolder &Folder) {
- // Convert the initializer. Note that the type of the returned value may be
- // pretty much anything.
- Constant *C = ConvertInitializerImpl(exp, Folder);
-
- // If casting to or from an aggregate then just return the initializer as is.
- // If the types differ then this is probably something like a struct ending in
- // a flexible array being initialized with a struct ending in an array of some
- // definite size.
- if (isa<AGGREGATE_TYPE>(type) || isa<AGGREGATE_TYPE>(TREE_TYPE(exp)))
- return C;
-
- // Scalar to scalar cast. This is where the implicit scalar casts that GCC
- // permits are made explicit.
- Type *DestTy = getRegType(type);
- if (C->getType() == DestTy)
- // No cast is needed if the type is already correct.
- return C;
-
- // Ensure that the initializer has a sensible type. Note that it would be
- // wrong to just interpret the constant as being of type DestTy here since
- // that would not perform a value extension (adding extra zeros or sign bits
- // when casting to a larger integer type for example): any extra bits would
- // wrongly get an undefined value instead.
- C = ExtractRegisterFromConstantImpl(C, main_type(exp), 0, Folder);
-
- // Cast to the desired type.
- bool SrcIsSigned = !TYPE_UNSIGNED(TREE_TYPE(exp));
- bool DestIsSigned = !TYPE_UNSIGNED(type);
- Instruction::CastOps opcode = CastInst::getCastOpcode(C, SrcIsSigned, DestTy,
- DestIsSigned);
- C = Folder.CreateCast(opcode, C, DestTy);
-
- return RepresentAsMemory(C, type, Folder);
-}
-
-/// ConvertCST - Return the given simple constant as an array of bytes. For the
-/// moment only INTEGER_CST, REAL_CST, COMPLEX_CST and VECTOR_CST are supported.
-static Constant *ConvertCST(tree exp, TargetFolder &) {
- const tree type = main_type(exp);
- unsigned SizeInChars = (TREE_INT_CST_LOW(TYPE_SIZE(type)) + CHAR_BIT - 1) /
- CHAR_BIT;
- // Encode the constant in Buffer in target format.
- SmallVector<uint8_t, 16> Buffer(SizeInChars);
- unsigned CharsWritten = native_encode_expr(exp, &Buffer[0], SizeInChars);
- assert(CharsWritten == SizeInChars && "Failed to fully encode expression!");
- (void)CharsWritten; // Avoid unused variable warning when assertions disabled.
- // Turn it into an LLVM byte array.
- return ConstantDataArray::get(Context, Buffer);
-}
-
-static Constant *ConvertSTRING_CST(tree exp, TargetFolder &) {
- // TODO: Enhance GCC's native_encode_expr to handle arbitrary strings and not
- // just those with a byte component type; then ConvertCST can handle strings.
- ArrayType *StrTy = cast<ArrayType>(ConvertType(TREE_TYPE(exp)));
- Type *ElTy = StrTy->getElementType();
-
- unsigned Len = (unsigned)TREE_STRING_LENGTH(exp);
-
- std::vector<Constant*> Elts;
- if (ElTy->isIntegerTy(8)) {
- const unsigned char *InStr =(const unsigned char *)TREE_STRING_POINTER(exp);
- for (unsigned i = 0; i != Len; ++i)
- Elts.push_back(ConstantInt::get(Type::getInt8Ty(Context), InStr[i]));
- } else if (ElTy->isIntegerTy(16)) {
- assert((Len&1) == 0 &&
- "Length in bytes should be a multiple of element size");
- const uint16_t *InStr =
- (const unsigned short *)TREE_STRING_POINTER(exp);
- for (unsigned i = 0; i != Len/2; ++i) {
- // gcc has constructed the initializer elements in the target endianness,
- // but we're going to treat them as ordinary shorts from here, with
- // host endianness. Adjust if necessary.
- if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
- Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context), InStr[i]));
- else
- Elts.push_back(ConstantInt::get(Type::getInt16Ty(Context),
- ByteSwap_16(InStr[i])));
- }
- } else if (ElTy->isIntegerTy(32)) {
- assert((Len&3) == 0 &&
- "Length in bytes should be a multiple of element size");
- const uint32_t *InStr = (const uint32_t *)TREE_STRING_POINTER(exp);
- for (unsigned i = 0; i != Len/4; ++i) {
- // gcc has constructed the initializer elements in the target endianness,
- // but we're going to treat them as ordinary ints from here, with
- // host endianness. Adjust if necessary.
- if (llvm::sys::isBigEndianHost() == BYTES_BIG_ENDIAN)
- Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context), InStr[i]));
- else
- Elts.push_back(ConstantInt::get(Type::getInt32Ty(Context),
- ByteSwap_32(InStr[i])));
- }
- } else {
- llvm_unreachable("Unknown character type!");
- }
-
- unsigned LenInElts = Len /
- TREE_INT_CST_LOW(TYPE_SIZE_UNIT(main_type(main_type(exp))));
- unsigned ConstantSize = StrTy->getNumElements();
-
- if (LenInElts != ConstantSize) {
- // If this is a variable sized array type, set the length to LenInElts.
- if (ConstantSize == 0) {
- tree Domain = TYPE_DOMAIN(main_type(exp));
- if (!Domain || !TYPE_MAX_VALUE(Domain)) {
- ConstantSize = LenInElts;
- StrTy = ArrayType::get(ElTy, LenInElts);
- }
- }
-
- if (ConstantSize < LenInElts) {
- // Only some chars are being used, truncate the string: char X[2] = "foo";
- Elts.resize(ConstantSize);
- } else {
- // Fill the end of the string with nulls.
- Constant *C = Constant::getNullValue(ElTy);
- for (; LenInElts != ConstantSize; ++LenInElts)
- Elts.push_back(C);
- }
- }
- return ConstantArray::get(StrTy, Elts);
-}
-
-static Constant *ConvertADDR_EXPR(tree exp, TargetFolder &Folder) {
- return AddressOfImpl(TREE_OPERAND(exp, 0), Folder);
-}
-
-/// ConvertArrayCONSTRUCTOR - Convert a CONSTRUCTOR with array or vector type.
-static Constant *ConvertArrayCONSTRUCTOR(tree exp, TargetFolder &Folder) {
- const TargetData &TD = getTargetData();
-
- tree init_type = main_type(exp);
- Type *InitTy = ConvertType(init_type);
-
- tree elt_type = main_type(init_type);
- Type *EltTy = ConvertType(elt_type);
-
- // Check that the element type has a known, constant size.
- assert(isSizeCompatible(elt_type) && "Variable sized array element!");
- uint64_t EltSize = TD.getTypeAllocSizeInBits(EltTy);
-
- /// Elts - The initial values to use for the array elements. A null entry
- /// means that the corresponding array element should be default initialized.
- std::vector<Constant*> Elts;
-
- // Resize to the number of array elements if known. This ensures that every
- // element will be at least default initialized even if no initial value is
- // given for it.
- uint64_t TypeElts = isa<ARRAY_TYPE>(init_type) ?
- ArrayLengthOf(init_type) : TYPE_VECTOR_SUBPARTS(init_type);
- if (TypeElts != NO_LENGTH)
- Elts.resize(TypeElts);
-
- // If GCC indices into the array need adjusting to make them zero indexed then
- // record here the value to subtract off.
- tree lower_bnd = NULL_TREE;
- if (isa<ARRAY_TYPE>(init_type) && TYPE_DOMAIN(init_type) &&
- !integer_zerop(TYPE_MIN_VALUE(TYPE_DOMAIN(init_type))))
- lower_bnd = TYPE_MIN_VALUE(TYPE_DOMAIN(init_type));
-
- unsigned NextIndex = 0;
- unsigned HOST_WIDE_INT ix;
- tree elt_index, elt_value;
- FOR_EACH_CONSTRUCTOR_ELT(CONSTRUCTOR_ELTS(exp), ix, elt_index, elt_value) {
- // Find and decode the constructor's value.
- Constant *Val = ConvertInitializerWithCast(elt_value, elt_type, Folder);
- uint64_t ValSize = TD.getTypeAllocSizeInBits(Val->getType());
- assert(ValSize <= EltSize && "Element initial value too big!");
-
- // If the initial value is smaller than the element size then pad it out.
- if (ValSize < EltSize) {
- unsigned PadBits = EltSize - ValSize;
- assert(PadBits % BITS_PER_UNIT == 0 && "Non-unit type size?");
- unsigned Units = PadBits / BITS_PER_UNIT;
- Constant *PaddedElt[] = {
- Val, UndefValue::get(GetUnitType(Context, Units))
- };
-
- Val = ConstantStruct::getAnon(PaddedElt);
- }
-
- // Get the index position of the element within the array. Note that this
- // can be NULL_TREE, which means that it belongs in the next available slot.
- tree index = elt_index;
-
- // The first and last elements to fill in, inclusive.
- unsigned FirstIndex, LastIndex;
- if (!index) {
- LastIndex = FirstIndex = NextIndex;
- } else if (isa<RANGE_EXPR>(index)) {
- tree first = TREE_OPERAND(index, 0);
- tree last = TREE_OPERAND(index, 1);
-
- // Subtract off the lower bound if any to ensure indices start from zero.
- if (lower_bnd != NULL_TREE) {
- first = fold_build2(MINUS_EXPR, main_type(first), first, lower_bnd);
- last = fold_build2(MINUS_EXPR, main_type(last), last, lower_bnd);
- }
-
- assert(host_integerp(first, 1) && host_integerp(last, 1) &&
- "Unknown range_expr!");
- FirstIndex = tree_low_cst(first, 1);
- LastIndex = tree_low_cst(last, 1);
- } else {
- // Subtract off the lower bound if any to ensure indices start from zero.
- if (lower_bnd != NULL_TREE)
- index = fold_build2(MINUS_EXPR, main_type(index), index, lower_bnd);
- assert(host_integerp(index, 1));
- FirstIndex = tree_low_cst(index, 1);
- LastIndex = FirstIndex;
- }
-
- // Process all of the elements in the range.
- if (LastIndex >= Elts.size())
- Elts.resize(LastIndex + 1);
- for (; FirstIndex <= LastIndex; ++FirstIndex)
- Elts[FirstIndex] = Val;
-
- NextIndex = FirstIndex;
- }
-
- unsigned NumElts = Elts.size();
-
- // Zero length array.
- if (!NumElts)
- return getDefaultValue(InitTy);
-
- // Default initialize any elements that had no initial value specified.
- Constant *DefaultElt = getDefaultValue(EltTy);
- for (unsigned i = 0; i != NumElts; ++i)
- if (!Elts[i])
- Elts[i] = DefaultElt;
-
- // Check whether any of the elements have different types. If so we need to
- // return a struct instead of an array. This can occur in cases where we have
- // an array of unions, and the various unions had different parts initialized.
- // While there, compute the maximum element alignment.
- bool isHomogeneous = true;
- Type *ActualEltTy = Elts[0]->getType();
- unsigned MaxAlign = TD.getABITypeAlignment(ActualEltTy);
- for (unsigned i = 1; i != NumElts; ++i)
- if (Elts[i]->getType() != ActualEltTy) {
- MaxAlign = std::max(TD.getABITypeAlignment(Elts[i]->getType()), MaxAlign);
- isHomogeneous = false;
- }
-
- // We guarantee that initializers are always at least as big as the LLVM type
- // for the initializer. If needed, append padding to ensure this.
- uint64_t TypeSize = TD.getTypeAllocSizeInBits(InitTy);
- if (NumElts * EltSize < TypeSize) {
- unsigned PadBits = TypeSize - NumElts * EltSize;
- assert(PadBits % BITS_PER_UNIT == 0 && "Non-unit type size?");
- unsigned Units = PadBits / BITS_PER_UNIT;
- Elts.push_back(UndefValue::get(GetUnitType(Context, Units)));
- isHomogeneous = false;
- }
-
- // If any elements are more aligned than the GCC type then we need to return a
- // packed struct. This can happen if the user forced a small alignment on the
- // array type.
- if (MaxAlign * 8 > TYPE_ALIGN(main_type(exp)))
- return ConstantStruct::getAnon(Context, Elts, /*Packed*/true);
-
- // Return as a struct if the contents are not homogeneous.
- if (!isHomogeneous) {
- std::vector<Constant*> StructElts;
- unsigned First = 0, E = Elts.size();
- while (First < E) {
- // Find the maximal value of Last s.t. all elements in the range
- // [First, Last) have the same type.
- Type *Ty = Elts[First]->getType();
- unsigned Last = First + 1;
- for (; Last != E; ++Last)
- if (Elts[Last]->getType() != Ty)
- break;
- unsigned NumSameType = Last - First;
- Constant *StructElt;
- if (NumSameType == 1)
- StructElt = Elts[First];
- else
- StructElt = ConstantArray::get(ArrayType::get(Ty, NumSameType),
- ArrayRef<Constant*>(&Elts[First],
- NumSameType));
- StructElts.push_back(StructElt);
- First = Last;
- }
- return ConstantStruct::getAnon(Context, StructElts);
- }
-
- // Make the IR more pleasant by returning as a vector if the GCC type was a
- // vector. However this is only correct if the initial values had the same
- // type as the vector element type, rather than some random other type.
- if (isa<VECTOR_TYPE>(init_type) && ActualEltTy == EltTy) {
- // If this is a vector of pointers, convert it to a vector of integers.
- if (isa<PointerType>(EltTy)) {
- IntegerType *IntPtrTy = getTargetData().getIntPtrType(Context);
- for (unsigned i = 0, e = Elts.size(); i != e; ++i)
- Elts[i] = Folder.CreatePtrToInt(Elts[i], IntPtrTy);
- }
- return ConstantVector::get(Elts);
- }
- return ConstantArray::get(ArrayType::get(ActualEltTy, Elts.size()), Elts);
-}
-
-/// FieldContents - A constant restricted to a range of bits. Any part of the
-/// constant outside of the range is discarded. The range may be bigger than
-/// the constant in which case any extra bits have an undefined value.
-namespace {
-
-class FieldContents {
- TargetFolder &Folder;
- SignedRange R; // The range of bits occupied by the constant.
- Constant *C; // The constant. May be null if the range is empty.
- int Starts; // The first bit of the constant is positioned at this offset.
-
- FieldContents(SignedRange r, Constant *c, int starts, TargetFolder &folder)
- : Folder(folder), R(r), C(c), Starts(starts) {
- assert((R.empty() || C) && "Need constant when range not empty!");
- }
-
- /// getAsBits - Return the bits in the range as an integer (or null if the
- /// range is empty).
- Constant *getAsBits() const {
- if (R.empty())
- return 0;
- Type *IntTy = IntegerType::get(Context, R.getWidth());
- return InterpretAsType(C, IntTy, R.getFirst() - Starts, Folder);
- }
-
- /// isSafeToReturnContentsDirectly - Return whether the current value for the
- /// constant properly represents the bits in the range and so can be handed to
- /// the user as is.
- bool isSafeToReturnContentsDirectly(const TargetData &TD) const {
- // If there is no constant (allowed when the range is empty) then one needs
- // to be created.
- if (!C)
- return false;
- // If the first bit of the constant is not the first bit of the range then
- // it needs to be displaced before being passed to the user.
- if (!R.empty() && R.getFirst() != Starts)
- return false;
- Type *Ty = C->getType();
- // Check that the type isn't something like i17. Avoiding types like this
- // is not needed for correctness, but makes life easier for the optimizers.
- if ((Ty->getPrimitiveSizeInBits() % BITS_PER_UNIT) != 0)
- return false;
- // If the constant is wider than the range then it needs to be truncated
- // before being passed to the user.
- unsigned AllocBits = TD.getTypeAllocSizeInBits(Ty);
- return AllocBits <= (unsigned)R.getWidth();
- }
-
-public:
- /// get - Fill the range [first, last) with the given constant.
- static FieldContents get(int first, int last, Constant *c,
- TargetFolder &folder) {
- return FieldContents(SignedRange(first, last), c, first, folder);
- }
-
- // Copy assignment operator.
- FieldContents &operator=(const FieldContents &other) {
- R = other.R; C = other.C; Starts = other.Starts; Folder = other.Folder;
- return *this;
- }
-
- /// getRange - Return the range occupied by this field.
- SignedRange getRange() const { return R; }
-
- /// ChangeRangeTo - Change the range occupied by this field.
- void ChangeRangeTo(SignedRange r) { R = r; }
-
- /// JoinWith - Form the union of this field with another field (which must be
- /// disjoint from this one). After this the range will be the convex hull of
- /// the ranges of the two fields.
- void JoinWith(const FieldContents &S);
-
- /// extractContents - Return the contained bits as a constant which contains
- /// every defined bit in the range, yet is guaranteed to have alloc size no
- /// larger than the width of the range. Unlike the other methods for this
- /// class, this one requires that the width of the range be a multiple of an
- /// address unit, which usually means a multiple of 8.
- Constant *extractContents(const TargetData &TD) {
- assert(R.getWidth() % BITS_PER_UNIT == 0 && "Boundaries not aligned?");
- /// If the current value for the constant can be used to represent the bits
- /// in the range then just return it.
- if (isSafeToReturnContentsDirectly(TD))
- return C;
- // If the range is empty then return a constant with zero size.
- if (R.empty()) {
- // Return an empty array. Remember the returned value as an optimization
- // in case we are called again.
- C = UndefValue::get(GetUnitType(Context, 0));
- assert(isSafeToReturnContentsDirectly(TD) && "Unit over aligned?");
- return C;
- }
- // If the type is something like i17 then round it up to a multiple of a
- // byte. This is not needed for correctness, but helps the optimizers.
- if ((C->getType()->getPrimitiveSizeInBits() % BITS_PER_UNIT) != 0) {
- Type *Ty = C->getType();
- assert(Ty->isIntegerTy() && "Non-integer type with non-byte size!");
- unsigned BitWidth = RoundUpToAlignment(Ty->getPrimitiveSizeInBits(),
- BITS_PER_UNIT);
- Ty = IntegerType::get(Context, BitWidth);
- C = TheFolder->CreateZExtOrBitCast(C, Ty);
- if (isSafeToReturnContentsDirectly(TD))
- return C;
- }
- // Turn the contents into a bunch of bytes. Remember the returned value as
- // an optimization in case we are called again.
- // TODO: If the contents only need to be truncated and have struct or array
- // type then we could try to do the truncation by dropping or modifying the
- // last elements of the constant, maybe yielding something less horrible.
- unsigned Units = R.getWidth() / BITS_PER_UNIT;
- C = InterpretAsType(C, GetUnitType(Context, Units), R.getFirst() - Starts,
- Folder);
- Starts = R.getFirst();
- assert(isSafeToReturnContentsDirectly(TD) && "Unit over aligned?");
- return C;
- }
-};
-
-} // Unnamed namespace.
-
-/// JoinWith - Form the union of this field with another field (which must be
-/// disjoint from this one). After this the range will be the convex hull of
-/// the ranges of the two fields.
-void FieldContents::JoinWith(const FieldContents &S) {
- if (S.R.empty())
- return;
- if (R.empty()) {
- *this = S;
- return;
- }
- // Consider the contents of the fields to be bunches of bits and paste them
- // together. This can result in a nasty integer constant expression, but as
- // we only get here for bitfields that's mostly harmless.
- BitSlice Bits(R, getAsBits());
- Bits.Merge (BitSlice(S.R, S.getAsBits()), Folder);
- R = Bits.getRange();
- C = Bits.getBits(R, Folder);
- Starts = R.empty() ? 0 : R.getFirst();
-}
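// A minimal illustrative sketch (hypothetical helper, not part of the file):
// how the FieldContents machinery above can paste two byte-sized constants
// together, assuming BITS_PER_UNIT is 8 on the target.
static Constant *ExampleJoinTwoBytes(TargetFolder &Folder) {
  Constant *Lo = ConstantInt::get(Type::getInt8Ty(Context), 0x12);
  Constant *Hi = ConstantInt::get(Type::getInt8Ty(Context), 0x34);
  // Place Lo in bits [0, 8) and Hi in bits [8, 16), then merge the two
  // disjoint ranges into one field covering bits [0, 16).
  FieldContents F = FieldContents::get(0, 8, Lo, Folder);
  F.JoinWith(FieldContents::get(8, 16, Hi, Folder));
  // Extract a constant whose alloc size is no larger than the 16 bit range.
  return F.extractContents(getTargetData());
}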
-
-static Constant *ConvertRecordCONSTRUCTOR(tree exp, TargetFolder &Folder) {
- // FIXME: This new logic, especially the handling of bitfields, is untested
- // and probably wrong on big-endian machines.
- IntervalList<FieldContents, int, 8> Layout;
- const TargetData &TD = getTargetData();
- tree type = main_type(exp);
- Type *Ty = ConvertType(type);
- uint64_t TypeSize = TD.getTypeAllocSizeInBits(Ty);
-
- // Ensure that fields without an initial value are default initialized by
- // explicitly setting the starting value for all fields to be zero. If an
- // initial value is supplied for a field then the value will overwrite and
- // replace the zero starting value later.
- if (flag_default_initialize_globals) {
- // Record all interesting fields so they can easily be visited backwards.
- SmallVector<tree, 16> Fields;
- for (tree field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
- if (!isa<FIELD_DECL>(field)) continue;
- // Ignore fields with variable or unknown position since they cannot be
- // default initialized.
- if (!OffsetIsLLVMCompatible(field))
- continue;
- Fields.push_back(field);
- }
-
- // Process the fields in reverse order. This is for the benefit of union
- // types for which the first field must be default initialized (iterating
- // in forward order would default initialize the last field).
- for (SmallVector<tree, 16>::reverse_iterator I = Fields.rbegin(),
- E = Fields.rend(); I != E; ++I) {
- tree field = *I;
- uint64_t FirstBit = getFieldOffsetInBits(field);
- assert(FirstBit <= TypeSize && "Field off end of type!");
- // Determine the width of the field.
- uint64_t BitWidth;
- Type *FieldTy = ConvertType(TREE_TYPE(field));
- if (isInt64(DECL_SIZE(field), true)) {
- // The field has a size and it is a constant, so use it. Note that
- // this size may be smaller than the type size. For example, if the
- // next field starts inside alignment padding at the end of this one
- // then DECL_SIZE will be the size with the padding used by the next
- // field not included.
- BitWidth = getInt64(DECL_SIZE(field), true);
- } else {
- // If the field has variable or unknown size then use the size of the
- // LLVM type instead as it gives the minimum size the field may have.
- if (!FieldTy->isSized())
- // An incomplete type - this field cannot be default initialized.
- continue;
- BitWidth = TD.getTypeAllocSizeInBits(FieldTy);
- if (FirstBit + BitWidth > TypeSize)
- BitWidth = TypeSize - FirstBit;
- }
- uint64_t LastBit = FirstBit + BitWidth;
-
- // Zero the bits occupied by the field. It is safe to use FieldTy here as
- // it is guaranteed to cover all parts of the GCC type that can be default
- // initialized. This makes for nicer IR than just using a bunch of bytes.
- Constant *Zero = Constant::getNullValue(FieldTy);
- Layout.AddInterval(FieldContents::get(FirstBit, LastBit, Zero, Folder));
- }
- }
-
- // For each field for which an initial value was specified, set the bits
- // occupied by the field to that value.
- unsigned HOST_WIDE_INT ix;
- tree field, next_field, value;
- next_field = TYPE_FIELDS(type);
- FOR_EACH_CONSTRUCTOR_ELT(CONSTRUCTOR_ELTS(exp), ix, field, value) {
- if (!field) {
- // Move on to the next FIELD_DECL, skipping contained methods, types etc.
- field = next_field;
- while (1) {
- assert(field && "Fell off end of record!");
- if (isa<FIELD_DECL>(field)) break;
- field = TREE_CHAIN(field);
- }
- }
- next_field = TREE_CHAIN(field);
-
- assert(isa<FIELD_DECL>(field) && "Initial value not for a field!");
- assert(OffsetIsLLVMCompatible(field) && "Field position not known!");
- // Turn the initial value for this field into an LLVM constant.
- Constant *Init = ConvertInitializerWithCast(value, main_type(field),
- Folder);
- // Work out the range of bits occupied by the field.
- uint64_t FirstBit = getFieldOffsetInBits(field);
- assert(FirstBit <= TypeSize && "Field off end of type!");
- // If a size was specified for the field then use it. Otherwise take the
- // size from the initial value.
- uint64_t BitWidth = isInt64(DECL_SIZE(field), true) ?
- getInt64(DECL_SIZE(field), true) :
- TD.getTypeAllocSizeInBits(Init->getType());
- uint64_t LastBit = FirstBit + BitWidth;
-
- // Set the bits occupied by the field to the initial value.
- Layout.AddInterval(FieldContents::get(FirstBit, LastBit, Init, Folder));
- }
-
- // Force all fields to begin and end on a byte boundary. This automagically
- // takes care of bitfields.
- Layout.AlignBoundaries(BITS_PER_UNIT);
-
- // Determine whether to return a packed struct. If returning an ordinary
- // struct would result in an initializer that is more aligned than its GCC
- // type then return a packed struct instead. If a field's alignment would
- // make it start after its desired position then also use a packed struct.
- bool Pack = false;
- unsigned MaxAlign = TYPE_ALIGN(type);
- for (unsigned i = 0, e = Layout.getNumIntervals(); i != e; ++i) {
- FieldContents F = Layout.getInterval(i);
- unsigned First = F.getRange().getFirst();
- Constant *Val = F.extractContents(TD);
- unsigned Alignment = TD.getABITypeAlignment(Val->getType()) * 8;
- if (Alignment > MaxAlign || First % Alignment) {
- Pack = true;
- break;
- }
- }
-
- // Create the elements that will make up the struct. As well as the fields
- // themselves there may also be padding elements.
- std::vector<Constant*> Elts;
- Elts.reserve(Layout.getNumIntervals());
- unsigned EndOfPrevious = 0; // Offset of first bit after previous element.
- for (unsigned i = 0, e = Layout.getNumIntervals(); i != e; ++i) {
- FieldContents F = Layout.getInterval(i);
- unsigned First = F.getRange().getFirst();
- Constant *Val = F.extractContents(TD);
- assert(EndOfPrevious <= First && "Previous field too big!");
-
- // If there is a gap then we may need to fill it with padding.
- if (First > EndOfPrevious) {
- // There is a gap between the end of the previous field and the start of
- // this one. The alignment of the field contents may mean that it will
- // start at the right offset anyway, but if not then insert padding.
- bool NeedPadding = true;
- if (!Pack) {
- // If the field's alignment will take care of the gap then there is no
- // need for padding.
- unsigned Alignment = TD.getABITypeAlignment(Val->getType()) * 8;
- if (First == (EndOfPrevious + Alignment - 1) / Alignment * Alignment)
- NeedPadding = false;
- }
- if (NeedPadding) {
- // Fill the gap with undefined bytes.
- assert((First - EndOfPrevious) % BITS_PER_UNIT == 0 &&
- "Non-unit field boundaries!");
- unsigned Units = (First - EndOfPrevious) / BITS_PER_UNIT;
- Elts.push_back(UndefValue::get(GetUnitType(Context, Units)));
- }
- }
-
- // Append the field.
- Elts.push_back(Val);
- EndOfPrevious = First + TD.getTypeAllocSizeInBits(Val->getType());
- }
-
- // We guarantee that initializers are always at least as big as the LLVM type
- // of the GCC type being initialized. If needed, append padding to ensure this.
- if (EndOfPrevious < TypeSize) {
- assert((TypeSize - EndOfPrevious) % BITS_PER_UNIT == 0 &&
- "Non-unit type size?");
- unsigned Units = (TypeSize - EndOfPrevious) / BITS_PER_UNIT;
- Elts.push_back(UndefValue::get(GetUnitType(Context, Units)));
- }
-
- // Okay, we're done. Return the computed elements as a constant with the type
- // of exp if possible.
- if (StructType *STy = dyn_cast<StructType>(Ty))
- if (STy->isPacked() == Pack && STy->getNumElements() == Elts.size()) {
- bool EltTypesMatch = true;
- for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
- Type *EltTy = Elts[i]->getType();
- Type *FieldTy = STy->getElementType(i);
- if (EltTy == FieldTy)
- continue;
- // When a recursive record type is converted, some of its pointer fields
- // may be converted to the artificial type {}* to break the recursion. As
- // type converting the field directly gives the proper pointer type, the
- // result is a mismatch between the field and element types. Fix it up.
- if (EltTy->isPointerTy() && FieldTy->isPointerTy()) {
- Elts[i] = Folder.CreateBitCast(Elts[i], FieldTy);
- continue;
- }
- // Too hard, just give up.
- EltTypesMatch = false;
- break;
- }
- if (EltTypesMatch)
- return ConstantStruct::get(STy, Elts);
- }
-
- // Otherwise return the computed elements as an anonymous struct.
- return ConstantStruct::getAnon(Context, Elts, Pack);
-}
-
-static Constant *ConvertCONSTRUCTOR(tree exp, TargetFolder &Folder) {
- // If the constructor is empty then default initialize all of the components.
- // It is safe to use the LLVM type here as it covers every part of the GCC
- // type that can possibly be default initialized.
- if (CONSTRUCTOR_NELTS(exp) == 0)
- return getDefaultValue(ConvertType(TREE_TYPE(exp)));
-
- switch (TREE_CODE(TREE_TYPE(exp))) {
- default:
- debug_tree(exp);
- llvm_unreachable("Unknown constructor!");
- case VECTOR_TYPE:
- case ARRAY_TYPE: return ConvertArrayCONSTRUCTOR(exp, Folder);
- case QUAL_UNION_TYPE:
- case RECORD_TYPE:
- case UNION_TYPE: return ConvertRecordCONSTRUCTOR(exp, Folder);
- }
-}
-
-static Constant *ConvertMINUS_EXPR(tree exp, TargetFolder &Folder) {
- Constant *LHS = getAsRegister(TREE_OPERAND(exp, 0), Folder);
- Constant *RHS = getAsRegister(TREE_OPERAND(exp, 1), Folder);
- return RepresentAsMemory(Folder.CreateSub(LHS, RHS), main_type(exp), Folder);
-}
-
-static Constant *ConvertPLUS_EXPR(tree exp, TargetFolder &Folder) {
- Constant *LHS = getAsRegister(TREE_OPERAND(exp, 0), Folder);
- Constant *RHS = getAsRegister(TREE_OPERAND(exp, 1), Folder);
- return RepresentAsMemory(Folder.CreateAdd(LHS, RHS), main_type(exp), Folder);
-}
-
-static Constant *ConvertPOINTER_PLUS_EXPR(tree exp, TargetFolder &Folder) {
- Constant *Ptr = getAsRegister(TREE_OPERAND(exp, 0), Folder); // Pointer
- Constant *Idx = getAsRegister(TREE_OPERAND(exp, 1), Folder); // Offset (units)
-
- // Convert the pointer into an i8* and add the offset to it.
- Ptr = Folder.CreateBitCast(Ptr, GetUnitPointerType(Context));
- Constant *Result = POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Folder.CreateInBoundsGetElementPtr(Ptr, Idx) :
- Folder.CreateGetElementPtr(Ptr, Idx);
-
- // The result may be of a different pointer type.
- Result = Folder.CreateBitCast(Result, getRegType(TREE_TYPE(exp)));
-
- return RepresentAsMemory(Result, main_type(exp), Folder);
-}
-
-static Constant *ConvertVIEW_CONVERT_EXPR(tree exp, TargetFolder &Folder) {
- // Does not change the bits, only the type they are considered to be.
- return ConvertInitializerImpl(TREE_OPERAND(exp, 0), Folder);
-}
-
-/// ConvertInitializerImpl - Implementation of ConvertInitializer.
-static Constant *ConvertInitializerImpl(tree exp, TargetFolder &Folder) {
- assert(!isa<CONST_DECL>(exp) && !HAS_RTL_P(exp) &&
- "Cache collision with decl_llvm!");
-
- // If we already converted the initializer then return the cached copy.
- if (Constant *C = cast_or_null<Constant>(getCachedValue(exp)))
- return C;
-
- Constant *Init;
- switch (TREE_CODE(exp)) {
- default:
- debug_tree(exp);
- llvm_unreachable("Unknown constant to convert!");
- case COMPLEX_CST:
- case INTEGER_CST:
- case REAL_CST:
- case VECTOR_CST:
- Init = ConvertCST(exp, Folder);
- break;
- case STRING_CST:
- Init = ConvertSTRING_CST(exp, Folder);
- break;
- case ADDR_EXPR:
- Init = ConvertADDR_EXPR(exp, Folder);
- break;
- case CONSTRUCTOR:
- Init = ConvertCONSTRUCTOR(exp, Folder);
- break;
- case CONVERT_EXPR:
- case NOP_EXPR:
- Init = ConvertInitializerWithCast(TREE_OPERAND(exp, 0), main_type(exp),
- Folder);
- break;
- case MINUS_EXPR:
- Init = ConvertMINUS_EXPR(exp, Folder);
- break;
- case PLUS_EXPR:
- Init = ConvertPLUS_EXPR(exp, Folder);
- break;
- case POINTER_PLUS_EXPR:
- Init = ConvertPOINTER_PLUS_EXPR(exp, Folder);
- break;
- case VIEW_CONVERT_EXPR:
- Init = ConvertVIEW_CONVERT_EXPR(exp, Folder);
- break;
- }
-
- // Make the IR easier to read by returning a constant of the expected type if
- // it is safe and efficient to do so.
- if (!isa<AGGREGATE_TYPE>(TREE_TYPE(exp)))
- Init = InterpretAsType(Init, ConvertType(TREE_TYPE(exp)), 0, Folder);
-
-#ifndef NDEBUG
- // Check that the guarantees we make about the returned value actually hold.
- // The initializer should always be at least as big as the constructor's type,
- // and except in the cases of incomplete types or types with variable size the
- // sizes should be the same.
- Type *Ty = ConvertType(TREE_TYPE(exp));
- if (Ty->isSized()) {
- uint64_t InitSize = getTargetData().getTypeAllocSizeInBits(Init->getType());
- uint64_t TypeSize = getTargetData().getTypeAllocSizeInBits(Ty);
- if (InitSize < TypeSize) {
- debug_tree(exp);
- llvm_unreachable("Constant too small for type!");
- }
- }
- if (getTargetData().getABITypeAlignment(Init->getType()) * 8 >
- TYPE_ALIGN(main_type(exp))) {
- debug_tree(exp);
- llvm_unreachable("Constant over aligned!");
- }
-#endif
-
- // Cache the result of converting the initializer since the same tree is often
- // converted multiple times.
- setCachedValue(exp, Init);
-
- return Init;
-}
-
-/// ConvertInitializer - Convert the initial value for a global variable to an
-/// equivalent LLVM constant. Also handles constant constructors. The type of
-/// the returned value may be pretty much anything. All that is guaranteed is
-/// that its alloc size is equal to the size of the initial value and that its
-/// alignment is less than or equal to the initial value's GCC type alignment.
-/// Note that the GCC type may have variable size or no size, in which case the
-/// size is determined by the initial value. When this happens the size of the
-/// initial value may exceed the alloc size of the LLVM memory type generated
-/// for the GCC type (see ConvertType); it is never smaller than the alloc size.
-Constant *ConvertInitializer(tree exp) {
- TargetFolder Folder(&getTargetData());
- return ConvertInitializerImpl(exp, Folder);
-}
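// A minimal usage sketch (hypothetical helper, not part of the file),
// assuming 'decl' is a VAR_DECL with a constant DECL_INITIAL: because the
// initializer's type may differ from the variable's converted type, the
// global is created with the initializer's own type, much as Backend.cpp
// does when emitting globals.
static GlobalVariable *EmitExampleGlobal(tree decl) {
  Constant *Init = ConvertInitializer(DECL_INITIAL(decl));
  return new GlobalVariable(*TheModule, Init->getType(), TREE_READONLY(decl),
                            GlobalValue::InternalLinkage, Init, ".example");
}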
-
-
-//===----------------------------------------------------------------------===//
-// ... AddressOf ...
-//===----------------------------------------------------------------------===//
-
-/// AddressOfSimpleConstant - Return the address of a simple constant, such as a
-/// number or constructor.
-static Constant *AddressOfSimpleConstant(tree exp, TargetFolder &Folder) {
- Constant *Init = ConvertInitializerImpl(exp, Folder);
-
- // Cache the constants to avoid making obvious duplicates that have to be
- // folded by the optimizer.
- static DenseMap<Constant*, GlobalVariable*> CSTCache;
- GlobalVariable *&Slot = CSTCache[Init];
- if (Slot)
- return Slot;
-
- // Create a new global variable.
- Slot = new GlobalVariable(*TheModule, Init->getType(), true,
- GlobalVariable::LinkerPrivateLinkage, Init, ".cst");
- unsigned align = TYPE_ALIGN(main_type(exp));
-#ifdef CONSTANT_ALIGNMENT
- align = CONSTANT_ALIGNMENT(exp, align);
-#endif
- Slot->setAlignment(align);
- // Allow identical constants to be merged if the user allowed it.
- // FIXME: maybe this flag should be set unconditionally, and instead the
- // ConstantMerge pass should be disabled if flag_merge_constants is zero.
- Slot->setUnnamedAddr(flag_merge_constants);
-
- return Slot;
-}
-
-/// AddressOfARRAY_REF - Return the address of an array element or slice.
-static Constant *AddressOfARRAY_REF(tree exp, TargetFolder &Folder) {
- tree array = TREE_OPERAND(exp, 0);
- tree index = TREE_OPERAND(exp, 1);
- tree index_type = main_type(index);
- assert(isa<ARRAY_TYPE>(TREE_TYPE(array)) && "Unknown ARRAY_REF!");
-
- // Check for variable sized reference.
- assert(isSizeCompatible(main_type(main_type(array))) &&
- "Global with variable size?");
-
- // Get the index into the array as an LLVM integer constant.
- Constant *IndexVal = getAsRegister(index, Folder);
-
- // Subtract off the lower bound, if any.
- tree lower_bound = array_ref_low_bound(exp);
- if (!integer_zerop(lower_bound)) {
- // Get the lower bound as an LLVM integer constant.
- Constant *LowerBoundVal = getAsRegister(lower_bound, Folder);
- IndexVal = Folder.CreateSub(IndexVal, LowerBoundVal, hasNUW(index_type),
- hasNSW(index_type));
- }
-
- // Avoid any assumptions about how the array type is represented in LLVM by
- // doing the GEP on a pointer to the first array element.
- Constant *ArrayAddr = AddressOfImpl(array, Folder);
- Type *EltTy = ConvertType(main_type(main_type(array)));
- ArrayAddr = Folder.CreateBitCast(ArrayAddr, EltTy->getPointerTo());
-
- return POINTER_TYPE_OVERFLOW_UNDEFINED ?
- Folder.CreateInBoundsGetElementPtr(ArrayAddr, IndexVal) :
- Folder.CreateGetElementPtr(ArrayAddr, IndexVal);
-}
-
-/// AddressOfCOMPONENT_REF - Return the address of a field in a record.
-static Constant *AddressOfCOMPONENT_REF(tree exp, TargetFolder &Folder) {
- tree field_decl = TREE_OPERAND(exp, 1);
-
- // Compute the field offset in units from the start of the record.
- Constant *Offset;
- if (TREE_OPERAND(exp, 2)) {
- Offset = getAsRegister(TREE_OPERAND(exp, 2), Folder);
- // At this point the offset is measured in multiples of exactly
- // (DECL_OFFSET_ALIGN / BITS_PER_UNIT) units. Convert it to units.
- unsigned factor = DECL_OFFSET_ALIGN(field_decl) / BITS_PER_UNIT;
- if (factor != 1)
- Offset = Folder.CreateMul(Offset, ConstantInt::get(Offset->getType(),
- factor));
- } else {
- assert(DECL_FIELD_OFFSET(field_decl) && "Field offset not available!");
- Offset = getAsRegister(DECL_FIELD_OFFSET(field_decl), Folder);
- }
-
- // Here BitStart gives the offset of the field in bits from Offset.
- uint64_t BitStart = getInt64(DECL_FIELD_BIT_OFFSET(field_decl), true);
- // Incorporate as much of it as possible into the pointer computation.
- uint64_t Units = BitStart / BITS_PER_UNIT;
- if (Units > 0) {
- Offset = Folder.CreateAdd(Offset, ConstantInt::get(Offset->getType(),
- Units));
- BitStart -= Units * BITS_PER_UNIT;
- (void)BitStart;
- }
- assert(BitStart == 0 &&
- "It's a bitfield reference or we didn't get to the field!");
-
- Type *UnitPtrTy = GetUnitPointerType(Context);
- Constant *StructAddr = AddressOfImpl(TREE_OPERAND(exp, 0), Folder);
- Constant *FieldPtr = Folder.CreateBitCast(StructAddr, UnitPtrTy);
- FieldPtr = Folder.CreateInBoundsGetElementPtr(FieldPtr, Offset);
-
- return FieldPtr;
-}
-
-/// AddressOfCOMPOUND_LITERAL_EXPR - Return the address of a compound literal.
-static Constant *AddressOfCOMPOUND_LITERAL_EXPR(tree exp, TargetFolder &Folder){
- tree decl = DECL_EXPR_DECL(COMPOUND_LITERAL_EXPR_DECL_EXPR(exp));
- return AddressOfImpl(decl, Folder);
-}
-
-/// AddressOfDecl - Return the address of a global.
-static Constant *AddressOfDecl(tree exp, TargetFolder &) {
- return cast<Constant>(DEFINITION_LLVM(exp));
-}
-
-/// AddressOfINDIRECT_REF - Return the address of a dereference.
-static Constant *AddressOfINDIRECT_REF(tree exp, TargetFolder &Folder) {
- // The address is just the dereferenced operand. Get it as an LLVM constant.
- return getAsRegister(TREE_OPERAND(exp, 0), Folder);
-}
-
-/// AddressOfLABEL_DECL - Return the address of a label.
-static Constant *AddressOfLABEL_DECL(tree exp, TargetFolder &) {
- extern TreeToLLVM *TheTreeToLLVM;
-
- assert(TheTreeToLLVM &&
- "taking the address of a label while not compiling the function!");
-
- // Figure out which function this is for, verify it's the one we're compiling.
- if (DECL_CONTEXT(exp)) {
- assert(isa<FUNCTION_DECL>(DECL_CONTEXT(exp)) &&
- "Address of label in nested function?");
- assert(TheTreeToLLVM->getFUNCTION_DECL() == DECL_CONTEXT(exp) &&
- "Taking the address of a label that isn't in the current fn!?");
- }
-
- return TheTreeToLLVM->AddressOfLABEL_DECL(exp);
-}
-
-#if (GCC_MINOR > 5)
-/// AddressOfMEM_REF - Return the address of a memory reference.
-static Constant *AddressOfMEM_REF(tree exp, TargetFolder &Folder) {
- // The address is the first operand offset in bytes by the second.
- Constant *Addr = getAsRegister(TREE_OPERAND(exp, 0), Folder);
- if (integer_zerop(TREE_OPERAND(exp, 1)))
- return Addr;
-
- // Convert to a byte pointer and displace by the offset.
- Addr = Folder.CreateBitCast(Addr, GetUnitPointerType(Context));
- APInt Delta = getIntegerValue(TREE_OPERAND(exp, 1));
- Constant *Offset = ConstantInt::get(Context, Delta);
- // The address is always inside the referenced object, so "inbounds".
- return Folder.CreateInBoundsGetElementPtr(Addr, Offset);
-}
-#endif
-
-/// AddressOfImpl - Implementation of AddressOf.
-static Constant *AddressOfImpl(tree exp, TargetFolder &Folder) {
- Constant *Addr;
-
- switch (TREE_CODE(exp)) {
- default:
- debug_tree(exp);
- llvm_unreachable("Unknown constant to take the address of!");
- case COMPLEX_CST:
- case FIXED_CST:
- case INTEGER_CST:
- case REAL_CST:
- case STRING_CST:
- case VECTOR_CST:
- Addr = AddressOfSimpleConstant(exp, Folder);
- break;
- case ARRAY_RANGE_REF:
- case ARRAY_REF:
- Addr = AddressOfARRAY_REF(exp, Folder);
- break;
- case COMPONENT_REF:
- Addr = AddressOfCOMPONENT_REF(exp, Folder);
- break;
- case COMPOUND_LITERAL_EXPR:
- Addr = AddressOfCOMPOUND_LITERAL_EXPR(exp, Folder);
- break;
- case CONSTRUCTOR:
- Addr = AddressOfSimpleConstant(exp, Folder);
- break;
- case CONST_DECL:
- case FUNCTION_DECL:
- case VAR_DECL:
- Addr = AddressOfDecl(exp, Folder);
- break;
- case INDIRECT_REF:
-#if (GCC_MINOR < 6)
- case MISALIGNED_INDIRECT_REF:
-#endif
- Addr = AddressOfINDIRECT_REF(exp, Folder);
- break;
- case LABEL_DECL:
- Addr = AddressOfLABEL_DECL(exp, Folder);
- break;
-#if (GCC_MINOR > 5)
- case MEM_REF:
- Addr = AddressOfMEM_REF(exp, Folder);
- break;
-#endif
- }
-
- // Ensure that the address has the expected type. It is simpler to do this
- // once here rather than in every AddressOf helper.
- Type *Ty;
- if (isa<VOID_TYPE>(TREE_TYPE(exp)))
- Ty = GetUnitPointerType(Context); // void* -> i8*.
- else
- Ty = ConvertType(TREE_TYPE(exp))->getPointerTo();
-
- return Folder.CreateBitCast(Addr, Ty);
-}
-
-/// AddressOf - Given an expression with a constant address such as a constant,
-/// a global variable or a label, returns the address. The type of the returned value
-/// is always a pointer type and, as long as 'exp' does not have void type, the
-/// type of the pointee is the memory type that corresponds to the type of exp
-/// (see ConvertType).
-Constant *AddressOf(tree exp) {
- TargetFolder Folder(&getTargetData());
- return AddressOfImpl(exp, Folder);
-}
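// A minimal sketch (hypothetical helper, not part of the file) spelling out
// the type guarantee documented above: whatever kind of expression 'exp' is,
// the returned constant is a pointer to the memory type of 'exp', or an i8*
// if 'exp' has void type.
static void CheckAddressOfType(tree exp) {
  Constant *Addr = AddressOf(exp);
  Type *ExpectedTy = isa<VOID_TYPE>(TREE_TYPE(exp)) ?
    GetUnitPointerType(Context) : ConvertType(TREE_TYPE(exp))->getPointerTo();
  assert(Addr->getType() == ExpectedTy && "AddressOf returned the wrong type!");
  (void)Addr; (void)ExpectedTy;
}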
Modified: dragonegg/trunk/src/Convert.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Convert.cpp?rev=157918&r1=157917&r2=157918&view=diff
==============================================================================
--- dragonegg/trunk/src/Convert.cpp (original)
+++ dragonegg/trunk/src/Convert.cpp Mon Jun 4 02:54:40 2012
@@ -23,9 +23,9 @@
// Plugin headers
#include "dragonegg/ABI.h"
#include "dragonegg/Aliasing.h"
-#include "dragonegg/Constants.h"
+#include "dragonegg/ConstantConversion.h"
#include "dragonegg/Debug.h"
-#include "dragonegg/Types.h"
+#include "dragonegg/TypeConversion.h"
// LLVM headers
#include "llvm/Module.h"
Copied: dragonegg/trunk/src/TypeConversion.cpp (from r157915, dragonegg/trunk/src/Types.cpp)
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/TypeConversion.cpp?p2=dragonegg/trunk/src/TypeConversion.cpp&p1=dragonegg/trunk/src/Types.cpp&r1=157915&r2=157918&rev=157918&view=diff
==============================================================================
--- dragonegg/trunk/src/Types.cpp (original)
+++ dragonegg/trunk/src/TypeConversion.cpp Mon Jun 4 02:54:40 2012
@@ -1,4 +1,4 @@
-//===----------- Types.cpp - Converting GCC types to LLVM types -----------===//
+//===------- TypeConversion.cpp - Converting GCC types to LLVM types ------===//
//
// Copyright (C) 2005 to 2012 Chris Lattner, Duncan Sands et al.
//
@@ -23,7 +23,7 @@
// Plugin headers
#include "dragonegg/ABI.h"
#include "dragonegg/Cache.h"
-#include "dragonegg/Types.h"
+#include "dragonegg/TypeConversion.h"
#include "dragonegg/ADT/IntervalList.h"
#include "dragonegg/ADT/Range.h"
Removed: dragonegg/trunk/src/Types.cpp
URL: http://llvm.org/viewvc/llvm-project/dragonegg/trunk/src/Types.cpp?rev=157917&view=auto
==============================================================================
--- dragonegg/trunk/src/Types.cpp (original)
+++ dragonegg/trunk/src/Types.cpp (removed)
@@ -1,1636 +0,0 @@
-//===----------- Types.cpp - Converting GCC types to LLVM types -----------===//
-//
-// Copyright (C) 2005 to 2012 Chris Lattner, Duncan Sands et al.
-//
-// This file is part of DragonEgg.
-//
-// DragonEgg is free software; you can redistribute it and/or modify it under
-// the terms of the GNU General Public License as published by the Free Software
-// Foundation; either version 2, or (at your option) any later version.
-//
-// DragonEgg is distributed in the hope that it will be useful, but WITHOUT ANY
-// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
-// A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License along with
-// DragonEgg; see the file COPYING. If not, write to the Free Software
-// Foundation, 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA.
-//
-//===----------------------------------------------------------------------===//
-// This is the code that converts GCC tree types into LLVM types.
-//===----------------------------------------------------------------------===//
-
-// Plugin headers
-#include "dragonegg/ABI.h"
-#include "dragonegg/Cache.h"
-#include "dragonegg/Types.h"
-#include "dragonegg/ADT/IntervalList.h"
-#include "dragonegg/ADT/Range.h"
-
-// LLVM headers
-#include "llvm/ADT/SCCIterator.h"
-
-// System headers
-#include <gmp.h>
-#include <map>
-
-// GCC headers
-#include "auto-host.h"
-#ifndef ENABLE_BUILD_WITH_CXX
-extern "C" {
-#endif
-#include "config.h"
-// Stop GCC declaring 'getopt' as it can clash with the system's declaration.
-#undef HAVE_DECL_GETOPT
-#include "system.h"
-#include "coretypes.h"
-#include "tm.h"
-#include "tree.h"
-
-#include "flags.h"
-#ifndef ENABLE_BUILD_WITH_CXX
-} // extern "C"
-#endif
-
-// Trees header.
-#include "dragonegg/Trees.h"
-
-static LLVMContext &Context = getGlobalContext();
-
-/// SCCInProgress - Set of mutually dependent types currently being converted.
-static const std::vector<tree_node*> *SCCInProgress;
-
-//===----------------------------------------------------------------------===//
-// ... ContainedTypeIterator ...
-//===----------------------------------------------------------------------===//
-
-/// ContainedTypeIterator - A convenience class for viewing a type as a graph,
-/// where the nodes of the graph are types and there is an edge from type A to
-/// type B iff A "contains" B. A record type contains the types of its fields,
-/// an array type contains the element type, a pointer type contains the type
-/// pointed to and so on. Use the begin, end and increment methods to iterate
-/// over all of the types contained in a given type.
-namespace {
-
- class ContainedTypeIterator {
- /// type_ref - Either a TREE_LIST node, in which case TREE_VALUE gives the
- /// contained type, or some other kind of tree node, in which case TREE_TYPE
- /// gives the contained type. A null value indicates the end iterator.
- tree type_ref;
-
- /// ContainedTypeIterator - Convenience constructor for internal use.
- explicit ContainedTypeIterator(const tree& t) : type_ref(t) {}
-
- public:
- /// Dereference operator.
- tree operator*() {
- return isa<TREE_LIST>(type_ref) ?
- TREE_VALUE(type_ref) : TREE_TYPE(type_ref);
- };
-
- /// Comparison operators.
- bool operator==(const ContainedTypeIterator &other) const {
- return other.type_ref == this->type_ref;
- }
- bool operator!=(const ContainedTypeIterator &other) const {
- return !(*this == other);
- }
-
- /// Prefix increment operator.
- ContainedTypeIterator& operator++() {
- assert(type_ref && "Incrementing end iterator!");
-
- switch (TREE_CODE(type_ref)) {
- default:
- debug_tree(type_ref);
- llvm_unreachable("Unexpected tree kind!");
- case ARRAY_TYPE:
- case COMPLEX_TYPE:
- case POINTER_TYPE:
- case REFERENCE_TYPE:
- case VECTOR_TYPE:
- // Here type_ref is the type being iterated over. These types all have
- // only one contained type, so incrementing returns the end iterator.
- type_ref = 0;
- break;
-
- case FIELD_DECL:
- // Here type_ref is a field of the record or union type being iterated
- // over. Move on to the next field.
- do
- type_ref = TREE_CHAIN(type_ref);
- while (type_ref && !isa<FIELD_DECL>(type_ref));
- break;
-
- case FUNCTION_TYPE:
- case METHOD_TYPE:
- // Here type_ref is the type being iterated over and the iterator refers
- // to the function return type. Move on to the first function argument
- // (a TREE_LIST node).
- type_ref = TYPE_ARG_TYPES(type_ref);
- break;
-
- case TREE_LIST:
- // Here type_ref belongs to the argument list of the function or method
- // being iterated over. Move on to the next function argument.
- type_ref = TREE_CHAIN(type_ref);
- // If the function takes a fixed number of arguments then the argument
- // list is terminated by void_list_node. This is not a real argument.
- if (type_ref == void_list_node)
- type_ref = 0;
- break;
- }
-
- return *this;
- }
-
- /// begin - Return an iterator referring to the first type contained in the
- /// given type.
- static ContainedTypeIterator begin(tree type) {
- switch (TREE_CODE(type)) {
- default:
- llvm_unreachable("Unknown type!");
-
- case BOOLEAN_TYPE:
- case ENUMERAL_TYPE:
- case FIXED_POINT_TYPE:
- case INTEGER_TYPE:
- case OFFSET_TYPE:
- case REAL_TYPE:
- case VOID_TYPE:
- // No contained types.
- return end();
-
- case ARRAY_TYPE:
- case COMPLEX_TYPE:
- case POINTER_TYPE:
- case REFERENCE_TYPE:
- case VECTOR_TYPE:
- // Use the type itself as the "pointer" to the contained type.
- return ContainedTypeIterator(type);
-
- case QUAL_UNION_TYPE:
- case RECORD_TYPE:
- case UNION_TYPE:
- // The contained types are the types of the record's fields. Use the
- // first FIELD_DECL as the "pointer" to the first contained type.
- for (tree field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field))
- if (isa<FIELD_DECL>(field))
- return ContainedTypeIterator(field);
- return end();
-
- case FUNCTION_TYPE:
- case METHOD_TYPE:
- // The contained types are the return type and the argument types (in
- // the case of METHOD_TYPE nothing special needs to be done for 'this'
- // since it occurs explicitly in the argument list). Return the type
- // itself as the "pointer" to the return type; incrementing will move
- // the iterator on to the argument types.
- // Note that static chains for nested functions cannot be obtained from
- // the function type which is why there is no attempt to handle them.
- return ContainedTypeIterator(type);
- }
- }
-
- /// end - Return the end iterator for contained type iteration.
- static ContainedTypeIterator end() {
- return ContainedTypeIterator(0);
- }
- };
-
-} // Unnamed namespace.
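// A minimal usage sketch (hypothetical helper in the same translation unit,
// not part of the file): visit every type directly contained in 'type'.
static void VisitContainedTypes(tree type) {
  for (ContainedTypeIterator I = ContainedTypeIterator::begin(type),
         E = ContainedTypeIterator::end(); I != E; ++I) {
    tree contained = *I; // The field, element, pointee or argument type.
    (void)contained;     // ... process the contained type here ...
  }
}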
-
-
-//===----------------------------------------------------------------------===//
-// Utilities
-//===----------------------------------------------------------------------===//
-
-/// ArrayLengthOf - Returns the length of the given gcc array type, or NO_LENGTH
-/// if the array has variable or unknown length.
-uint64_t ArrayLengthOf(tree type) {
- assert(isa<ARRAY_TYPE>(type) && "Only for array types!");
- // Workaround for missing sanity checks in older versions of GCC.
- if ((GCC_MINOR == 5 && GCC_MICRO < 3) || (GCC_MINOR == 6 && GCC_MICRO < 2))
- if (!TYPE_DOMAIN(type) || !TYPE_MAX_VALUE(TYPE_DOMAIN(type)))
- return NO_LENGTH;
- tree range = array_type_nelts(type); // The number of elements minus one.
- // Bail out if the array has variable or unknown length.
- if (!isInt64(range, false))
- return NO_LENGTH;
- int64_t Range = (int64_t)getInt64(range, false);
- return Range < 0 ? 0 : 1 + (uint64_t)Range;
-}
-
-/// set_decl_index - Associate a non-negative number with the given GCC
-/// field declaration.
-static int set_decl_index(tree t, int i) {
- assert(i >= 0 && "Negative indices not allowed!");
- setCachedInteger(t, i);
- return i;
-}
-
-/// get_decl_index - Get the non-negative number associated with the given GCC
-/// field decl. Returns a negative value if no such association has been made.
-static int get_decl_index(tree t) {
- int Idx;
- if (getCachedInteger(t, Idx))
- return Idx;
- return -1;
-}
-
-/// GetFieldIndex - Return the index of the field in the given LLVM type that
-/// corresponds to the GCC field declaration 'decl'. This means that the LLVM
-/// and GCC fields start in the same byte (if 'decl' is a bitfield, this means
-/// that its first bit is within the byte the LLVM field starts at). Returns
-/// INT_MAX if there is no such LLVM field.
-int GetFieldIndex(tree decl, Type *Ty) {
- assert(isa<FIELD_DECL>(decl) && "Expected a FIELD_DECL!");
- // FIXME: The following test sometimes fails when compiling Fortran90 because
- // DECL_CONTEXT does not point to the containing type, but some other type!
-// assert(Ty == ConvertType(DECL_CONTEXT(decl)) && "Field not for this type!");
-
- // If we previously cached the field index, return the cached value.
- unsigned Index = (unsigned)get_decl_index(decl);
- if (Index <= INT_MAX)
- return Index;
-
- // TODO: At this point we could process all fields of DECL_CONTEXT(decl), and
- // incrementally advance over the StructLayout. This would make indexing be
- // O(N) rather than O(N log N) if all N fields are used. It's not clear if it
- // would really be a win though.
-
- StructType *STy = dyn_cast<StructType>(Ty);
- // If this is not a struct type, then for sure there is no corresponding LLVM
- // field (we do not require GCC record types to be converted to LLVM structs).
- if (!STy)
- return set_decl_index(decl, INT_MAX);
-
- // If this is an empty struct then there is no corresponding LLVM field.
- if (STy->element_begin() == STy->element_end())
- return set_decl_index(decl, INT_MAX);
-
- // If the field declaration is at a variable or humongous offset then there
- // can be no corresponding LLVM field.
- if (!OffsetIsLLVMCompatible(decl))
- return set_decl_index(decl, INT_MAX);
-
- // Find the LLVM field that contains the first bit of the GCC field.
- uint64_t OffsetInBytes = getFieldOffsetInBits(decl) / 8; // Ignore bit in byte
- const StructLayout *SL = getTargetData().getStructLayout(STy);
- Index = SL->getElementContainingOffset(OffsetInBytes);
-
- // The GCC field must start in the first byte of the LLVM field.
- if (OffsetInBytes != SL->getElementOffset(Index))
- return set_decl_index(decl, INT_MAX);
-
- // We are not able to cache values bigger than INT_MAX, so bail out if the
- // LLVM field index is that huge.
- if (Index >= INT_MAX)
- return set_decl_index(decl, INT_MAX);
-
- // Found an appropriate LLVM field - return it.
- return set_decl_index(decl, Index);
-}
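// A minimal usage sketch (hypothetical helper, not part of the file),
// assuming 'decl' is a FIELD_DECL and 'STy' is the converted type of its
// containing record: map the GCC field to the LLVM field it starts in.
static Type *GetFieldLLVMType(tree decl, StructType *STy) {
  int FieldIdx = GetFieldIndex(decl, STy);
  if (FieldIdx == INT_MAX)
    return 0; // No LLVM field starts in the same byte as the GCC field.
  return STy->getElementType(FieldIdx);
}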
-
-/// getPointerToType - Returns the LLVM register type to use for a pointer to
-/// the given GCC type.
-Type *getPointerToType(tree type) {
- if (isa<VOID_TYPE>(type))
- // void* -> byte*
- return GetUnitPointerType(Context);
- // FIXME: Handle address spaces.
- return ConvertType(type)->getPointerTo();
-}
-
-/// GetUnitType - Returns an integer one address unit wide if 'NumUnits' is 1;
-/// otherwise returns an array of such integers with 'NumUnits' elements. For
-/// example, on a machine which has 16 bit bytes returns an i16 or an array of
-/// i16.
-Type *GetUnitType(LLVMContext &C, unsigned NumUnits) {
- // The following assertion is here because just about every place that calls
- // this routine implicitly assumes this.
- assert(!(BITS_PER_UNIT & 7) && "Unit size not a multiple of 8 bits!");
- Type *UnitTy = IntegerType::get(C, BITS_PER_UNIT);
- if (NumUnits == 1)
- return UnitTy;
- return ArrayType::get(UnitTy, NumUnits);
-}
-
-/// GetUnitPointerType - Returns an LLVM pointer type which points to memory one
-/// address unit wide. For example, on a machine which has 16 bit bytes returns
-/// an i16*.
-Type *GetUnitPointerType(LLVMContext &C, unsigned AddrSpace) {
- return GetUnitType(C)->getPointerTo(AddrSpace);
-}
-
-/// isSized - Return true if the GCC type has a size, perhaps variable. Note
-/// that this returns false for function types, for which the GCC type size
-/// doesn't represent anything useful for us.
-static bool isSized(tree type) {
- if (isa<FUNCTION_TYPE>(type) || isa<METHOD_TYPE>(type))
- return false;
- return TYPE_SIZE(type);
-}
-
-/// isSizeCompatible - Return true if the specified gcc type is guaranteed to be
-/// turned by ConvertType into an LLVM type of the same size (i.e. TYPE_SIZE the
-/// same as getTypeAllocSizeInBits).
-bool isSizeCompatible(tree type) {
- if (!isSized(type))
- return false;
- return isInt64(TYPE_SIZE(type), true);
-}
-
-
-//===----------------------------------------------------------------------===//
-// Matching LLVM types with GCC trees
-//===----------------------------------------------------------------------===//
-
-static Type *CheckTypeConversion(tree type, Type *Ty) {
-#ifndef NDEBUG
- bool Mismatch = false;
- // If the GCC type has a size, check that the LLVM type does too. Note that
- // the LLVM type may have a size when the GCC type does not. For example a
- // C variable length array int[] may be converted into [0 x i32].
- if (isSized(type) && !Ty->isSized()) {
- Mismatch = true;
- errs() << "The GCC type has a size but the LLVM type does not!\n";
- }
- // Check that the LLVM and GCC types really do have the same size when we say
- // they do.
- if (isSizeCompatible(type) && Ty->isSized()) {
- uint64_t GCCSize = getInt64(TYPE_SIZE(type), true);
- uint64_t LLVMSize = getTargetData().getTypeAllocSizeInBits(Ty);
- if (LLVMSize != GCCSize) {
- Mismatch = true;
- errs() << "GCC size: " << GCCSize << "; LLVM size: " << LLVMSize
- << "!\n";
- }
- }
- // Check that the LLVM type has the same alignment or less than the GCC type.
- if (Ty->isSized()) {
- unsigned GCCAlign = TYPE_ALIGN(type);
- unsigned LLVMAlign = getTargetData().getABITypeAlignment(Ty) * 8;
- if (LLVMAlign > GCCAlign) {
- Mismatch = true;
- errs() << "GCC align: " << GCCAlign << "; LLVM align: " << LLVMAlign
- << "\n";
- }
- }
- if (Mismatch) {
- errs() << "GCC: ";
- debug_tree(type);
- errs() << "LLVM: ";
- Ty->print(errs());
- llvm_unreachable("\nLLVM type doesn't represent GCC type!");
- }
-#endif
-
- (void)type;
- return Ty;
-}
-
-// RememberTypeConversion - Associate an LLVM type with a GCC type.
-// These are lazily computed by ConvertType.
-static Type *RememberTypeConversion(tree type, Type *Ty) {
- CheckTypeConversion(type, Ty);
- setCachedType(type, Ty);
- return Ty;
-}
-
-
-//===----------------------------------------------------------------------===//
-// Type Conversion Utilities
-//===----------------------------------------------------------------------===//
-
-// isPassedByInvisibleReference - Return true if an argument of the specified
-// type should be passed in by invisible reference.
-//
-bool isPassedByInvisibleReference(tree Type) {
- // Don't crash in this case.
- if (Type == error_mark_node)
- return false;
-
- // FIXME: Search for TREE_ADDRESSABLE in calls.c, and see if there are other
- // cases that make arguments automatically passed in by reference.
- return TREE_ADDRESSABLE(Type) || TYPE_SIZE(Type) == 0 ||
- !isa<INTEGER_CST>(TYPE_SIZE(Type));
-}
-
-
-//===----------------------------------------------------------------------===//
-// ... getRegType ...
-//===----------------------------------------------------------------------===//
-
-/// getRegType - Returns the LLVM type to use for registers that hold a value
-/// of the scalar GCC type 'type'. All of the EmitReg* routines use this to
-/// determine the LLVM type to return.
-Type *getRegType(tree type) {
- // Check that the type mode doesn't depend on the type variant (various bits
- // of the plugin rely on this).
- assert(TYPE_MODE(type) == TYPE_MODE(TYPE_MAIN_VARIANT(type))
- && "Type mode differs between variants!");
-
- // LLVM doesn't care about variants such as const, volatile, or restrict.
- type = TYPE_MAIN_VARIANT(type);
-
- // NOTE: Any changes made here need to be reflected in LoadRegisterFromMemory,
- // StoreRegisterToMemory and ExtractRegisterFromConstant.
- assert(!isa<AGGREGATE_TYPE>(type) && "Registers must have a scalar type!");
- assert(!isa<VOID_TYPE>(type) && "Registers cannot have void type!");
-
- switch (TREE_CODE(type)) {
-
- default:
- debug_tree(type);
- llvm_unreachable("Unknown register type!");
-
- case BOOLEAN_TYPE:
- case ENUMERAL_TYPE:
- case INTEGER_TYPE:
- // For integral types, convert based on the type precision. For example,
- // this turns bool into i1 while ConvertType probably turns it into i8 or
- // i32.
- return IntegerType::get(Context, TYPE_PRECISION(type));
-
- case COMPLEX_TYPE: {
- Type *EltTy = getRegType(TREE_TYPE(type));
- return StructType::get(EltTy, EltTy, NULL);
- }
-
- case OFFSET_TYPE:
- return getTargetData().getIntPtrType(Context);
-
- case POINTER_TYPE:
- case REFERENCE_TYPE:
- // void* -> byte*
- return isa<VOID_TYPE>(TREE_TYPE(type)) ? GetUnitPointerType(Context) :
- ConvertType(TREE_TYPE(type))->getPointerTo();
-
- case REAL_TYPE:
- if (TYPE_PRECISION(type) == 32)
- return Type::getFloatTy(Context);
- if (TYPE_PRECISION(type) == 64)
- return Type::getDoubleTy(Context);
- if (TYPE_PRECISION(type) == 80)
- return Type::getX86_FP80Ty(Context);
- if (TYPE_PRECISION(type) == 128)
-#ifdef TARGET_POWERPC
- return Type::getPPC_FP128Ty(Context);
-#else
- // IEEE quad precision.
- return Type::getFP128Ty(Context);
-#endif
- debug_tree(type);
- llvm_unreachable("Unknown FP type!");
-
- case VECTOR_TYPE: {
- // LLVM does not support vectors of pointers, so turn any pointers into
- // integers.
- Type *EltTy = isa<ACCESS_TYPE>(TREE_TYPE(type)) ?
- getTargetData().getIntPtrType(Context) : getRegType(TREE_TYPE(type));
- return VectorType::get(EltTy, TYPE_VECTOR_SUBPARTS(type));
- }
-
- }
-}
-
-
-//===----------------------------------------------------------------------===//
-// ... ConvertType ...
-//===----------------------------------------------------------------------===//
-
-static Type *ConvertArrayTypeRecursive(tree type) {
- Type *ElementTy = ConvertType(TREE_TYPE(type));
- uint64_t NumElements = ArrayLengthOf(type);
-
- if (NumElements == NO_LENGTH) // Variable length array?
- NumElements = 0;
-
- // Create the array type.
- Type *Ty = ArrayType::get(ElementTy, NumElements);
-
- // If the array is underaligned, wrap it in a packed struct.
- if (TYPE_ALIGN(type) < TYPE_ALIGN(main_type(type)))
- Ty = StructType::get(Context, Ty, /*isPacked*/ true);
-
- // If the user increased the alignment of the array element type, then the
- // size of the array is rounded up by that alignment even though the size
- // of the array element type is not (!). Correct for this if necessary by
- // adding padding. May also need padding if the element type has variable
- // size and the array type has variable length, but by a miracle the product
- // gives a constant size.
- if (isInt64(TYPE_SIZE(type), true)) {
- uint64_t PadBits = getInt64(TYPE_SIZE(type), true) -
- getTargetData().getTypeAllocSizeInBits(Ty);
- if (PadBits) {
- Type *Padding = ArrayType::get(Type::getInt8Ty(Context), PadBits / 8);
- Ty = StructType::get(Ty, Padding, NULL);
- }
- }
-
- return Ty;
-}
-
-namespace {
- class FunctionTypeConversion : public DefaultABIClient {
- Type *&RetTy;
- SmallVectorImpl<Type*> &ArgTypes;
- CallingConv::ID &CallingConv;
- unsigned Offset;
- bool isShadowRet;
- bool KNRPromotion;
- public:
- FunctionTypeConversion(Type *&retty, SmallVectorImpl<Type*> &AT,
- CallingConv::ID &CC, bool KNR)
- : RetTy(retty), ArgTypes(AT), CallingConv(CC), Offset(0),
- KNRPromotion(KNR) {
- CallingConv = CallingConv::C;
- isShadowRet = false;
- }
-
- /// getCallingConv - This provides the desired CallingConv for the function.
- CallingConv::ID getCallingConv(void) { return CallingConv; }
-
- bool isShadowReturn() const { return isShadowRet; }
-
- /// HandleScalarResult - This callback is invoked if the function returns a
- /// simple scalar result value.
- void HandleScalarResult(Type *RTy) {
- this->RetTy = RTy;
- }
-
- /// HandleAggregateResultAsScalar - This callback is invoked if the function
- /// returns an aggregate value by bit converting it to the specified scalar
- /// type and returning that.
- void HandleAggregateResultAsScalar(Type *ScalarTy, unsigned Off=0) {
- RetTy = ScalarTy;
- this->Offset = Off;
- }
-
- /// HandleAggregateResultAsAggregate - This callback is invoked if the function
- /// returns an aggregate value using multiple return values.
- void HandleAggregateResultAsAggregate(Type *AggrTy) {
- RetTy = AggrTy;
- }
-
- /// HandleShadowResult - Handle an aggregate or scalar shadow argument.
- void HandleShadowResult(PointerType *PtrArgTy, bool RetPtr) {
- // This function either returns void or the shadow argument,
- // depending on the target.
- RetTy = RetPtr ? PtrArgTy : Type::getVoidTy(Context);
-
- // In any case, there is a dummy shadow argument though!
- ArgTypes.push_back(PtrArgTy);
-
- // Also, note the use of a shadow argument.
- isShadowRet = true;
- }
-
- /// HandleAggregateShadowResult - This callback is invoked if the function
- /// returns an aggregate value by using a "shadow" first parameter, which is
- /// a pointer to the aggregate, of type PtrArgTy. If RetPtr is set to true,
- /// the pointer argument itself is returned from the function.
- void HandleAggregateShadowResult(PointerType *PtrArgTy,
- bool RetPtr) {
- HandleShadowResult(PtrArgTy, RetPtr);
- }
-
- /// HandleScalarShadowResult - This callback is invoked if the function
- /// returns a scalar value by using a "shadow" first parameter, which is a
- /// pointer to the scalar, of type PtrArgTy. If RetPtr is set to true,
- /// the pointer argument itself is returned from the function.
- void HandleScalarShadowResult(PointerType *PtrArgTy, bool RetPtr) {
- HandleShadowResult(PtrArgTy, RetPtr);
- }
-
- void HandlePad(llvm::Type *LLVMTy) {
- HandleScalarArgument(LLVMTy, 0, 0);
- }
-
- void HandleScalarArgument(llvm::Type *LLVMTy, tree type,
- unsigned /*RealSize*/ = 0) {
- if (KNRPromotion) {
- if (type == float_type_node)
- LLVMTy = ConvertType(double_type_node);
- else if (LLVMTy->isIntegerTy(16) || LLVMTy->isIntegerTy(8) ||
- LLVMTy->isIntegerTy(1))
- LLVMTy = Type::getInt32Ty(Context);
- }
- ArgTypes.push_back(LLVMTy);
- }
-
- /// HandleByInvisibleReferenceArgument - This callback is invoked if a pointer
- /// (of type PtrTy) to the argument is passed rather than the argument itself.
- void HandleByInvisibleReferenceArgument(llvm::Type *PtrTy,
- tree /*type*/) {
- ArgTypes.push_back(PtrTy);
- }
-
- /// HandleByValArgument - This callback is invoked if the aggregate function
- /// argument is passed by value. It is lowered to a parameter passed by
- /// reference with an additional parameter attribute "ByVal".
- void HandleByValArgument(llvm::Type *LLVMTy, tree type) {
- HandleScalarArgument(LLVMTy->getPointerTo(), type);
- }
-
- /// HandleFCAArgument - This callback is invoked if the aggregate function
- /// argument is a first class aggregate passed by value.
- void HandleFCAArgument(llvm::Type *LLVMTy, tree /*type*/) {
- ArgTypes.push_back(LLVMTy);
- }
- };
-}
-
-static Attributes HandleArgumentExtension(tree ArgTy) {
- if (isa<BOOLEAN_TYPE>(ArgTy)) {
- if (TREE_INT_CST_LOW(TYPE_SIZE(ArgTy)) < INT_TYPE_SIZE)
- return Attribute::ZExt;
- } else if (isa<INTEGER_TYPE>(ArgTy) &&
- TREE_INT_CST_LOW(TYPE_SIZE(ArgTy)) < INT_TYPE_SIZE) {
- if (TYPE_UNSIGNED(ArgTy))
- return Attribute::ZExt;
- else
- return Attribute::SExt;
- }
-
- return Attribute::None;
-}
-
-/// ConvertArgListToFnType - This method is used to build the argument type
-/// list for K&R-style (unprototyped) functions. In this case, we have to
-/// figure out the type list (to build a FunctionType) from the actual
-/// DECL_ARGUMENTS list for the function, passed in as 'Args'. It returns the
-/// computed FunctionType and fills in the calling convention and the
-/// parameter attributes.
-FunctionType *ConvertArgListToFnType(tree type, ArrayRef<tree> Args,
- tree static_chain, bool KNRPromotion,
- CallingConv::ID &CallingConv,
- AttrListPtr &PAL) {
- tree ReturnType = TREE_TYPE(type);
- SmallVector<Type*, 8> ArgTys;
- Type *RetTy(Type::getVoidTy(Context));
-
- FunctionTypeConversion Client(RetTy, ArgTys, CallingConv, KNRPromotion);
- DefaultABI ABIConverter(Client);
-
-#ifdef TARGET_ADJUST_LLVM_CC
- TARGET_ADJUST_LLVM_CC(CallingConv, type);
-#endif
-
- // Builtins are always prototyped, so this isn't one.
- ABIConverter.HandleReturnType(ReturnType, current_function_decl, false);
-
- SmallVector<AttributeWithIndex, 8> Attrs;
-
- // Compute whether the result needs to be zext or sext'd.
- Attributes RAttributes = HandleArgumentExtension(ReturnType);
-
- // Allow the target to change the attributes.
-#ifdef TARGET_ADJUST_LLVM_RETATTR
- TARGET_ADJUST_LLVM_RETATTR(RAttributes, type);
-#endif
-
- if (RAttributes != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(0, RAttributes));
-
- // If this function returns via a shadow argument, the dest loc is passed
- // in as a pointer. Mark that pointer as struct-ret and noalias.
- if (ABIConverter.isShadowReturn())
- Attrs.push_back(AttributeWithIndex::get(ArgTys.size(),
- Attribute::StructRet | Attribute::NoAlias));
-
- std::vector<Type*> ScalarArgs;
- if (static_chain) {
- // Pass the static chain as the first parameter.
- ABIConverter.HandleArgument(TREE_TYPE(static_chain), ScalarArgs);
- // Mark it as the chain argument.
- Attrs.push_back(AttributeWithIndex::get(ArgTys.size(),
- Attribute::Nest));
- }
-
- for (ArrayRef<tree>::iterator I = Args.begin(), E = Args.end(); I != E; ++I) {
- tree ArgTy = TREE_TYPE(*I);
-
- // Determine if there are any attributes for this param.
- Attributes PAttributes = Attribute::None;
-
- ABIConverter.HandleArgument(ArgTy, ScalarArgs, &PAttributes);
-
- // Compute zext/sext attributes.
- PAttributes |= HandleArgumentExtension(ArgTy);
-
- // Compute noalias attributes.
- if (isa<ACCESS_TYPE>(ArgTy) && TYPE_RESTRICT(ArgTy))
- PAttributes |= Attribute::NoAlias;
-
- if (PAttributes != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(ArgTys.size(), PAttributes));
- }
-
- PAL = AttrListPtr::get(Attrs);
- return FunctionType::get(RetTy, ArgTys, false);
-}
-
-FunctionType *ConvertFunctionType(tree type, tree decl, tree static_chain,
- CallingConv::ID &CallingConv,
- AttrListPtr &PAL) {
- Type *RetTy = Type::getVoidTy(Context);
- SmallVector<Type*, 8> ArgTypes;
- FunctionTypeConversion Client(RetTy, ArgTypes, CallingConv, false/*not K&R*/);
- DefaultABI ABIConverter(Client);
-
- // Allow the target to set the CC for things like fastcall etc.
-#ifdef TARGET_ADJUST_LLVM_CC
- TARGET_ADJUST_LLVM_CC(CallingConv, type);
-#endif
-
- ABIConverter.HandleReturnType(TREE_TYPE(type), current_function_decl,
- decl ? DECL_BUILT_IN(decl) : false);
-
- // Compute attributes for return type (and function attributes).
- SmallVector<AttributeWithIndex, 8> Attrs;
- Attributes FnAttributes = Attribute::None;
-
- int flags = flags_from_decl_or_type(decl ? decl : type);
-
- // Check for 'readnone' and 'readonly' function attributes.
- if (flags & ECF_CONST)
- FnAttributes |= Attribute::ReadNone;
- else if (flags & ECF_PURE)
- FnAttributes |= Attribute::ReadOnly;
-
- // TODO: Handle ECF_LOOPING_CONST_OR_PURE
-
- // Check for 'noreturn' function attribute.
- if (flags & ECF_NORETURN)
- FnAttributes |= Attribute::NoReturn;
-
- // Check for 'nounwind' function attribute.
- if (flags & ECF_NOTHROW)
- FnAttributes |= Attribute::NoUnwind;
-
- // Check for 'returns_twice' function attribute.
- if (flags & ECF_RETURNS_TWICE)
- FnAttributes |= Attribute::ReturnsTwice;
-
- // Since they write the return value through a pointer,
- // 'sret' functions cannot be 'readnone' or 'readonly'.
- if (ABIConverter.isShadowReturn())
- FnAttributes &= ~(Attribute::ReadNone|Attribute::ReadOnly);
-
- // Demote 'readnone' nested functions to 'readonly' since
- // they may need to read through the static chain.
- if (static_chain && (FnAttributes & Attribute::ReadNone)) {
- FnAttributes &= ~Attribute::ReadNone;
- FnAttributes |= Attribute::ReadOnly;
- }
-
- // Compute whether the result needs to be zext or sext'd.
- Attributes RAttributes = Attribute::None;
- RAttributes |= HandleArgumentExtension(TREE_TYPE(type));
-
- // Allow the target to change the attributes.
-#ifdef TARGET_ADJUST_LLVM_RETATTR
- TARGET_ADJUST_LLVM_RETATTR(RAttributes, type);
-#endif
-
- // The value returned by a 'malloc' function does not alias anything.
- if (flags & ECF_MALLOC)
- RAttributes |= Attribute::NoAlias;
-
- if (RAttributes != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(0, RAttributes));
-
- // If this function returns via a shadow argument, the dest loc is passed
- // in as a pointer. Mark that pointer as struct-ret and noalias.
- if (ABIConverter.isShadowReturn())
- Attrs.push_back(AttributeWithIndex::get(ArgTypes.size(),
- Attribute::StructRet | Attribute::NoAlias));
-
- std::vector<Type*> ScalarArgs;
- if (static_chain) {
- // Pass the static chain as the first parameter.
- ABIConverter.HandleArgument(TREE_TYPE(static_chain), ScalarArgs);
- // Mark it as the chain argument.
- Attrs.push_back(AttributeWithIndex::get(ArgTypes.size(),
- Attribute::Nest));
- }
-
-#ifdef LLVM_TARGET_ENABLE_REGPARM
- // If the target has regparm parameters, allow it to inspect the function
- // type.
- int local_regparam = 0;
- int local_fp_regparam = 0;
- LLVM_TARGET_INIT_REGPARM(local_regparam, local_fp_regparam, type);
-#endif // LLVM_TARGET_ENABLE_REGPARM
-
- // Keep track of whether we see a byval argument.
- bool HasByVal = false;
-
- // Check if we have a corresponding decl to inspect.
- tree DeclArgs = (decl) ? DECL_ARGUMENTS(decl) : NULL;
- // Loop over all of the arguments, adding them as we go.
- tree Args = TYPE_ARG_TYPES(type);
- for (; Args && TREE_VALUE(Args) != void_type_node; Args = TREE_CHAIN(Args)){
- tree ArgTy = TREE_VALUE(Args);
- if (!isPassedByInvisibleReference(ArgTy))
- if (const StructType *STy = dyn_cast<StructType>(ConvertType(ArgTy)))
- if (STy->isOpaque()) {
- // If we are passing an opaque struct by value, we don't know how many
- // arguments it will turn into. Because we can't handle this yet,
- // codegen the prototype as (...).
- if (CallingConv == CallingConv::C)
- ArgTypes.clear();
- else
- // Don't nuke last argument.
- ArgTypes.erase(ArgTypes.begin()+1, ArgTypes.end());
- Args = 0;
- break;
- }
-
- // Determine if there are any attributes for this param.
- Attributes PAttributes = Attribute::None;
-
- unsigned OldSize = ArgTypes.size();
-
- ABIConverter.HandleArgument(ArgTy, ScalarArgs, &PAttributes);
-
- // Compute zext/sext attributes.
- PAttributes |= HandleArgumentExtension(ArgTy);
-
- // Compute noalias attributes. If we have a decl for the function,
- // inspect it for restrict qualifiers; otherwise try the argument
- // types.
- tree RestrictArgTy = (DeclArgs) ? TREE_TYPE(DeclArgs) : ArgTy;
- if (isa<ACCESS_TYPE>(RestrictArgTy) && TYPE_RESTRICT(RestrictArgTy))
- PAttributes |= Attribute::NoAlias;
-
-#ifdef LLVM_TARGET_ENABLE_REGPARM
- // Allow the target to mark this as inreg.
- if (isa<INTEGRAL_TYPE>(ArgTy) || isa<ACCESS_TYPE>(ArgTy) ||
- isa<REAL_TYPE>(ArgTy))
- LLVM_ADJUST_REGPARM_ATTRIBUTE(PAttributes, ArgTy,
- TREE_INT_CST_LOW(TYPE_SIZE(ArgTy)),
- local_regparam, local_fp_regparam);
-#endif // LLVM_TARGET_ENABLE_REGPARM
-
- if (PAttributes != Attribute::None) {
- HasByVal |= (PAttributes & Attribute::ByVal) != Attribute::None;
-
- // If the argument is split into multiple scalars, assign the
- // attributes to all scalars of the aggregate.
- for (unsigned i = OldSize + 1; i <= ArgTypes.size(); ++i) {
- Attrs.push_back(AttributeWithIndex::get(i, PAttributes));
- }
- }
-
- if (DeclArgs)
- DeclArgs = TREE_CHAIN(DeclArgs);
- }
-
- // If there is a byval argument then it is not safe to mark the function
- // 'readnone' or 'readonly': gcc permits a 'const' or 'pure' function to
- // write to struct arguments passed by value, but in LLVM this becomes a
- // write through the byval pointer argument, which LLVM does not allow for
- // readonly/readnone functions.
- if (HasByVal)
- FnAttributes &= ~(Attribute::ReadNone | Attribute::ReadOnly);
-
- assert(RetTy && "Return type not specified!");
-
- if (FnAttributes != Attribute::None)
- Attrs.push_back(AttributeWithIndex::get(~0, FnAttributes));
-
- // Finally, make the function type and result attributes.
- PAL = AttrListPtr::get(Attrs);
- return FunctionType::get(RetTy, ArgTypes, Args == 0);
-}
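
A minimal usage sketch, assuming a GCC FUNCTION_TYPE node `fntype` and an
optional `fndecl` (both hypothetical names), of how a caller might drive
this routine:

    CallingConv::ID CC = CallingConv::C;  // may be adjusted by the target
    AttrListPtr PAL;                      // receives return/parameter attributes
    FunctionType *FTy =
        ConvertFunctionType(fntype, fndecl, /*static_chain=*/NULL, CC, PAL);
    // FTy->isVarArg() holds both for genuine varargs prototypes and when the
    // prototype had to be degraded to (...) because of an opaque by-value struct.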
-
-static Type *ConvertPointerTypeRecursive(tree type) {
- // This is where self-recursion loops are broken, by not converting the type
- // pointed to if this would cause trouble (the pointer type is turned into
- // {}* instead).
- tree pointee = main_type(type);
-
- // The pointer type is in the strongly connected component (SCC) currently
- // being converted. Check whether the pointee is as well. If there is more
- // than one type in the SCC then necessarily the pointee type is in the SCC
- // since any path from the pointer type to the other type necessarily passes
- // via the pointee. If the pointer type is the only element of the SCC then
- // the pointee is only in the SCC if it is equal to the pointer.
- bool bothInSCC = SCCInProgress->size() != 1 || pointee == type;
-
- Type *PointeeTy;
- if (!bothInSCC) {
- // It is safe to convert the pointee. This is the common case, as we get
- // here for pointers to integers and so on.
- PointeeTy = ConvertType(pointee);
- if (PointeeTy->isVoidTy())
- PointeeTy = GetUnitType(Context); // void* -> byte*.
- } else {
- // Both the pointer and the pointee type are in the SCC so it is not safe
- // to convert the pointee type - otherwise we would get an infinite loop.
- // However if a type, for example an opaque struct placeholder, has been
- // registered for the pointee then we can return a pointer to it, giving
- // nicer IR (this is not needed for correctness). Note that some members
- // of the SCC may have been converted already at this point (for this to
- // happen there must be more than one pointer type in the SCC), and thus
- // will have LLVM types registered for them. Unfortunately which types
- // have been converted depends on the order in which we visit the SCC, and
- // that is not an intrinsic property of the SCC. This is why we choose to
- // only use the types registered for records and unions - these are always
- // available. As a further attempt to improve the IR, we return an S* for
- // an array type S[N] if (recursively) S is a record or union type.
-
- // Drill down through nested arrays to the ultimate element type. Thanks
- // to this we may return S* for a (S[])*, which is better than {}*.
- while (isa<ARRAY_TYPE>(pointee))
- pointee = main_type(pointee);
-
- // If the pointee is a record or union type then return a pointer to its
- // placeholder type. Otherwise return {}*.
- if (isa<RECORD_OR_UNION_TYPE>(pointee))
- PointeeTy = getCachedType(pointee);
- else
- PointeeTy = StructType::get(Context);
- }
-
- return PointeeTy->getPointerTo();
-}
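
As a concrete illustration (a sketch, with the hypothetical name `Node`),
this is the kind of self-referential type the code above has to break:

    struct Node {
      struct Node *next;  // pointer and pointee share one SCC; the registered
                          // %struct.Node placeholder is used, so this field
                          // becomes %struct.Node* rather than the {}* fallback
      int value;
    };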
-
-typedef Range<uint64_t> BitRange;
-
-/// TypedRange - A type that applies to a range of bits. Any part of the type
-/// outside of the range is discarded. The range may be bigger than the type
-/// in which case any extra bits have an undefined type.
-namespace {
-
-class TypedRange {
- BitRange R; // The range of bits occupied by the type.
- Type *Ty; // The type. May be null if the range is empty.
- uint64_t Starts; // The first bit of the type is positioned at this offset.
-
- TypedRange(BitRange r, Type *t, uint64_t starts) :
- R(r), Ty(t), Starts(starts) {
- assert((R.empty() || Ty) && "Need type when range not empty!");
- }
-
- /// isSafeToReturnContentsDirectly - Return whether the current value for the
- /// type properly represents the bits in the range and so can be handed to the
- /// user as is.
- bool isSafeToReturnContentsDirectly(const TargetData &TD) const {
- // If there is no type (allowed when the range is empty) then one needs to
- // be created.
- if (!Ty)
- return false;
- // If the first bit of the type is not the first bit of the range then it
- // needs to be displaced before being passed to the user.
- if (!R.empty() && R.getFirst() != Starts)
- return false;
- // Check that the type isn't something like i17. Avoiding types like this
- // is not needed for correctness, but makes life easier for the optimizers.
- if ((Ty->getPrimitiveSizeInBits() % BITS_PER_UNIT) != 0)
- return false;
- // If the type is wider than the range then it needs to be truncated before
- // being passed to the user.
- uint64_t AllocBits = TD.getTypeAllocSizeInBits(Ty);
- return AllocBits <= R.getWidth();
- }
-
-public:
- /// get - Use the given type for the range [first, last).
- static TypedRange get(uint64_t first, uint64_t last, Type *Ty) {
- return TypedRange(BitRange(first, last), Ty, first);
- }
-
- // Copy assignment operator.
- TypedRange &operator=(const TypedRange &other) {
- R = other.R; Ty = other.Ty; Starts = other.Starts;
- return *this;
- }
-
- /// getRange - Return the range occupied by this field.
- BitRange getRange() const { return R; }
-
- /// ChangeRangeTo - Change the range occupied by this field.
- void ChangeRangeTo(BitRange r) { R = r; }
-
- /// JoinWith - Form the union of this field with another field (which must be
- /// disjoint from this one). After this the range will be the convex hull of
- /// the ranges of the two fields.
- void JoinWith(const TypedRange &S);
-
- /// extractContents - Return the contained bits as a type which covers every
- /// defined bit in the range, yet is guaranteed to have alloc size no larger
- /// than the width of the range. Unlike the other methods for this class this
- /// one requires that the width of the range be a multiple of an address unit,
- /// which usually means a multiple of 8.
- Type *extractContents(const TargetData &TD) {
- assert(R.getWidth() % BITS_PER_UNIT == 0 && "Boundaries not aligned?");
- // If the current value for the type can be used to represent the bits in
- // the range then just return it.
- if (isSafeToReturnContentsDirectly(TD))
- return Ty;
- // If the range is empty then return a type with zero size.
- if (R.empty()) {
- // Return an empty array. Remember the returned value as an optimization
- // in case we are called again.
- Ty = GetUnitType(Context, 0);
- assert(isSafeToReturnContentsDirectly(TD) && "Unit over aligned?");
- return Ty;
- }
- // If the type is something like i17 then round it up to a multiple of a
- // byte. This is not needed for correctness, but helps the optimizers.
- if ((Ty->getPrimitiveSizeInBits() % BITS_PER_UNIT) != 0) {
- unsigned BitWidth = RoundUpToAlignment(Ty->getPrimitiveSizeInBits(),
- BITS_PER_UNIT);
- Ty = IntegerType::get(Context, BitWidth);
- if (isSafeToReturnContentsDirectly(TD))
- return Ty;
- }
- // Represent the range using an array of bytes. Remember the returned type
- // as an optimization in case we are called again.
- // TODO: If the type only needs to be truncated and has struct or array type
- // then we could try to do the truncation by dropping or modifying the last
- // elements of the type, maybe yielding something less horrible.
- uint64_t Units = R.getWidth() / BITS_PER_UNIT;
- Ty = GetUnitType(Context, Units);
- Starts = R.getFirst();
- assert(isSafeToReturnContentsDirectly(TD) && "Unit over aligned?");
- return Ty;
- }
-};
-
-} // Unnamed namespace.
-
-/// JoinWith - Form the union of this field with another field (which must be
-/// disjoint from this one). After this the range will be the convex hull of
-/// the ranges of the two fields.
-void TypedRange::JoinWith(const TypedRange &S) {
- if (S.R.empty())
- return;
- if (R.empty()) {
- *this = S;
- return;
- }
- // Use an integer type that covers both ranges. Turning everything into an
- // integer like this is pretty nasty, but as we only get here for bitfields
- // it is fairly harmless.
- R = R.Join(S.R);
- Ty = IntegerType::get(Context, R.getWidth());
- Starts = R.getFirst();
-}
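
A small worked example, assuming two disjoint bitfield pieces of type i8
(illustrative values only):

    TypedRange A = TypedRange::get(0,  8,  Type::getInt8Ty(Context));  // bits [0,8)
    TypedRange B = TypedRange::get(16, 24, Type::getInt8Ty(Context));  // bits [16,24)
    A.JoinWith(B);
    // A now spans the convex hull [0,24): its type becomes the integer i24
    // covering that width, and its contents start at bit 0.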
-
-static Type *ConvertRecordTypeRecursive(tree type) {
- // FIXME: This new logic, especially the handling of bitfields, is untested
- // and probably wrong on big-endian machines.
- assert(TYPE_SIZE(type) && "Incomplete types should be handled elsewhere!");
-
- IntervalList<TypedRange, uint64_t, 8> Layout;
- const TargetData &TD = getTargetData();
-
- // Get the size of the type in bits. If the type has variable or ginormous
- // size then it is convenient to pretend it is "infinitely" big.
- uint64_t TypeSize = isInt64(TYPE_SIZE(type), true) ?
- getInt64(TYPE_SIZE(type), true) : ~0UL;
-
- // Record all interesting fields so they can easily be visited backwards.
- SmallVector<tree, 16> Fields;
- for (tree field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
- if (!isa<FIELD_DECL>(field)) continue;
- // Ignore fields with variable or unknown position since they cannot be
- // represented by the LLVM type system.
- if (!OffsetIsLLVMCompatible(field))
- continue;
- Fields.push_back(field);
- }
-
- // Process the fields in reverse order. This is for the benefit of union
- // types since it means that a zero constant of the LLVM type will fully
- // initialize the first union member, which is needed if the zero constant
- // is to be used as the default value for the union type.
- for (SmallVector<tree, 16>::reverse_iterator I = Fields.rbegin(),
- E = Fields.rend(); I != E; ++I) {
- tree field = *I;
- uint64_t FirstBit = getFieldOffsetInBits(field);
- assert(FirstBit <= TypeSize && "Field off end of type!");
- // Determine the width of the field.
- uint64_t BitWidth;
- Type *FieldTy = ConvertType(TREE_TYPE(field));
- if (isInt64(DECL_SIZE(field), true)) {
- // The field has a size and it is a constant, so use it. Note that
- // this size may be smaller than the type size. For example, if the
- // next field starts inside alignment padding at the end of this one
- // then DECL_SIZE will be the size with the padding used by the next
- // field not included.
- BitWidth = getInt64(DECL_SIZE(field), true);
- } else {
- // If the field has variable or unknown size then use the size of the
- // LLVM type instead as it gives the minimum size the field may have.
- assert(FieldTy->isSized() && "Type field has no size!");
- BitWidth = TD.getTypeAllocSizeInBits(FieldTy);
- if (FirstBit + BitWidth > TypeSize)
- BitWidth = TypeSize - FirstBit;
- }
- uint64_t LastBit = FirstBit + BitWidth;
-
- // Set the type of the range of bits occupied by the field to the LLVM type
- // for the field.
- Layout.AddInterval(TypedRange::get(FirstBit, LastBit, FieldTy));
- }
-
- // Force all fields to begin and end on a byte boundary. This automagically
- // takes care of bitfields.
- Layout.AlignBoundaries(BITS_PER_UNIT);
-
- // Determine whether to return a packed struct type. If returning an ordinary
- // struct would result in a type that is more aligned than the GCC type then
- // return a packed struct instead. If a field's alignment would make it start
- // after its desired position then also use a packed struct type.
- bool Pack = false;
- unsigned MaxAlign = TYPE_ALIGN(type);
- for (unsigned i = 0, e = Layout.getNumIntervals(); i != e; ++i) {
- TypedRange F = Layout.getInterval(i);
- uint64_t First = F.getRange().getFirst();
- Type *Ty = F.extractContents(TD);
- unsigned Alignment = TD.getABITypeAlignment(Ty) * 8;
- if (Alignment > MaxAlign || First % Alignment) {
- Pack = true;
- break;
- }
- }
-
- // Create the elements that will make up the struct type. As well as the
- // fields themselves there may also be padding elements.
- std::vector<Type*> Elts;
- Elts.reserve(Layout.getNumIntervals());
- uint64_t EndOfPrevious = 0; // Offset of first bit after previous element.
- for (unsigned i = 0, e = Layout.getNumIntervals(); i != e; ++i) {
- TypedRange F = Layout.getInterval(i);
- uint64_t First = F.getRange().getFirst();
- Type *Ty = F.extractContents(TD);
- assert(EndOfPrevious <= First && "Previous field too big!");
-
- // If there is a gap then we may need to fill it with padding.
- if (First > EndOfPrevious) {
- // There is a gap between the end of the previous field and the start of
- // this one. The alignment of the field contents may mean that it will
- // start at the right offset anyway, but if not then insert padding.
- bool NeedPadding = true;
- if (!Pack) {
- // If the field's alignment will take care of the gap then there is no
- // need for padding.
- unsigned Alignment = TD.getABITypeAlignment(Ty) * 8;
- if (First == (EndOfPrevious + Alignment - 1) / Alignment * Alignment)
- NeedPadding = false;
- }
- if (NeedPadding) {
- // Fill the gap with an array of bytes.
- assert((First - EndOfPrevious) % BITS_PER_UNIT == 0 &&
- "Non-unit field boundaries!");
- uint64_t Units = (First - EndOfPrevious) / BITS_PER_UNIT;
- Elts.push_back(GetUnitType(Context, Units));
- }
- }
-
- // Append the field.
- Elts.push_back(Ty);
- EndOfPrevious = First + TD.getTypeAllocSizeInBits(Ty);
- }
-
- // If the GCC type has a sensible size then we guarantee that LLVM type has
- // the same size. If needed, append padding to ensure this.
- if (TypeSize != ~0UL && EndOfPrevious < TypeSize) {
- assert((TypeSize - EndOfPrevious) % BITS_PER_UNIT == 0 &&
- "Non-unit type size?");
- uint64_t Units = (TypeSize - EndOfPrevious) / BITS_PER_UNIT;
- Elts.push_back(GetUnitType(Context, Units));
- }
-
- // OK, we're done. Add the fields to the struct type and return it.
- Type *STy = getCachedType(type);
- assert(STy && isa<StructType>(STy) && cast<StructType>(STy)->isOpaque() &&
- "Incorrect placeholder for struct type!");
- cast<StructType>(STy)->setBody(Elts, Pack);
- return STy;
-}
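
To make the layout pass concrete, consider (as a sketch, not taken from a
test) a plain record on a target with 8-bit bytes and 32-bit int alignment:

    // struct S { char c; int i; };   // GCC: c at bit 0 (8 bits), i at bit 32
    //
    // Intervals after AlignBoundaries: [0,8) -> i8 and [32,64) -> i32.
    // Neither field is over-aligned or misplaced, so Pack stays false; the
    // [8,32) gap is absorbed by i32's natural alignment, so no padding element
    // is needed and the placeholder is given the body { i8, i32 }, whose alloc
    // size matches the 64-bit TYPE_SIZE.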
-
-/// mayRecurse - Return true if converting this type may require breaking a
-/// self-referential type loop. For example, converting the struct type
-/// struct S;
-/// struct S {
-/// struct S* s;
-/// };
-/// requires converting the "struct S*" field type; converting that pointer
-/// type requires converting "struct S", leading to an infinite loop. On the
-/// other hand simple types like integers are never self-referential. As this
-/// routine is intended to be quick and simple, it returns true when in doubt.
-/// Note that if a complicated type has already been converted then false is
-/// usually returned, since type conversion doesn't have to do anything except
-/// return the previously computed LLVM type. The exception is record or union
-/// types which were first converted when incomplete but that are now complete
-/// so need to be converted again.
-static bool mayRecurse(tree type) {
- assert(type == TYPE_MAIN_VARIANT(type) && "Not converting the main variant!");
- switch (TREE_CODE(type)) {
- default:
- llvm_unreachable("Unknown type!");
-
- case BOOLEAN_TYPE:
- case ENUMERAL_TYPE:
- case FIXED_POINT_TYPE:
- case INTEGER_TYPE:
- case OFFSET_TYPE:
- case REAL_TYPE:
- case VOID_TYPE:
- // Simple types that are never self-referential.
- return false;
-
- case COMPLEX_TYPE:
- case VECTOR_TYPE:
- // Converting these types does involve converting another type; however, that
- // conversion cannot refer back to the initial type.
- // NOTE: GCC supports vectors of pointers, and the pointer could refer back
- // to the vector. However as LLVM does not support vectors of pointers we
- // don't convert the pointer type and just use an integer instead, so as far
- // as we are concerned such vector types are not self-referential.
- return false;
-
- case ARRAY_TYPE:
- case FUNCTION_TYPE:
- case METHOD_TYPE:
- case POINTER_TYPE:
- case REFERENCE_TYPE:
- // Converting these types may recurse unless the type was already converted.
- return getCachedType(type) == 0;
-
- case QUAL_UNION_TYPE:
- case RECORD_TYPE:
- case UNION_TYPE: {
- // Converting these types may recurse unless already converted. However if
- // the type was converted when still incomplete but is now complete then it
- // needs to be converted again, which might recurse.
-
- // If the type is incomplete then converting it will not recurse (conversion
- // just returns an opaque type).
- if (!TYPE_SIZE(type))
- return false;
-
- // If the type was not previously converted then converting it may recurse.
- Type *Ty = getCachedType(type);
- if (!Ty)
- return true;
-
- // If the type was previously converted when incomplete then converting it
- // may recurse as the type is now complete so needs to be converted again.
- if (cast<StructType>(Ty)->isOpaque())
- return true;
-
- // The type was already converted and does not need to be converted again.
- return false;
- }
- }
-}
-
-/// ConvertTypeRecursive - Convert a type when conversion may require breaking
-/// type conversion loops, see mayRecurse. Note that all types used by but not
-/// in the current strongly connected component (SCC) must have been converted
-/// already.
-static Type *ConvertTypeRecursive(tree type) {
- assert(type == TYPE_MAIN_VARIANT(type) && "Not converting the main variant!");
- assert(mayRecurse(type) && "Expected a recursive type!");
- assert(SCCInProgress && "Missing recursion data!");
-
-#ifndef NDEBUG
- // Check that the given type is in the current strongly connected component
- // (SCC) of the type graph. This should always be the case because SCCs are
- // visited bottom up.
- bool inSCC = false;
- for (size_t i = 0, e = SCCInProgress->size(); i != e; ++i)
- if ((*SCCInProgress)[i] == type) {
- inSCC = true;
- break;
- }
- if (!inSCC) {
- debug_tree(type);
- llvm_unreachable("Type not in SCC!");
- }
-#endif
-
- switch (TREE_CODE(type)) {
- default:
- debug_tree(type);
- llvm_unreachable("Unexpected type!");
-
- case ARRAY_TYPE:
- return RememberTypeConversion(type, ConvertArrayTypeRecursive(type));
-
- case FUNCTION_TYPE:
- case METHOD_TYPE: {
- CallingConv::ID CallingConv;
- AttrListPtr PAL;
- // No declaration to pass through, passing NULL.
- return RememberTypeConversion(type, ConvertFunctionType(type, NULL, NULL,
- CallingConv, PAL));
- }
-
- case POINTER_TYPE:
- case REFERENCE_TYPE:
- return RememberTypeConversion(type, ConvertPointerTypeRecursive(type));
-
- case RECORD_TYPE:
- case UNION_TYPE:
- case QUAL_UNION_TYPE:
- return RememberTypeConversion(type, ConvertRecordTypeRecursive(type));
- }
-}
-
-/// ConvertTypeNonRecursive - Convert a type when this is known to not require
-/// breaking type conversion loops, see mayRecurse.
-static Type *ConvertTypeNonRecursive(tree type) {
- assert(type == TYPE_MAIN_VARIANT(type) && "Not converting the main variant!");
- assert(!mayRecurse(type) && "Expected a non-recursive type!");
-
- switch (TREE_CODE(type)) {
- default:
- debug_tree(type);
- llvm_unreachable("Unknown or recursive type!");
-
- case ARRAY_TYPE:
- case FUNCTION_TYPE:
- case METHOD_TYPE:
- case POINTER_TYPE:
- case REFERENCE_TYPE: {
- // If these types are not recursive it can only be because they were already
- // converted and we can safely return the result of the previous conversion.
- Type *Ty = getCachedType(type);
- assert(Ty && "Type not already converted!");
- return CheckTypeConversion(type, Ty);
- }
-
- case ENUMERAL_TYPE:
- // If the enum is incomplete return a placeholder type.
- if (!TYPE_SIZE(type))
- return CheckTypeConversion(type, GetUnitType(Context));
- // Otherwise fall through.
- case BOOLEAN_TYPE:
- case INTEGER_TYPE: {
- uint64_t Size = getInt64(TYPE_SIZE(type), true);
- // Caching the type conversion is not worth it.
- return CheckTypeConversion(type, IntegerType::get(Context, Size));
- }
-
- case COMPLEX_TYPE: {
- if (Type *Ty = getCachedType(type)) return Ty;
- Type *Ty = ConvertTypeNonRecursive(main_type(type));
- Ty = StructType::get(Ty, Ty, NULL);
- return RememberTypeConversion(type, Ty);
- }
-
- case OFFSET_TYPE:
- // Handle OFFSET_TYPE specially. This is used for pointers to members,
- // which are really just integer offsets. Return the appropriate integer
- // type directly.
- // Caching the type conversion is not worth it.
- return CheckTypeConversion(type, getTargetData().getIntPtrType(Context));
-
- case REAL_TYPE:
- // Caching the type conversion is not worth it.
- switch (TYPE_PRECISION(type)) {
- default:
- debug_tree(type);
- llvm_unreachable("Unknown FP type!");
- case 32: return CheckTypeConversion(type, Type::getFloatTy(Context));
- case 64: return CheckTypeConversion(type, Type::getDoubleTy(Context));
- case 80: return CheckTypeConversion(type, Type::getX86_FP80Ty(Context));
- case 128:
-#ifdef TARGET_POWERPC
- return CheckTypeConversion(type, Type::getPPC_FP128Ty(Context));
-#else
- // IEEE quad precision.
- return CheckTypeConversion(type, Type::getFP128Ty(Context));
-#endif
- }
-
- case RECORD_TYPE:
- case QUAL_UNION_TYPE:
- case UNION_TYPE:
- // If the type was already converted then return the already computed type.
- if (Type *Ty = getCachedType(type))
- return CheckTypeConversion(type, Ty);
-
- // Otherwise this must be an incomplete type - return an opaque struct.
- assert(!TYPE_SIZE(type) && "Expected an incomplete type!");
- return RememberTypeConversion(type,
- StructType::create(Context,
- getDescriptiveName(type)));
-
- case VECTOR_TYPE: {
- if (Type *Ty = getCachedType(type)) return Ty;
- Type *Ty;
- // LLVM does not support vectors of pointers, so turn any pointers into
- // integers.
- if (isa<ACCESS_TYPE>(TREE_TYPE(type)))
- Ty = getTargetData().getIntPtrType(Context);
- else
- Ty = ConvertTypeNonRecursive(main_type(type));
- Ty = VectorType::get(Ty, TYPE_VECTOR_SUBPARTS(type));
- return RememberTypeConversion(type, Ty);
- }
-
- case VOID_TYPE:
- // Caching the type conversion is not worth it.
- return CheckTypeConversion(type, Type::getVoidTy(Context));
- }
-}
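
For orientation, a few representative conversions this routine performs
(illustrative only, assuming a typical C front end on a common target):

    // int             (INTEGER_TYPE, 32 bits)   ->  i32
    // double          (REAL_TYPE,    64 bits)   ->  double
    // _Complex float  (COMPLEX_TYPE)            ->  { float, float }
    // pointer-to-member offset (OFFSET_TYPE)    ->  the target's intptr type
    // incomplete enum (ENUMERAL_TYPE, no size)  ->  the unit (byte) type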
-
-/// RecursiveTypeIterator - A convenience class that visits only those nodes
-/// in the type graph that mayRecurse thinks might be self-referential. Note
-/// that dereferencing returns the main variant of the contained type rather
-/// than the contained type itself. See ContainedTypeIterator and mayRecurse
-/// for more information about the type graph and self-referential types.
-namespace {
-
- class RecursiveTypeIterator {
- // This class wraps an iterator that visits all contained types, and just
- // increments the iterator over any contained types that will not recurse.
- ContainedTypeIterator I;
-
- /// SkipNonRecursiveTypes - Increment the wrapped iterator over any types
- /// that mayRecurse says can be converted directly without having to worry
- /// about self-recursion.
- void SkipNonRecursiveTypes() {
- while (I != ContainedTypeIterator::end() &&
- !mayRecurse(TYPE_MAIN_VARIANT(*I)))
- ++I;
- }
-
- /// RecursiveTypeIterator - Convenience constructor for internal use.
- explicit RecursiveTypeIterator(const ContainedTypeIterator& i) : I(i) {}
-
- public:
-
- /// Dereference operator returning the main variant of the contained type.
- tree operator*() {
- return TYPE_MAIN_VARIANT(*I);
- };
-
- /// Comparison operators.
- bool operator==(const RecursiveTypeIterator &other) const {
- return other.I == this->I;
- }
- bool operator!=(const RecursiveTypeIterator &other) const {
- return !(*this == other);
- }
-
- /// Postfix increment operator.
- RecursiveTypeIterator operator++(int) {
- RecursiveTypeIterator Result(*this);
- ++(*this);
- return Result;
- }
-
- /// Prefix increment operator.
- RecursiveTypeIterator& operator++() {
- ++I;
- SkipNonRecursiveTypes();
- return *this;
- }
-
- /// begin - Return an iterator referring to the first type contained in the
- /// given type.
- static RecursiveTypeIterator begin(tree type) {
- RecursiveTypeIterator R(ContainedTypeIterator::begin(type));
- R.SkipNonRecursiveTypes();
- return R;
- }
-
- /// end - Return the end iterator for contained type iteration.
- static RecursiveTypeIterator end() {
- return RecursiveTypeIterator(ContainedTypeIterator::end());
- }
- };
-
-} // Unnamed namespace.
-
-// Traits for working with the graph of possibly self-referential type nodes,
-// see RecursiveTypeIterator.
-namespace llvm {
- template <> struct GraphTraits<tree> {
- typedef tree_node NodeType;
- typedef RecursiveTypeIterator ChildIteratorType;
- static inline NodeType *getEntryNode(tree t) {
- assert(TYPE_P(t) && "Expected a type!");
- return t;
- }
- static inline ChildIteratorType child_begin(tree type) {
- return ChildIteratorType::begin(type);
- }
- static inline ChildIteratorType child_end(tree) {
- return ChildIteratorType::end();
- }
- };
-}
-
-Type *ConvertType(tree type) {
- if (type == error_mark_node) return Type::getInt32Ty(Context);
-
- // Check that the type mode doesn't depend on the type variant (various bits
- // of the plugin rely on this).
- assert(TYPE_MODE(type) == TYPE_MODE(TYPE_MAIN_VARIANT(type))
- && "Type mode differs between variants!");
-
- // LLVM doesn't care about variants such as const, volatile, or restrict.
- type = TYPE_MAIN_VARIANT(type);
-
- // If this type can be converted without special action being needed to avoid
- // conversion loops coming from self-referential types, then convert it.
- if (!mayRecurse(type))
- return ConvertTypeNonRecursive(type);
-
- // If we already started a possibly looping type conversion, continue with it.
- if (SCCInProgress)
- return ConvertTypeRecursive(type);
-
- // Begin converting a type for which the conversion may require breaking type
- // conversion loops coming from self-referential types, see mayRecurse. First
- // analyse all of the types that will need to be converted in order to convert
- // this one, finding sets of types that must be converted simultaneously (i.e.
- // for which converting any one of them requires converting all of the others;
- // these sets are the strongly connected components (SCCs) of the type graph),
- // then visit them bottom up, converting all types in them. "Bottom up" means
- // that if a type in a SCC makes use of a type T that is not in the SCC then T
- // will be visited first. Note that this analysis is performed only once: the
- // results of the type conversion are cached, and any future conversion of one
- // of the visited types will just return the cached value.
- for (scc_iterator<tree> I = scc_begin(type), E = scc_end(type); I != E; ++I) {
- const std::vector<tree> &SCC = *I;
-
- // First create a placeholder opaque struct for every record or union type
- // in the SCC. This way, if we have both "struct S" and "struct S*" in the
- // SCC then we can return an LLVM "%struct.s*" for the pointer rather than
- // the nasty {}* type we are obliged to return in general.
- for (size_t i = 0, e = SCC.size(); i != e; ++i) {
- tree some_type = SCC[i];
- if (!isa<RECORD_OR_UNION_TYPE>(some_type)) {
- assert(!getCachedType(some_type) && "Type already converted!");
- continue;
- }
- // If the type used to be incomplete then an opaque struct placeholder may
- // have been created for it already.
- Type *Ty = getCachedType(some_type);
- if (Ty) {
- assert(isa<StructType>(Ty) && cast<StructType>(Ty)->isOpaque() &&
- "Recursive struct already fully converted!");
- continue;
- }
- // Otherwise register a placeholder for this type.
- Ty = StructType::create(Context, getDescriptiveName(some_type));
- // Associate the placeholder with the GCC type without sanity checking
- // since the type sizes won't match yet.
- setCachedType(some_type, Ty);
- }
-
- // Now convert every type in the SCC, filling in the placeholders created
- // above. In the common case there is only one type in the SCC, meaning
- // that the type turned out not to be self-recursive and can be converted
- // without having to worry about type conversion loops. If there is more
- // than one type in the SCC then self-recursion is overcome by returning
- // {}* for the pointer types if nothing better can be done. As back edges
- // in the type graph can only be created by pointer types, "removing" such
- // edges like this destroys all cycles allowing the other types in the SCC
- // to be converted straightforwardly.
- SCCInProgress = &SCC;
- for (size_t i = 0, e = SCC.size(); i != e; ++i)
- ConvertType(SCC[i]);
- SCCInProgress = 0;
-
- // Finally, replace pointer types with a pointer to the pointee type (which
- // has now been computed). This means that while uses of the pointer type
- // by types in the SCC will most likely have been converted into nasty {}*,
- // uses by types outside the SCC will see a sensible pointer type. This is
- // not needed for correctness - it just makes the IR nicer.
- if (SCC.size() > 1)
- for (size_t i = 0, e = SCC.size(); i != e; ++i) {
- tree some_type = SCC[i];
- if (isa<ACCESS_TYPE>(some_type)) {
- tree pointee = main_type(some_type);
- // The pointee cannot have void type since the SCC contains more than
- // one type.
- assert(!isa<VOID_TYPE>(pointee) && "Self-recursive void*!");
- // The pointee must have been converted since it has to be in the same
- // SCC as the pointer (since the SCC contains more than one type).
- Type *PointeeTy = getCachedType(pointee);
- assert(PointeeTy && "Pointee not converted!");
- RememberTypeConversion(some_type, PointeeTy->getPointerTo());
- }
- }
- }
-
- // At this point every type reachable from this one has been converted, and
- // the conversion results cached. Return the value computed for the type.
- Type *Ty = getCachedType(type);
- assert(Ty && "Type not converted!");
- return Ty;
-}
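
An illustrative multi-type SCC (a sketch with hypothetical names), the case
the placeholder and pointer-patching logic above exists for:

    // struct A;                    // opaque %struct.A placeholder registered
    // struct B { struct A *a; };   // field converts to %struct.A*
    // struct A { struct B *b; };   // field converts to %struct.B*
    //
    // Both records and both pointer types land in one SCC; the whole SCC is
    // converted in a single pass, then the pointer types are re-registered as
    // pointers to the now-complete struct bodies.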